From 7c570e2778fea99dd78493c6b50f5d7da4ffc405 Mon Sep 17 00:00:00 2001 From: Oliver Walsh Date: Wed, 4 Jun 2025 17:53:35 +0100 Subject: [PATCH 01/31] Document helm operator dryrunOption (#6958) Signed-off-by: Oliver Walsh --- .../en/docs/building-operators/helm/reference/watches.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/content/en/docs/building-operators/helm/reference/watches.md b/website/content/en/docs/building-operators/helm/reference/watches.md index 7666afc90f..f3a7f416a5 100644 --- a/website/content/en/docs/building-operators/helm/reference/watches.md +++ b/website/content/en/docs/building-operators/helm/reference/watches.md @@ -20,6 +20,7 @@ The follow tables describes the fields in an entry in `watches.yaml`: | watchDependentResources | Enable watching resources that are created by helm (default: `true`). | | overrideValues | Values to be used for overriding Helm chart's defaults. For additional information see the [reference doc][override-values]. | | selector | The conditions that a resource's labels must satisfy in order to get reconciled. For additional information see [labels and selectors documentation][label-selector-doc]. | +| dryRunOption | The helm dry-run method to use when comparing manifests. Set to `server` to ensure `lookup()` functions are evaluated (default: `client/none`) | For reference, here is an example of a simple `watches.yaml` file: @@ -36,6 +37,7 @@ For reference, here is an example of a simple `watches.yaml` file: selector: matchExpressions: - {key: testLabel, operator: Exists, values: []} + dryRunOption: server ``` [override-values]: /docs/building-operators/helm/reference/advanced_features/override_values/ From b70e635a387ebc8d68ed9c989a2bf64eb3c7df81 Mon Sep 17 00:00:00 2001 From: "Adam D. Cornett" Date: Wed, 18 Jun 2025 14:06:43 -0700 Subject: [PATCH 02/31] re-generate scaffolding post release Signed-off-by: Adam D. Cornett --- testdata/go/v4/memcached-operator/Makefile | 2 +- testdata/go/v4/monitoring/memcached-operator/Makefile | 2 +- testdata/helm/memcached-operator/Makefile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/testdata/go/v4/memcached-operator/Makefile b/testdata/go/v4/memcached-operator/Makefile index 91896ed985..6a6500bb9b 100644 --- a/testdata/go/v4/memcached-operator/Makefile +++ b/testdata/go/v4/memcached-operator/Makefile @@ -48,7 +48,7 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.39.2 +OPERATOR_SDK_VERSION ?= v1.40.0 # Image URL to use all building/pushing image targets IMG ?= controller:latest diff --git a/testdata/go/v4/monitoring/memcached-operator/Makefile b/testdata/go/v4/monitoring/memcached-operator/Makefile index b879aa5789..9545fb9861 100644 --- a/testdata/go/v4/monitoring/memcached-operator/Makefile +++ b/testdata/go/v4/monitoring/memcached-operator/Makefile @@ -48,7 +48,7 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. 
-OPERATOR_SDK_VERSION ?= v1.39.2 +OPERATOR_SDK_VERSION ?= v1.40.0 # Image URL to use all building/pushing image targets IMG ?= controller:latest diff --git a/testdata/helm/memcached-operator/Makefile b/testdata/helm/memcached-operator/Makefile index d4d84a651d..845dda7715 100644 --- a/testdata/helm/memcached-operator/Makefile +++ b/testdata/helm/memcached-operator/Makefile @@ -48,7 +48,7 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.39.2 +OPERATOR_SDK_VERSION ?= v1.40.0 # Container tool to use for building and pushing images CONTAINER_TOOL ?= docker From 156bf05d79658d75e70461e39777a9d313db9bf5 Mon Sep 17 00:00:00 2001 From: "Adam D. Cornett" Date: Wed, 18 Jun 2025 08:41:49 -0700 Subject: [PATCH 03/31] add GO_BUILD_TAGS to make install Signed-off-by: Adam D. Cornett --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 51b63749e2..71a00c55a4 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ install: ## Install operator-sdk and helm-operator. echo "Error: GOBIN is not set"; \ exit 1; \ fi - $(GO) install $(GO_BUILD_ARGS) ./cmd/{operator-sdk,helm-operator} + $(GO) install $(GO_BUILD_ARGS) -tags=$(GO_BUILD_TAGS) ./cmd/{operator-sdk,helm-operator} .PHONY: build build: ## Build operator-sdk and helm-operator. From 1bf4815b0ad73b15227242f985b3d6a695612bde Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Wed, 18 Jun 2025 14:33:08 -0700 Subject: [PATCH 04/31] fix broken link in 1.40.0 docs (#6965) Signed-off-by: Adam D. Cornett --- website/content/en/docs/upgrading-sdk-version/v1.40.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/en/docs/upgrading-sdk-version/v1.40.0.md b/website/content/en/docs/upgrading-sdk-version/v1.40.0.md index 2164854c65..1712e1975d 100644 --- a/website/content/en/docs/upgrading-sdk-version/v1.40.0.md +++ b/website/content/en/docs/upgrading-sdk-version/v1.40.0.md @@ -248,7 +248,7 @@ With: the code from Kubebuilder samples [testdata/project-v4/config/default/kust **NOTE:** You can see the complete file in the repository for the tag release `v1.40.0`: [testdata/go/v4/memcached-operator/config/default/kustomization.yaml](https://github.com/operator-framework/operator-sdk/tree/v1.40.0/testdata/go/v4/memcached-operator/config/default/kustomization.yaml) 4. Add the new file to allow patch the certs for the metrics: [testdata/go/v4/memcached-operator/config/default/cert_metrics_manager_patch.yaml](https://github.com/operator-framework/operator-sdk/tree/v1.40.0/testdata/go/v4/memcached-operator/config/default/cert_metrics_manager_patch.yaml) -5. Replace the content of `config/default/manager_webhook_patch.yaml` with: [testdata/go/v4/memcached-operator/config/default/config/default/manager_webhook_patch.yaml](https://github.com/operator-framework/operator-sdk/tree/v1.40.0/testdata/go/v4/memcached-operator/config/default/config/default/manager_webhook_patch.yaml) +5. Replace the content of `config/default/manager_webhook_patch.yaml` with: [testdata/go/v4/memcached-operator/config/default/manager_webhook_patch.yaml](https://github.com/operator-framework/operator-sdk/tree/v1.40.0/testdata/go/v4/memcached-operator/config/default/manager_webhook_patch.yaml) 6. 
Update the `config/manager/manager.yaml` to include the ports and volumes to allow the patch to work properly: ``` From 88f7a9312bec247eba60de426d7edbe25b23275f Mon Sep 17 00:00:00 2001 From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> Date: Tue, 24 Jun 2025 14:48:47 +0100 Subject: [PATCH 05/31] Upgrade from Kubebuilder 4.5.2 to 4.6.0 and add support for k8s 1.33 (#6954) Signed-off-by: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> --- changelog/fragments/upgrade_kb_latest.yaml | 130 +++++++++++++ go.mod | 4 +- go.sum | 4 +- images/custom-scorecard-tests/Dockerfile | 2 +- images/helm-operator/Dockerfile | 2 +- images/operator-sdk/Dockerfile | 2 +- images/scorecard-test-kuttl/Dockerfile | 2 +- images/scorecard-test/Dockerfile | 2 +- internal/plugins/manifests/v2/init.go | 20 +- .../.devcontainer/devcontainer.json | 2 +- .../.github/workflows/lint.yml | 4 +- .../.github/workflows/test-e2e.yml | 3 - .../go/v4/memcached-operator/.golangci.yml | 57 +++--- testdata/go/v4/memcached-operator/Dockerfile | 2 +- testdata/go/v4/memcached-operator/Makefile | 31 +++- testdata/go/v4/memcached-operator/README.md | 2 +- .../cache.example.com_memcacheds.yaml | 2 +- testdata/go/v4/memcached-operator/cmd/main.go | 10 +- .../bases/cache.example.com_memcacheds.yaml | 2 +- .../config/rbac/kustomization.yaml | 2 +- testdata/go/v4/memcached-operator/go.mod | 83 ++++----- testdata/go/v4/memcached-operator/go.sum | 171 +++++++++--------- .../controller/memcached_controller.go | 4 +- .../controller/memcached_controller_test.go | 2 +- .../webhook/v1alpha1/memcached_webhook.go | 2 +- .../.devcontainer/devcontainer.json | 2 +- .../.github/workflows/lint.yml | 4 +- .../.github/workflows/test-e2e.yml | 3 - .../memcached-operator/.golangci.yml | 57 +++--- .../monitoring/memcached-operator/Dockerfile | 2 +- .../v4/monitoring/memcached-operator/Makefile | 31 +++- .../monitoring/memcached-operator/README.md | 2 +- .../cache.example.com_memcacheds.yaml | 2 +- .../monitoring/memcached-operator/cmd/main.go | 10 +- .../bases/cache.example.com_memcacheds.yaml | 2 +- .../config/rbac/kustomization.yaml | 2 +- .../v4/monitoring/memcached-operator/go.mod | 84 +++++---- .../v4/monitoring/memcached-operator/go.sum | 168 +++++++++-------- .../controller/memcached_controller.go | 4 +- .../controller/memcached_controller_test.go | 2 +- .../webhook/v1alpha1/memcached_webhook.go | 2 +- .../config/rbac/kustomization.yaml | 2 +- 42 files changed, 555 insertions(+), 371 deletions(-) create mode 100644 changelog/fragments/upgrade_kb_latest.yaml diff --git a/changelog/fragments/upgrade_kb_latest.yaml b/changelog/fragments/upgrade_kb_latest.yaml new file mode 100644 index 0000000000..1a86e8eb95 --- /dev/null +++ b/changelog/fragments/upgrade_kb_latest.yaml @@ -0,0 +1,130 @@ +entries: + - description: > + For Go-based operators, upgrade the Go version from `1.23` to `1.24` + kind: "change" + breaking: true + migration: + header: Upgrade Go version to 1.24 + body: | + Update the Go version used to `1.24`. This affects: + + **Dockerfile:** + ```dockerfile + -FROM golang:1.23 AS builder + +FROM golang:1.24 AS builder + ``` + + **.devcontainer/devcontainer.json:** + ```json + - "image": "golang:1.23", + + "image": "golang:1.24", + ``` + + **go.mod:** + ```go + -go 1.23.0 + +go 1.24.0 + ``` + + - description: > + For Go-based operators, upgrade golangci-lint to `v2.1.0` and update `.golangci.yml` + to the v2 config format with enhanced structure and controls. 
+ kind: "change" + breaking: false + migration: + header: Upgrade golangci-lint and use v2 config + body: | + Update golangci-lint usage across the project: + + **Makefile:** + ```makefile + -GOLANGCI_LINT_VERSION ?= v1.63.4 + +GOLANGCI_LINT_VERSION ?= v2.1.0 + + -$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + ``` + + **GitHub Actions Workflow:** + ```yaml + - uses: golangci/golangci-lint-action@v6 + + uses: golangci/golangci-lint-action@v8 + ``` + + **.golangci.yml:** + Convert to v2 layout with keys like `version`, `linters`, `settings`, `formatters`, `exclusions`. + You might want to copy and paste the file from the Memcached sample from the tag release `v1.40.0`: [testdata/go/v4/memcached-operator/.golangci.yml](https://github.com/operator-framework/operator-sdk/tree/v1.40.0/testdata/go/v4/memcached-operator/.golangci.yml) + + - description: > + For Go-based operators, upgrade controller-gen from `v0.17.2` to `v0.18.0`. + kind: "change" + breaking: false + migration: + header: Upgrade controller-gen to `v0.18.0` + body: | + Update controller-gen tooling and annotations: + + **Makefile:** + ```makefile + -CONTROLLER_TOOLS_VERSION ?= v0.17.2 + +CONTROLLER_TOOLS_VERSION ?= v0.18.0 + ``` + + Run `make generate` to regenerate code and manifests with the new version. + + - description: > + For Go-based operators, upgrade controller-runtime from `v0.20.4` to `v0.21.0` + and kubernetes dependencies to `v0.33`. + kind: "change" + breaking: false + migration: + header: Upgrade controller-runtime to `v0.21.0` + body: | + Update the `go.mod` import: + ```go + -sigs.k8s.io/controller-runtime v0.20.4 + +sigs.k8s.io/controller-runtime v0.21.0 + ``` + + Run `go mod tidy` to upgrade the k8s dependencies. + + - description: > + For Go-based operators, add new target to setup/teardown Kind cluster for E2E tests + and remove Kind setup from CI workflows. + kind: "addition" + breaking: false + migration: + header: Add cluster setup for e2e tests in Makefile and update CI workflow + body: | + Remove direct Kind commands in GitHub workflows: + + **Removed:** + ```yaml + - name: Create kind cluster + run: kind create cluster + ``` + + **Added to Makefile:** + ```makefile + KIND_CLUSTER ?= -test-e2e + + .PHONY: setup-test-e2e + setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist + @command -v $(KIND) >/dev/null 2>&1 || { \ + echo "Kind is not installed. Please install Kind manually."; \ + exit 1; \ + } + @case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac + + .PHONY: cleanup-test-e2e + cleanup-test-e2e: + $(KIND) delete cluster --name $(KIND_CLUSTER) + ``` + + Update `test-e2e` target to call these appropriately. 
diff --git a/go.mod b/go.mod index 938f5f5bab..211ae4d161 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/operator-framework/operator-sdk -go 1.23.6 +go 1.24.3 require ( github.com/blang/semver/v4 v4.0.0 @@ -41,7 +41,7 @@ require ( k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e sigs.k8s.io/controller-runtime v0.20.4 sigs.k8s.io/controller-tools v0.17.2 - sigs.k8s.io/kubebuilder/v4 v4.5.2 + sigs.k8s.io/kubebuilder/v4 v4.6.0 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index b5cd282d83..8584610aac 100644 --- a/go.sum +++ b/go.sum @@ -874,8 +874,8 @@ sigs.k8s.io/controller-tools v0.17.2 h1:jNFOKps8WnaRKZU2R+4vRCHnXyJanVmXBWqkuUPF sigs.k8s.io/controller-tools v0.17.2/go.mod h1:4q5tZG2JniS5M5bkiXY2/potOiXyhoZVw/U48vLkXk0= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kubebuilder/v4 v4.5.2 h1:57lmVU1zwjOZAF28hBhZyaxE6qXreHYekI4yt/Q5rEU= -sigs.k8s.io/kubebuilder/v4 v4.5.2/go.mod h1:oJrPYf8hkfLCh2vb40vD/Gm22CJsFHlGQz1mw2ZiQaY= +sigs.k8s.io/kubebuilder/v4 v4.6.0 h1:SBc37jghs3L2UaEL91A1t5K5dANrEviUDuNic9hMQSw= +sigs.k8s.io/kubebuilder/v4 v4.6.0/go.mod h1:zlXrnLiJPDPpK4hKCUrlgzzLOusfA8Sd8tpYGIrvD00= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= diff --git a/images/custom-scorecard-tests/Dockerfile b/images/custom-scorecard-tests/Dockerfile index a3559131bd..1721c5111f 100644 --- a/images/custom-scorecard-tests/Dockerfile +++ b/images/custom-scorecard-tests/Dockerfile @@ -1,5 +1,5 @@ # Build the custom-scorecard-tests binary -FROM --platform=$BUILDPLATFORM golang:1.23 AS builder +FROM --platform=$BUILDPLATFORM golang:1.24 AS builder ARG TARGETARCH WORKDIR /workspace diff --git a/images/helm-operator/Dockerfile b/images/helm-operator/Dockerfile index f1bc5e875f..bd779c4d46 100644 --- a/images/helm-operator/Dockerfile +++ b/images/helm-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM golang:1.23 AS builder +FROM --platform=$BUILDPLATFORM golang:1.24 AS builder ARG TARGETARCH WORKDIR /workspace diff --git a/images/operator-sdk/Dockerfile b/images/operator-sdk/Dockerfile index 86d60e6259..87ce50f424 100644 --- a/images/operator-sdk/Dockerfile +++ b/images/operator-sdk/Dockerfile @@ -1,5 +1,5 @@ # Build the operator-sdk binary -FROM --platform=$BUILDPLATFORM golang:1.23 AS builder +FROM --platform=$BUILDPLATFORM golang:1.24 AS builder ARG TARGETARCH WORKDIR /workspace diff --git a/images/scorecard-test-kuttl/Dockerfile b/images/scorecard-test-kuttl/Dockerfile index 3265fc1b09..61d212679f 100644 --- a/images/scorecard-test-kuttl/Dockerfile +++ b/images/scorecard-test-kuttl/Dockerfile @@ -1,5 +1,5 @@ # Build the scorecard-test-kuttl binary -FROM --platform=$BUILDPLATFORM golang:1.23 AS builder +FROM --platform=$BUILDPLATFORM golang:1.24 AS builder ARG TARGETARCH ARG BUILDPLATFORM diff --git a/images/scorecard-test/Dockerfile b/images/scorecard-test/Dockerfile index 8f02c4097d..3aac7dcfc9 100644 --- a/images/scorecard-test/Dockerfile +++ b/images/scorecard-test/Dockerfile @@ -1,5 +1,5 @@ # Build the scorecard-test binary -FROM --platform=$BUILDPLATFORM golang:1.23 AS builder +FROM --platform=$BUILDPLATFORM golang:1.24 AS builder ARG TARGETARCH WORKDIR 
/workspace diff --git a/internal/plugins/manifests/v2/init.go b/internal/plugins/manifests/v2/init.go index 34b69c488c..81cec65e18 100644 --- a/internal/plugins/manifests/v2/init.go +++ b/internal/plugins/manifests/v2/init.go @@ -130,7 +130,7 @@ func (s *initSubcommand) Scaffold(fs machinery.Filesystem) error { // TODO: remove this when we bump kubebuilder to v5.x // Not adopt changes introduced by mistake in the default Makefile of kubebuilder v4.x. if operatorType == projutil.OperatorTypeGo { - err = util.ReplaceInFile("Makefile", makefileTestE2ETarget, "") + err = util.ReplaceInFile("Makefile", "$(KIND) create cluster --name $(KIND_CLUSTER)", makefileTestFix) if err != nil { return fmt.Errorf("error replacing Makefile: %w", err) } @@ -323,16 +323,14 @@ catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) ` - // TODO: remove it when we bump kubebuilder to v5.x + // TODO: remove it when we bump kubebuilder to v4.x // We will not adopt this change since it did not work and was a bug introduced in the // default Makefile of kubebuilder v4.x. - makefileTestE2ETarget = `@command -v $(KIND) >/dev/null 2>&1 || { \ - echo "Kind is not installed. Please install Kind manually."; \ - exit 1; \ - } - @$(KIND) get clusters | grep -q 'kind' || { \ - echo "No Kind cluster is running. Please start a Kind cluster before running the e2e tests."; \ - exit 1; \ - } - ` + makefileTestFix = `@case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac` ) diff --git a/testdata/go/v4/memcached-operator/.devcontainer/devcontainer.json b/testdata/go/v4/memcached-operator/.devcontainer/devcontainer.json index 0e0eed213f..a3ab7541cb 100644 --- a/testdata/go/v4/memcached-operator/.devcontainer/devcontainer.json +++ b/testdata/go/v4/memcached-operator/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "name": "Kubebuilder DevContainer", - "image": "docker.io/golang:1.23", + "image": "golang:1.24", "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, "ghcr.io/devcontainers/features/git:1": {} diff --git a/testdata/go/v4/memcached-operator/.github/workflows/lint.yml b/testdata/go/v4/memcached-operator/.github/workflows/lint.yml index 4951e3316c..86e3845a6c 100644 --- a/testdata/go/v4/memcached-operator/.github/workflows/lint.yml +++ b/testdata/go/v4/memcached-operator/.github/workflows/lint.yml @@ -18,6 +18,6 @@ jobs: go-version-file: go.mod - name: Run linter - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@v8 with: - version: v1.63.4 + version: v2.1.0 diff --git a/testdata/go/v4/memcached-operator/.github/workflows/test-e2e.yml b/testdata/go/v4/memcached-operator/.github/workflows/test-e2e.yml index b2eda8c3db..68fd1ed556 100644 --- a/testdata/go/v4/memcached-operator/.github/workflows/test-e2e.yml +++ b/testdata/go/v4/memcached-operator/.github/workflows/test-e2e.yml @@ -26,9 +26,6 @@ jobs: - name: Verify kind installation run: kind version - - name: Create kind cluster - run: kind create cluster - - name: Running Test e2e run: | go mod tidy diff --git a/testdata/go/v4/memcached-operator/.golangci.yml b/testdata/go/v4/memcached-operator/.golangci.yml index 6b29746238..e5b21b0f11 100644 --- a/testdata/go/v4/memcached-operator/.golangci.yml +++ b/testdata/go/v4/memcached-operator/.golangci.yml @@ -1,33 +1,15 @@ +version: "2" run: - timeout: 5m 
allow-parallel-runners: true - -issues: - # don't skip warning about doc comments - # don't exclude the default set of lint - exclude-use-default: false - # restore some of the defaults - # (fill in the rest as needed) - exclude-rules: - - path: "api/*" - linters: - - lll - - path: "internal/*" - linters: - - dupl - - lll linters: - disable-all: true + default: none enable: + - copyloopvar - dupl - errcheck - - copyloopvar - ginkgolinter - goconst - gocyclo - - gofmt - - goimports - - gosimple - govet - ineffassign - lll @@ -36,12 +18,35 @@ linters: - prealloc - revive - staticcheck - - typecheck - unconvert - unparam - unused - -linters-settings: - revive: + settings: + revive: + rules: + - name: comment-spacings + - name: import-shadowing + exclusions: + generated: lax rules: - - name: comment-spacings + - linters: + - lll + path: api/* + - linters: + - dupl + - lll + path: internal/* + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/testdata/go/v4/memcached-operator/Dockerfile b/testdata/go/v4/memcached-operator/Dockerfile index 348b8372cd..cb1b130fd9 100644 --- a/testdata/go/v4/memcached-operator/Dockerfile +++ b/testdata/go/v4/memcached-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM docker.io/golang:1.23 AS builder +FROM golang:1.24 AS builder ARG TARGETOS ARG TARGETARCH diff --git a/testdata/go/v4/memcached-operator/Makefile b/testdata/go/v4/memcached-operator/Makefile index 6a6500bb9b..b43a379ad8 100644 --- a/testdata/go/v4/memcached-operator/Makefile +++ b/testdata/go/v4/memcached-operator/Makefile @@ -116,9 +116,30 @@ test: manifests generate fmt vet setup-envtest ## Run tests. # The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. # CertManager is installed by default; skip with: # - CERT_MANAGER_INSTALL_SKIP=true +KIND_CLUSTER ?= memcached-operator-test-e2e + +.PHONY: setup-test-e2e +setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist + @command -v $(KIND) >/dev/null 2>&1 || { \ + echo "Kind is not installed. Please install Kind manually."; \ + exit 1; \ + } + @case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac + .PHONY: test-e2e -test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. - go test ./test/e2e/ -v -ginkgo.v +test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. + KIND_CLUSTER=$(KIND_CLUSTER) go test ./test/e2e/ -v -ginkgo.v + $(MAKE) cleanup-test-e2e + +.PHONY: cleanup-test-e2e +cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests + @$(KIND) delete cluster --name $(KIND_CLUSTER) .PHONY: lint lint: golangci-lint ## Run golangci-lint linter @@ -216,12 +237,12 @@ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint ## Tool Versions KUSTOMIZE_VERSION ?= v5.6.0 -CONTROLLER_TOOLS_VERSION ?= v0.17.2 +CONTROLLER_TOOLS_VERSION ?= v0.18.0 #ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. 
release-0.20) ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}') #ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}') -GOLANGCI_LINT_VERSION ?= v1.63.4 +GOLANGCI_LINT_VERSION ?= v2.1.0 .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. @@ -249,7 +270,7 @@ $(ENVTEST): $(LOCALBIN) .PHONY: golangci-lint golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. $(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist # $1 - target path with name of binary diff --git a/testdata/go/v4/memcached-operator/README.md b/testdata/go/v4/memcached-operator/README.md index 761ce09e92..d246089be5 100644 --- a/testdata/go/v4/memcached-operator/README.md +++ b/testdata/go/v4/memcached-operator/README.md @@ -7,7 +7,7 @@ ## Getting Started ### Prerequisites -- go version v1.23.0+ +- go version v1.24.0+ - docker version 17.03+. - kubectl version v1.11.3+. - Access to a Kubernetes v1.11.3+ cluster. diff --git a/testdata/go/v4/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml b/testdata/go/v4/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml index 6d7f07e2b0..60620d0cb5 100644 --- a/testdata/go/v4/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml +++ b/testdata/go/v4/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null name: memcacheds.cache.example.com spec: diff --git a/testdata/go/v4/memcached-operator/cmd/main.go b/testdata/go/v4/memcached-operator/cmd/main.go index 9be0dc9785..30fc220508 100644 --- a/testdata/go/v4/memcached-operator/cmd/main.go +++ b/testdata/go/v4/memcached-operator/cmd/main.go @@ -39,7 +39,7 @@ import ( cachev1alpha1 "github.com/example/memcached-operator/api/v1alpha1" "github.com/example/memcached-operator/internal/controller" - webhookcachev1alpha1 "github.com/example/memcached-operator/internal/webhook/v1alpha1" + webhookv1alpha1 "github.com/example/memcached-operator/internal/webhook/v1alpha1" // +kubebuilder:scaffold:imports ) @@ -136,7 +136,7 @@ func main() { // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. // More info: - // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/metrics/server + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/server // - https://book.kubebuilder.io/reference/metrics.html metricsServerOptions := metricsserver.Options{ BindAddress: metricsAddr, @@ -148,7 +148,7 @@ func main() { // FilterProvider is used to protect the metrics endpoint with authn/authz. // These configurations ensure that only authorized users and service accounts // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. 
More info: - // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/metrics/filters#WithAuthenticationAndAuthorization + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/filters#WithAuthenticationAndAuthorization metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization } @@ -203,7 +203,7 @@ func main() { os.Exit(1) } - if err = (&controller.MemcachedReconciler{ + if err := (&controller.MemcachedReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), // Add a Recorder to the reconciler. @@ -215,7 +215,7 @@ func main() { } // nolint:goconst if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = webhookcachev1alpha1.SetupMemcachedWebhookWithManager(mgr); err != nil { + if err := webhookv1alpha1.SetupMemcachedWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Memcached") os.Exit(1) } diff --git a/testdata/go/v4/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml b/testdata/go/v4/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml index 76fae09e93..ef3ab264fa 100644 --- a/testdata/go/v4/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml +++ b/testdata/go/v4/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.18.0 name: memcacheds.cache.example.com spec: group: cache.example.com diff --git a/testdata/go/v4/memcached-operator/config/rbac/kustomization.yaml b/testdata/go/v4/memcached-operator/config/rbac/kustomization.yaml index d8975bd4b4..1117168f18 100644 --- a/testdata/go/v4/memcached-operator/config/rbac/kustomization.yaml +++ b/testdata/go/v4/memcached-operator/config/rbac/kustomization.yaml @@ -20,7 +20,7 @@ resources: - metrics_reader_role.yaml # For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are -# not used by the {{ .ProjectName }} itself. You can comment the following lines +# not used by the memcached-operator itself. You can comment the following lines # if you do not want those helpers be installed with your Project. 
- memcached_admin_role.yaml - memcached_editor_role.yaml diff --git a/testdata/go/v4/memcached-operator/go.mod b/testdata/go/v4/memcached-operator/go.mod index 015d06ed6c..ec4ac2928f 100644 --- a/testdata/go/v4/memcached-operator/go.mod +++ b/testdata/go/v4/memcached-operator/go.mod @@ -1,28 +1,25 @@ module github.com/example/memcached-operator -go 1.23.0 - -godebug default=go1.23 +go 1.24.0 require ( github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 - k8s.io/api v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/client-go v0.32.1 + k8s.io/api v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/client-go v0.33.0 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 ) require ( - cel.dev/expr v0.18.0 // indirect + cel.dev/expr v0.19.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -36,15 +33,13 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -53,48 +48,50 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace 
v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.32.1 // indirect - k8s.io/apiserver v0.32.1 // indirect - k8s.io/component-base v0.32.1 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/apiserver v0.33.0 // indirect + k8s.io/component-base v0.33.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/testdata/go/v4/memcached-operator/go.sum b/testdata/go/v4/memcached-operator/go.sum index 3719e6c35b..14ef049328 100644 --- a/testdata/go/v4/memcached-operator/go.sum +++ b/testdata/go/v4/memcached-operator/go.sum @@ -1,9 +1,7 @@ -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= github.com/antlr4-go/antlr/v4 v4.13.0 
h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -15,9 +13,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= @@ -53,13 +50,13 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -67,8 +64,8 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof 
v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -77,6 +74,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -84,6 +83,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -99,19 +100,18 @@ github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 
h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -122,32 +122,36 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -165,28 +169,28 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -199,14 +203,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -217,31 +221,34 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= -k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod 
h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/testdata/go/v4/memcached-operator/internal/controller/memcached_controller.go b/testdata/go/v4/memcached-operator/internal/controller/memcached_controller.go index 70e803b9e3..7f215361ae 100644 --- a/testdata/go/v4/memcached-operator/internal/controller/memcached_controller.go +++ b/testdata/go/v4/memcached-operator/internal/controller/memcached_controller.go @@ -78,7 +78,7 @@ type MemcachedReconciler struct { // For further info: // - About Operator Pattern: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ // - About Controllers: https://kubernetes.io/docs/concepts/architecture/controller/ -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/reconcile +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := logf.FromContext(ctx) @@ -435,7 +435,7 @@ func imageForMemcached() (string, error) { var imageEnvVar = "MEMCACHED_IMAGE" image, found := os.LookupEnv(imageEnvVar) if !found { - return "", fmt.Errorf("Unable to find %s environment variable with the image", imageEnvVar) + return "", fmt.Errorf("unable to find %s environment variable with the image", imageEnvVar) } return image, nil } diff --git a/testdata/go/v4/memcached-operator/internal/controller/memcached_controller_test.go b/testdata/go/v4/memcached-operator/internal/controller/memcached_controller_test.go index aeee5531c0..b09f48e90f 100644 --- a/testdata/go/v4/memcached-operator/internal/controller/memcached_controller_test.go +++ b/testdata/go/v4/memcached-operator/internal/controller/memcached_controller_test.go @@ -139,7 +139,7 @@ var _ = Describe("Memcached controller", func() { By("Checking the latest Status Condition added to the Memcached instance") Expect(k8sClient.Get(ctx, typeNamespacedName, memcached)).To(Succeed()) - conditions := []metav1.Condition{} + var conditions []metav1.Condition Expect(memcached.Status.Conditions).To(ContainElement( HaveField("Type", Equal(typeAvailableMemcached)), &conditions)) Expect(conditions).To(HaveLen(1), "Multiple conditions of type %s", typeAvailableMemcached) diff --git a/testdata/go/v4/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go b/testdata/go/v4/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go index 5ccfb67e35..6a02eebc8a 100644 --- a/testdata/go/v4/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go +++ b/testdata/go/v4/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go @@ -55,7 +55,7 @@ type MemcachedCustomDefaulter struct { var _ webhook.CustomDefaulter = &MemcachedCustomDefaulter{} // Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Memcached. 
-func (d *MemcachedCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { +func (d *MemcachedCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { memcached, ok := obj.(*cachev1alpha1.Memcached) if !ok { diff --git a/testdata/go/v4/monitoring/memcached-operator/.devcontainer/devcontainer.json b/testdata/go/v4/monitoring/memcached-operator/.devcontainer/devcontainer.json index 0e0eed213f..a3ab7541cb 100644 --- a/testdata/go/v4/monitoring/memcached-operator/.devcontainer/devcontainer.json +++ b/testdata/go/v4/monitoring/memcached-operator/.devcontainer/devcontainer.json @@ -1,6 +1,6 @@ { "name": "Kubebuilder DevContainer", - "image": "docker.io/golang:1.23", + "image": "golang:1.24", "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {}, "ghcr.io/devcontainers/features/git:1": {} diff --git a/testdata/go/v4/monitoring/memcached-operator/.github/workflows/lint.yml b/testdata/go/v4/monitoring/memcached-operator/.github/workflows/lint.yml index 4951e3316c..86e3845a6c 100644 --- a/testdata/go/v4/monitoring/memcached-operator/.github/workflows/lint.yml +++ b/testdata/go/v4/monitoring/memcached-operator/.github/workflows/lint.yml @@ -18,6 +18,6 @@ jobs: go-version-file: go.mod - name: Run linter - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@v8 with: - version: v1.63.4 + version: v2.1.0 diff --git a/testdata/go/v4/monitoring/memcached-operator/.github/workflows/test-e2e.yml b/testdata/go/v4/monitoring/memcached-operator/.github/workflows/test-e2e.yml index b2eda8c3db..68fd1ed556 100644 --- a/testdata/go/v4/monitoring/memcached-operator/.github/workflows/test-e2e.yml +++ b/testdata/go/v4/monitoring/memcached-operator/.github/workflows/test-e2e.yml @@ -26,9 +26,6 @@ jobs: - name: Verify kind installation run: kind version - - name: Create kind cluster - run: kind create cluster - - name: Running Test e2e run: | go mod tidy diff --git a/testdata/go/v4/monitoring/memcached-operator/.golangci.yml b/testdata/go/v4/monitoring/memcached-operator/.golangci.yml index 6b29746238..e5b21b0f11 100644 --- a/testdata/go/v4/monitoring/memcached-operator/.golangci.yml +++ b/testdata/go/v4/monitoring/memcached-operator/.golangci.yml @@ -1,33 +1,15 @@ +version: "2" run: - timeout: 5m allow-parallel-runners: true - -issues: - # don't skip warning about doc comments - # don't exclude the default set of lint - exclude-use-default: false - # restore some of the defaults - # (fill in the rest as needed) - exclude-rules: - - path: "api/*" - linters: - - lll - - path: "internal/*" - linters: - - dupl - - lll linters: - disable-all: true + default: none enable: + - copyloopvar - dupl - errcheck - - copyloopvar - ginkgolinter - goconst - gocyclo - - gofmt - - goimports - - gosimple - govet - ineffassign - lll @@ -36,12 +18,35 @@ linters: - prealloc - revive - staticcheck - - typecheck - unconvert - unparam - unused - -linters-settings: - revive: + settings: + revive: + rules: + - name: comment-spacings + - name: import-shadowing + exclusions: + generated: lax rules: - - name: comment-spacings + - linters: + - lll + path: api/* + - linters: + - dupl + - lll + path: internal/* + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/testdata/go/v4/monitoring/memcached-operator/Dockerfile b/testdata/go/v4/monitoring/memcached-operator/Dockerfile index a94ad5ba8d..c3bdee59e2 100644 --- 
a/testdata/go/v4/monitoring/memcached-operator/Dockerfile +++ b/testdata/go/v4/monitoring/memcached-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM docker.io/golang:1.23 AS builder +FROM golang:1.24 AS builder ARG TARGETOS ARG TARGETARCH diff --git a/testdata/go/v4/monitoring/memcached-operator/Makefile b/testdata/go/v4/monitoring/memcached-operator/Makefile index 9545fb9861..346da1ce6b 100644 --- a/testdata/go/v4/monitoring/memcached-operator/Makefile +++ b/testdata/go/v4/monitoring/memcached-operator/Makefile @@ -116,9 +116,30 @@ test: manifests generate fmt vet setup-envtest ## Run tests. # The default setup assumes Kind is pre-installed and builds/loads the Manager Docker image locally. # CertManager is installed by default; skip with: # - CERT_MANAGER_INSTALL_SKIP=true +KIND_CLUSTER ?= memcached-operator-test-e2e + +.PHONY: setup-test-e2e +setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist + @command -v $(KIND) >/dev/null 2>&1 || { \ + echo "Kind is not installed. Please install Kind manually."; \ + exit 1; \ + } + @case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac + .PHONY: test-e2e -test-e2e: manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. - go test ./test/e2e/ -v -ginkgo.v +test-e2e: setup-test-e2e manifests generate fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. + KIND_CLUSTER=$(KIND_CLUSTER) go test ./test/e2e/ -v -ginkgo.v + $(MAKE) cleanup-test-e2e + +.PHONY: cleanup-test-e2e +cleanup-test-e2e: ## Tear down the Kind cluster used for e2e tests + @$(KIND) delete cluster --name $(KIND_CLUSTER) .PHONY: lint lint: golangci-lint ## Run golangci-lint linter @@ -231,12 +252,12 @@ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint ## Tool Versions KUSTOMIZE_VERSION ?= v5.6.0 -CONTROLLER_TOOLS_VERSION ?= v0.17.2 +CONTROLLER_TOOLS_VERSION ?= v0.18.0 #ENVTEST_VERSION is the version of controller-runtime release branch to fetch the envtest setup script (i.e. release-0.20) ENVTEST_VERSION ?= $(shell go list -m -f "{{ .Version }}" sigs.k8s.io/controller-runtime | awk -F'[v.]' '{printf "release-%d.%d", $$2, $$3}') #ENVTEST_K8S_VERSION is the version of Kubernetes to use for setting up ENVTEST binaries (i.e. 1.31) ENVTEST_K8S_VERSION ?= $(shell go list -m -f "{{ .Version }}" k8s.io/api | awk -F'[v.]' '{printf "1.%d", $$3}') -GOLANGCI_LINT_VERSION ?= v1.63.4 +GOLANGCI_LINT_VERSION ?= v2.1.0 .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. @@ -264,7 +285,7 @@ $(ENVTEST): $(LOCALBIN) .PHONY: golangci-lint golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. 
$(GOLANGCI_LINT): $(LOCALBIN) - $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist # $1 - target path with name of binary diff --git a/testdata/go/v4/monitoring/memcached-operator/README.md b/testdata/go/v4/monitoring/memcached-operator/README.md index 761ce09e92..d246089be5 100644 --- a/testdata/go/v4/monitoring/memcached-operator/README.md +++ b/testdata/go/v4/monitoring/memcached-operator/README.md @@ -7,7 +7,7 @@ ## Getting Started ### Prerequisites -- go version v1.23.0+ +- go version v1.24.0+ - docker version 17.03+. - kubectl version v1.11.3+. - Access to a Kubernetes v1.11.3+ cluster. diff --git a/testdata/go/v4/monitoring/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml b/testdata/go/v4/monitoring/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml index 6d7f07e2b0..60620d0cb5 100644 --- a/testdata/go/v4/monitoring/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml +++ b/testdata/go/v4/monitoring/memcached-operator/bundle/manifests/cache.example.com_memcacheds.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null name: memcacheds.cache.example.com spec: diff --git a/testdata/go/v4/monitoring/memcached-operator/cmd/main.go b/testdata/go/v4/monitoring/memcached-operator/cmd/main.go index febba26a6e..7af196e2e7 100644 --- a/testdata/go/v4/monitoring/memcached-operator/cmd/main.go +++ b/testdata/go/v4/monitoring/memcached-operator/cmd/main.go @@ -44,7 +44,7 @@ import ( "github.com/example/memcached-operator/internal/controller" "github.com/example/memcached-operator/monitoring" - webhookcachev1alpha1 "github.com/example/memcached-operator/internal/webhook/v1alpha1" + webhookv1alpha1 "github.com/example/memcached-operator/internal/webhook/v1alpha1" // +kubebuilder:scaffold:imports ) @@ -145,7 +145,7 @@ func main() { // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. // More info: - // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/metrics/server + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/server // - https://book.kubebuilder.io/reference/metrics.html metricsServerOptions := metricsserver.Options{ BindAddress: metricsAddr, @@ -157,7 +157,7 @@ func main() { // FilterProvider is used to protect the metrics endpoint with authn/authz. // These configurations ensure that only authorized users and service accounts // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: - // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/metrics/filters#WithAuthenticationAndAuthorization + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/metrics/filters#WithAuthenticationAndAuthorization metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization } @@ -212,7 +212,7 @@ func main() { os.Exit(1) } - if err = (&controller.MemcachedReconciler{ + if err := (&controller.MemcachedReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), // Add a Recorder to the reconciler. 
@@ -224,7 +224,7 @@ func main() { } // nolint:goconst if os.Getenv("ENABLE_WEBHOOKS") != "false" { - if err = webhookcachev1alpha1.SetupMemcachedWebhookWithManager(mgr); err != nil { + if err := webhookv1alpha1.SetupMemcachedWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "Memcached") os.Exit(1) } diff --git a/testdata/go/v4/monitoring/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml b/testdata/go/v4/monitoring/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml index 76fae09e93..ef3ab264fa 100644 --- a/testdata/go/v4/monitoring/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml +++ b/testdata/go/v4/monitoring/memcached-operator/config/crd/bases/cache.example.com_memcacheds.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.2 + controller-gen.kubebuilder.io/version: v0.18.0 name: memcacheds.cache.example.com spec: group: cache.example.com diff --git a/testdata/go/v4/monitoring/memcached-operator/config/rbac/kustomization.yaml b/testdata/go/v4/monitoring/memcached-operator/config/rbac/kustomization.yaml index d566f5b077..05ec94bb57 100644 --- a/testdata/go/v4/monitoring/memcached-operator/config/rbac/kustomization.yaml +++ b/testdata/go/v4/monitoring/memcached-operator/config/rbac/kustomization.yaml @@ -22,7 +22,7 @@ resources: - metrics_reader_role.yaml # For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are -# not used by the {{ .ProjectName }} itself. You can comment the following lines +# not used by the memcached-operator itself. You can comment the following lines # if you do not want those helpers be installed with your Project. 
- memcached_admin_role.yaml - memcached_editor_role.yaml diff --git a/testdata/go/v4/monitoring/memcached-operator/go.mod b/testdata/go/v4/monitoring/memcached-operator/go.mod index 02745f9d72..1a5981fd03 100644 --- a/testdata/go/v4/monitoring/memcached-operator/go.mod +++ b/testdata/go/v4/monitoring/memcached-operator/go.mod @@ -1,30 +1,27 @@ module github.com/example/memcached-operator -go 1.23.0 - -godebug default=go1.23 +go 1.24.0 require ( github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.59.0 - github.com/prometheus/client_golang v1.19.1 - k8s.io/api v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/client-go v0.32.1 + github.com/prometheus/client_golang v1.22.0 + k8s.io/api v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/client-go v0.33.0 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 ) require ( - cel.dev/expr v0.18.0 // indirect + cel.dev/expr v0.19.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect @@ -39,15 +36,13 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -56,47 +51,50 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.32.1 // indirect - k8s.io/apiserver v0.32.1 // indirect - k8s.io/component-base v0.32.1 // indirect + k8s.io/apiextensions-apiserver v0.33.0 // indirect + k8s.io/apiserver v0.33.0 // indirect + k8s.io/component-base v0.33.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/testdata/go/v4/monitoring/memcached-operator/go.sum b/testdata/go/v4/monitoring/memcached-operator/go.sum index c164356e72..eb631cab20 100644 --- a/testdata/go/v4/monitoring/memcached-operator/go.sum +++ b/testdata/go/v4/monitoring/memcached-operator/go.sum @@ -1,9 +1,7 @@ -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod 
h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -15,9 +13,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= @@ -53,13 +50,13 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -67,8 +64,8 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgY github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -77,6 +74,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -84,6 +83,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -104,16 +105,16 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.59.0 h1:1aAICc8gsk8Sy/1wzbOwCXbC+DvJBqBoStdkX3zVgm0= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.59.0/go.mod h1:MNl09GdaKb/vE8QdcCWyICDV7XAbGX6gKKQAS43XW1c= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 
h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -124,32 +125,36 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -167,28 +172,28 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -201,14 +206,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -219,31 +224,34 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= -k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod 
h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller.go b/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller.go index 890674db88..bf2ce14683 100644 --- a/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller.go +++ b/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller.go @@ -85,7 +85,7 @@ type MemcachedReconciler struct { // For further info: // - About Operator Pattern: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ // - About Controllers: https://kubernetes.io/docs/concepts/architecture/controller/ -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.20.4/pkg/reconcile +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.21.0/pkg/reconcile func (r *MemcachedReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := logf.FromContext(ctx) @@ -468,7 +468,7 @@ func imageForMemcached() (string, error) { var imageEnvVar = "MEMCACHED_IMAGE" image, found := os.LookupEnv(imageEnvVar) if !found { - return "", fmt.Errorf("Unable to find %s environment variable with the image", imageEnvVar) + return "", fmt.Errorf("unable to find %s environment variable with the image", imageEnvVar) } return image, nil } diff --git a/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller_test.go b/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller_test.go index aeee5531c0..b09f48e90f 100644 --- a/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller_test.go +++ b/testdata/go/v4/monitoring/memcached-operator/internal/controller/memcached_controller_test.go @@ -139,7 +139,7 @@ var _ = Describe("Memcached controller", func() { By("Checking the latest Status Condition added to the Memcached instance") Expect(k8sClient.Get(ctx, typeNamespacedName, memcached)).To(Succeed()) - conditions := []metav1.Condition{} + var conditions []metav1.Condition Expect(memcached.Status.Conditions).To(ContainElement( HaveField("Type", Equal(typeAvailableMemcached)), &conditions)) Expect(conditions).To(HaveLen(1), "Multiple conditions of type %s", typeAvailableMemcached) diff --git a/testdata/go/v4/monitoring/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go b/testdata/go/v4/monitoring/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go index 5ccfb67e35..6a02eebc8a 100644 --- a/testdata/go/v4/monitoring/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go +++ b/testdata/go/v4/monitoring/memcached-operator/internal/webhook/v1alpha1/memcached_webhook.go @@ -55,7 +55,7 @@ type MemcachedCustomDefaulter struct { var _ webhook.CustomDefaulter = &MemcachedCustomDefaulter{} // Default implements webhook.CustomDefaulter so a webhook will be registered for the Kind Memcached. 
-func (d *MemcachedCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { +func (d *MemcachedCustomDefaulter) Default(_ context.Context, obj runtime.Object) error { memcached, ok := obj.(*cachev1alpha1.Memcached) if !ok { diff --git a/testdata/helm/memcached-operator/config/rbac/kustomization.yaml b/testdata/helm/memcached-operator/config/rbac/kustomization.yaml index d8975bd4b4..1117168f18 100644 --- a/testdata/helm/memcached-operator/config/rbac/kustomization.yaml +++ b/testdata/helm/memcached-operator/config/rbac/kustomization.yaml @@ -20,7 +20,7 @@ resources: - metrics_reader_role.yaml # For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are -# not used by the {{ .ProjectName }} itself. You can comment the following lines +# not used by the memcached-operator itself. You can comment the following lines # if you do not want those helpers be installed with your Project. - memcached_admin_role.yaml - memcached_editor_role.yaml From 7b18fc9cc28ee88898c274bb56a4c680b5cb8a5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 21:32:21 +0100 Subject: [PATCH 06/31] Bump github.com/go-viper/mapstructure/v2 from 2.2.1 to 2.3.0 (#6967) Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.2.1 to 2.3.0. - [Release notes](https://github.com/go-viper/mapstructure/releases) - [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md) - [Commits](https://github.com/go-viper/mapstructure/compare/v2.2.1...v2.3.0) --- updated-dependencies: - dependency-name: github.com/go-viper/mapstructure/v2 dependency-version: 2.3.0 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 211ae4d161..bcda7f6b6b 100644 --- a/go.mod +++ b/go.mod @@ -130,7 +130,7 @@ require ( github.com/go-openapi/swag v0.23.1 // indirect github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-viper/mapstructure/v2 v2.3.0 // indirect github.com/gobuffalo/envy v1.6.5 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect diff --git a/go.sum b/go.sum index 8584610aac..9c6b531b02 100644 --- a/go.sum +++ b/go.sum @@ -216,8 +216,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= -github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= +github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= From 69e6a5d76e9f825bd99aacd2948dcf4e15d8f897 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Mon, 7 Jul 2025 13:36:10 -0700 Subject: [PATCH 07/31] update dependencies to support k8s 1.33 (#6966) Signed-off-by: Adam D. 
Cornett --- Makefile | 7 +- go.mod | 219 +++++++++++++------------- go.sum | 473 ++++++++++++++++++++++++++++--------------------------- 3 files changed, 350 insertions(+), 349 deletions(-) diff --git a/Makefile b/Makefile index 71a00c55a4..0f345b1739 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,8 @@ export IMAGE_VERSION = v1.40.0 export SIMPLE_VERSION = $(shell (test "$(shell git describe --tags)" = "$(shell git describe --tags --abbrev=0)" && echo $(shell git describe --tags)) || echo $(shell git describe --tags --abbrev=0)+git) export GIT_VERSION = $(shell git describe --dirty --tags --always) export GIT_COMMIT = $(shell git rev-parse HEAD) -export K8S_VERSION = 1.32.0 +export K8S_VERSION = 1.33.1 +export KIND_VERSION = 0.29.0 # Build settings export TOOLS_DIR = tools/bin @@ -185,12 +186,12 @@ cluster-create:: .PHONY: dev-install dev-install:: - $(SCRIPTS_DIR)/fetch kind 0.24.0 + $(SCRIPTS_DIR)/fetch kind $(KIND_VERSION) $(SCRIPTS_DIR)/fetch kubectl $(K8S_VERSION) # Install kubectl AFTER envtest because envtest includes its own kubectl binary .PHONY: test-e2e-teardown test-e2e-teardown: - $(SCRIPTS_DIR)/fetch kind 0.24.0 + $(SCRIPTS_DIR)/fetch kind $(KIND_VERSION) $(TOOLS_DIR)/kind delete cluster --name $(KIND_CLUSTER) rm -f $(KUBECONFIG) diff --git a/go.mod b/go.mod index bcda7f6b6b..90e0ee9ea1 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.24.3 require ( github.com/blang/semver/v4 v4.0.0 github.com/fatih/structtag v1.2.0 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 github.com/iancoleman/strcase v0.3.0 github.com/kr/text v0.2.0 @@ -13,13 +13,13 @@ require ( github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/operator-framework/ansible-operator-plugins v1.38.1 - github.com/operator-framework/api v0.31.0 - github.com/operator-framework/operator-lib v0.18.0 - github.com/operator-framework/operator-manifest-tools v0.9.0 - github.com/operator-framework/operator-registry v1.55.0 + github.com/operator-framework/ansible-operator-plugins v1.39.0 + github.com/operator-framework/api v0.32.0 + github.com/operator-framework/operator-lib v0.19.0 + github.com/operator-framework/operator-manifest-tools v0.10.0 + github.com/operator-framework/operator-registry v1.56.0 github.com/prometheus/client_golang v1.22.0 - github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 + github.com/sergi/go-diff v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.14.0 github.com/spf13/cobra v1.9.1 @@ -27,54 +27,50 @@ require ( github.com/spf13/viper v1.20.1 github.com/stretchr/testify v1.10.0 github.com/thoas/go-funk v0.9.3 - golang.org/x/mod v0.24.0 - golang.org/x/text v0.25.0 - golang.org/x/tools v0.33.0 + golang.org/x/mod v0.25.0 + golang.org/x/text v0.26.0 + golang.org/x/tools v0.34.0 gomodules.xyz/jsonpatch/v3 v3.0.1 - helm.sh/helm/v3 v3.17.3 - k8s.io/api v0.32.4 - k8s.io/apiextensions-apiserver v0.32.4 - k8s.io/apimachinery v0.32.4 - k8s.io/cli-runtime v0.32.4 - k8s.io/client-go v0.32.4 - k8s.io/kubectl v0.32.4 - k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e - sigs.k8s.io/controller-runtime v0.20.4 - sigs.k8s.io/controller-tools v0.17.2 + helm.sh/helm/v3 v3.18.3 + k8s.io/api v0.33.2 + k8s.io/apiextensions-apiserver v0.33.2 + k8s.io/apimachinery v0.33.2 + k8s.io/cli-runtime v0.33.2 + k8s.io/client-go v0.33.2 + k8s.io/kubectl v0.33.2 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + sigs.k8s.io/controller-runtime v0.21.0 + 
sigs.k8s.io/controller-tools v0.18.0 sigs.k8s.io/kubebuilder/v4 v4.6.0 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/yaml v1.5.0 ) -// https://github.com/kubernetes/apiserver/issues/116 -// reevaluate when we bump to k8s v1.33.0 -replace github.com/google/cel-go => github.com/google/cel-go v0.22.1 - require ( - cel.dev/expr v0.23.1 // indirect + cel.dev/expr v0.24.0 // indirect dario.cat/mergo v1.0.1 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/BurntSushi/toml v1.5.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/Microsoft/hcsshim v0.12.9 // indirect + github.com/Microsoft/hcsshim v0.13.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/containerd/containerd v1.7.27 // indirect - github.com/containerd/containerd/api v1.8.0 // indirect - github.com/containerd/continuity v0.4.4 // indirect + github.com/containerd/containerd/api v1.9.0 // indirect + github.com/containerd/continuity v0.4.5 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect @@ -82,46 +78,46 @@ require ( github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.63.0 // indirect + github.com/containers/common v0.63.1 // indirect github.com/containers/image/v5 v5.35.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect github.com/containers/storage v1.58.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/distribution/distribution/v3 v3.0.0-rc.3 // indirect + github.com/distribution/distribution/v3 v3.0.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v28.1.1+incompatible // indirect + github.com/docker/cli v28.3.1+incompatible // indirect 
github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v28.0.4+incompatible // indirect + github.com/docker/docker v28.2.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect - github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.11.2 // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.8.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.1 // indirect - github.com/go-git/go-git/v5 v5.13.1 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect + github.com/go-git/go-git/v5 v5.16.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/go-jose/go-jose/v4 v4.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/errors v0.22.1 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/loads v0.22.0 // indirect github.com/go-openapi/runtime v0.28.0 // indirect @@ -140,25 +136,24 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.25.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-containerregistry v0.20.3 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/go-containerregistry v0.20.6 // indirect + github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - 
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.7 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect @@ -171,7 +166,7 @@ require ( github.com/klauspost/pgzip v1.2.6 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect + github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.9.0 // indirect @@ -189,7 +184,7 @@ require ( github.com/moby/spdystream v0.5.0 // indirect github.com/moby/sys/capability v0.4.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.2 // indirect @@ -211,94 +206,96 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/proglottis/gpgme v0.1.4 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect - github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect - github.com/redis/go-redis/v9 v9.7.3 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 // indirect + github.com/redis/go-redis/v9 v9.10.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rubenv/sql-migrate v1.7.1 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect - github.com/sigstore/fulcio v1.6.6 // indirect - github.com/sigstore/protobuf-specs v0.4.1 // indirect + github.com/sigstore/fulcio v1.7.1 // indirect + github.com/sigstore/protobuf-specs v0.4.3 // indirect github.com/sigstore/rekor v1.3.10 // indirect - github.com/sigstore/sigstore v1.9.3 // indirect - github.com/smallstep/pkcs7 v0.1.1 // indirect + github.com/sigstore/sigstore v1.9.5 // indirect + github.com/smallstep/pkcs7 v0.2.1 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/stoewer/go-strcase v1.3.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.12 // indirect github.com/vbatts/tar-split v0.12.1 // indirect - github.com/vbauerster/mpb/v8 v8.9.3 // indirect + github.com/vbauerster/mpb/v8 v8.10.2 // indirect 
github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/bbolt v1.4.0 // indirect - go.mongodb.org/mongo-driver v1.14.0 // indirect + go.etcd.io/bbolt v1.4.2 // indirect + go.mongodb.org/mongo-driver v1.17.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect - go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect - go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.58.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect + go.opentelemetry.io/otel/log v0.12.2 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect + 
go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.38.0 // indirect - golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect - golang.org/x/net v0.40.0 // indirect - golang.org/x/oauth2 v0.29.0 // indirect - golang.org/x/sync v0.14.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.3 // indirect + golang.org/x/crypto v0.39.0 // indirect + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect - golang.org/x/time v0.11.0 // indirect + golang.org/x/time v0.12.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gomodules.xyz/orderedmap v0.1.0 // indirect - google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/grpc v1.72.1 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/grpc v1.73.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.32.4 // indirect - k8s.io/component-base v0.32.4 // indirect + k8s.io/apiserver v0.33.2 // indirect + k8s.io/component-base v0.33.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - oras.land/oras-go v1.2.5 // indirect + k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect oras.land/oras-go/v2 v2.6.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kustomize/api v0.18.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/kustomize/api v0.19.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect ) diff --git a/go.sum b/go.sum index 9c6b531b02..b433a1df45 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,12 @@ -cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg= -cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 
h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -18,24 +18,24 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= -github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= -github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= +github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA= +github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod 
h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= @@ -48,16 +48,13 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= -github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -68,10 +65,10 @@ github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJ github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= -github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= -github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= -github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= -github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0= +github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= @@ -86,8 +83,8 @@ 
github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.63.0 h1:ox6vgUYX5TSvt4W+bE36sYBVz/aXMAfRGVAgvknSjBg= -github.com/containers/common v0.63.0/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= +github.com/containers/common v0.63.1 h1:6g02gbW34PaRVH4Heb2Pk11x0SdbQ+8AfeKKeQGqYBE= +github.com/containers/common v0.63.1/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -100,8 +97,9 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -115,37 +113,35 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg= -github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= -github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.3.1+incompatible h1:ZUdwOLDEBoE3TE5rdC9IXGY5HPHksJK3M+hJEWhh2mc= +github.com/docker/cli v28.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= 
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= -github.com/docker/docker v28.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= +github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 h1:EHZfspsnLAz8Hzccd67D5abwLiqoqym2jz/jOS39mCk= +github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= -github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= -github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f 
h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= @@ -162,26 +158,26 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= +github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= -github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= -github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= +github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= +github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= +github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -190,8 +186,8 @@ github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC0 github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= 
github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= @@ -251,10 +247,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.1 h1:AfVXx3chM2qwoSbM7Da8g8hX8OVSkBFwX+rz2+PcK40= -github.com/google/cel-go v0.22.1/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/cel-go v0.25.0 h1:jsFw9Fhn+3y2kBbltZR4VEz5xKkcIFRPDnuEzAGv5GY= +github.com/google/cel-go v0.25.0/go.mod h1:hjEb6r5SuOSlhCHmFoLzu8HGCERvIsDAbxDAyNU/MmI= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -265,13 +261,13 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= -github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= +github.com/google/pprof 
v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -281,16 +277,16 @@ github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyE github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -300,10 +296,10 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= -github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= +github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= +github.com/hashicorp/golang-lru/v2 v2.0.7 
h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= @@ -348,8 +344,8 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= -github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= +github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d h1:fCRb9hXR4QQJpwc7xnGugnva0DD5ollTGkys0n8aXT4= +github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d/go.mod h1:BVoSL2Ed8oCncct0meeBqoTY7b1Mzx7WqEOZ8EisFmY= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -371,8 +367,8 @@ github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxU github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -393,8 +389,8 @@ github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCnd github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= @@ 
-431,16 +427,16 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/ansible-operator-plugins v1.38.1 h1:MAw0AgMx0H2z69n0eq6elLFyq5odUW9mv+X1JAy5PcA= -github.com/operator-framework/ansible-operator-plugins v1.38.1/go.mod h1:a/TZX9EPC8UHI638hN4+Pgr2Un3i0x06BAdF4R/N9Cw= -github.com/operator-framework/api v0.31.0 h1:tRsFTuZ51xD8U5QgiPo3+mZgVipHZVgRXYrI6RRXOh8= -github.com/operator-framework/api v0.31.0/go.mod h1:57oCiHNeWcxmzu1Se8qlnwEKr/GGXnuHvspIYFCcXmY= -github.com/operator-framework/operator-lib v0.18.0 h1:6OaWemt/CuyrjFMkLyk4O8Vj4CPHxt/m1DMuMAmPwXo= -github.com/operator-framework/operator-lib v0.18.0/go.mod h1:EWS6xGYBcMn04wj81j0bluAYbFHl3cJcar++poQMzqE= -github.com/operator-framework/operator-manifest-tools v0.9.0 h1:jZ7MlvRB4/cI+t9mkh1Dpu3SJkoVAhFVGvJUGqEdKDE= -github.com/operator-framework/operator-manifest-tools v0.9.0/go.mod h1:0gYuPPAoamZpjbWtZwp4dlgaw3IR64F4KKSVNdDZapk= -github.com/operator-framework/operator-registry v1.55.0 h1:iXlv53fYyg2VtLqSDEalXD72/5Uzc7Rfx17j35+8plA= -github.com/operator-framework/operator-registry v1.55.0/go.mod h1:8htDRYKWZ6UWjGMXbBdwwHefsJknodOiGLnpjxgAflw= +github.com/operator-framework/ansible-operator-plugins v1.39.0 h1:JLlbdGdnGnF8q8WInq24Upde/jfWwRzIJ4gK4xjRLHc= +github.com/operator-framework/ansible-operator-plugins v1.39.0/go.mod h1:XLMYrKfowmX5leL8V4trkgtxfsXYdLynzyHamOR5xJc= +github.com/operator-framework/api v0.32.0 h1:LZSZr7at3NrjsjwQVNsYD+04o5wMq75jrR0dMYiIIH8= +github.com/operator-framework/api v0.32.0/go.mod h1:OGJo6HUYxoQwpGaLr0lPJzSek51RiXajJSSa8Jzjvp8= +github.com/operator-framework/operator-lib v0.19.0 h1:az6ogYj21rtU0SF9uYctRLyKp2dtlqTsmpfehFy6Ce8= +github.com/operator-framework/operator-lib v0.19.0/go.mod h1:KxycAjFnHt0DBtHmH3Jm7yHcY5sdrshPKTqM/HKAQ08= +github.com/operator-framework/operator-manifest-tools v0.10.0 h1:+vtIElvGQ5e43gCD6fF65a0HNH3AD3LGnukUhpl9kjc= +github.com/operator-framework/operator-manifest-tools v0.10.0/go.mod h1:eB/wnr0BOhMLNXPeceE+0p3vudP16zDNWP60Hvn3KaM= +github.com/operator-framework/operator-registry v1.56.0 h1:vbTyee/gahpnh7qw1hV1osnWy9YpTjIbEuHpwIdoEUs= +github.com/operator-framework/operator-registry v1.56.0/go.mod h1:NOmQyrgOGW0cwUxHG5ZqKxdObOzQNmO4Rxcf7JC32FU= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= @@ -475,27 +471,26 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= -github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= -github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= -github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= -github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= -github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 h1:uTiEyEyfLhkw678n6EulHVto8AkcXVr8zUcBJNZ0ark= +github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0/go.mod h1:eFYL/99JvdLP4T9/3FZ5t2pClnv7mMskc+WstTcyVr4= +github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 h1:4z7/hCJ9Jft8EBb2tDmK38p2WjyIEJ1ShhhwAhjOCps= +github.com/redis/go-redis/extra/redisotel/v9 v9.10.0/go.mod h1:B0thqLh4hB8MvvcUKSwyP5YiIcCCp8UrQ0cA9gEqyjk= +github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs= +github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4= -github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= @@ -506,23 +501,23 @@ github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod 
h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= -github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= -github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= -github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= -github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= +github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= +github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= -github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= -github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= +github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smallstep/pkcs7 v0.1.1 h1:x+rPdt2W088V9Vkjho4KtoggyktZJlMduZAtRHm68LU= -github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= +github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= +github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= @@ -537,8 +532,8 @@ github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= +github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -565,8 +560,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8= -github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c= +github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= +github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -581,66 +576,68 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= -go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= -go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= -go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= -go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= -go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= -go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= -go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= -go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= -go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= +go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= +go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= +go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= +go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= +go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= +go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= +go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= +go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= +go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod 
h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= +go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= +go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= +go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= +go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= +go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= +go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= +go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= +go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= +go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -651,6 +648,10 @@ 
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -659,12 +660,12 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588= -golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -675,8 +676,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -695,11 +696,11 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= -golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -711,9 +712,9 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -734,7 +735,7 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= @@ -745,7 +746,7 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod 
h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -756,11 +757,11 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -773,8 +774,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -790,19 +791,19 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
-google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE= -google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -836,51 +837,53 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.17.3 h1:3n5rW3D0ArjFl0p4/oWO8IbY/HKaNNwJtOQFdH2AZHg= -helm.sh/helm/v3 v3.17.3/go.mod h1:+uJKMH/UiMzZQOALR3XUf3BLIoczI2RKKD6bMhPh4G8= +helm.sh/helm/v3 v3.18.3 h1:+cvyGKgs7Jt7BN3Klmb4SsG4IkVpA7GAZVGvMz6VO4I= +helm.sh/helm/v3 v3.18.3/go.mod h1:wUc4n3txYBocM7S9RjTeZBN9T/b5MjffpcSsWEjSIpw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.32.4 h1:kw8Y/G8E7EpNy7gjB8gJZl3KJkNz8HM2YHrZPtAZsF4= -k8s.io/api v0.32.4/go.mod 
h1:5MYFvLvweRhyKylM3Es/6uh/5hGp0dg82vP34KifX4g= -k8s.io/apiextensions-apiserver v0.32.4 h1:IA+CoR63UDOijR/vEpow6wQnX4V6iVpzazJBskHrpHE= -k8s.io/apiextensions-apiserver v0.32.4/go.mod h1:Y06XO/b92H8ymOdG1HlA1submf7gIhbEDc3RjriqZOs= -k8s.io/apimachinery v0.32.4 h1:8EEksaxA7nd7xWJkkwLDN4SvWS5ot9g6Z/VZb3ju25I= -k8s.io/apimachinery v0.32.4/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.4 h1:Yf7sd/y+GOQKH1Qf6wUeayZrYXe2SKZ17Bcq7VQM5HQ= -k8s.io/apiserver v0.32.4/go.mod h1:JFUMNtE2M5yqLZpIsgCb06SkVSW1YcxW1oyLSTfjXR8= -k8s.io/cli-runtime v0.32.4 h1:5O9eC50+yFFODAan3QXeTJHtoe69ie/A9vWBITIn+KM= -k8s.io/cli-runtime v0.32.4/go.mod h1:Zn7nvBY625sEEYGtTMMPS619nrmVxabGssHyAKFK7RA= -k8s.io/client-go v0.32.4 h1:zaGJS7xoYOYumoWIFXlcVrsiYioRPrXGO7dBfVC5R6M= -k8s.io/client-go v0.32.4/go.mod h1:k0jftcyYnEtwlFW92xC7MTtFv5BNcZBr+zn9jPlT9Ic= -k8s.io/component-base v0.32.4 h1:HuF+2JVLbFS5GODLIfPCb1Td6b+G2HszJoArcWOSr5I= -k8s.io/component-base v0.32.4/go.mod h1:10KloJEYw1keU/Xmjfy9TKJqUq7J2mYdiD1VDXoco4o= +k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= +k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= +k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8= +k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8= +k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= +k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.2 h1:KGTRbxn2wJagJowo29kKBp4TchpO1DRO3g+dB/KOJN4= +k8s.io/apiserver v0.33.2/go.mod h1:9qday04wEAMLPWWo9AwqCZSiIn3OYSZacDyu/AcoM/M= +k8s.io/cli-runtime v0.33.2 h1:koNYQKSDdq5AExa/RDudXMhhtFasEg48KLS2KSAU74Y= +k8s.io/cli-runtime v0.33.2/go.mod h1:gnhsAWpovqf1Zj5YRRBBU7PFsRc6NkEkwYNQE+mXL88= +k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E= +k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo= +k8s.io/component-base v0.33.2 h1:sCCsn9s/dG3ZrQTX/Us0/Sx2R0G5kwa0wbZFYoVp/+0= +k8s.io/component-base v0.33.2/go.mod h1:/41uw9wKzuelhN+u+/C59ixxf4tYQKW7p32ddkYNe2k= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/kubectl v0.32.4 h1:PhiS4JgnJE6/JcksIJHAQI41F4hwJ270UopbcDsutF0= -k8s.io/kubectl v0.32.4/go.mod h1:XYFDrC1l3cpjWRj7UfS0LDAMlK+6eTema28NeUYFKD8= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= -k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= -oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8= +k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/kubectl v0.33.2 h1:7XKZ6DYCklu5MZQzJe+CkCjoGZwD1wWl7t/FxzhMz7Y= +k8s.io/kubectl v0.33.2/go.mod h1:8rC67FB8tVTYraovAGNi/idWIK90z2CHFNMmGJZJ3KI= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= -sigs.k8s.io/controller-tools v0.17.2 h1:jNFOKps8WnaRKZU2R+4vRCHnXyJanVmXBWqkuUPFyFg= -sigs.k8s.io/controller-tools v0.17.2/go.mod h1:4q5tZG2JniS5M5bkiXY2/potOiXyhoZVw/U48vLkXk0= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= +sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kubebuilder/v4 v4.6.0 h1:SBc37jghs3L2UaEL91A1t5K5dANrEviUDuNic9hMQSw= sigs.k8s.io/kubebuilder/v4 v4.6.0/go.mod h1:zlXrnLiJPDPpK4hKCUrlgzzLOusfA8Sd8tpYGIrvD00= -sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= -sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= -sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= -sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= +sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= +sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= +sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= +sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.5.0 
h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= +sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= From bc31ee0f5074591a695adaa38324eaaaea9bba6c Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 8 Jul 2025 14:23:29 -0700 Subject: [PATCH 08/31] moving 1.24 website from netlify to gh (#6968) Signed-off-by: Adam D. Cornett --- website/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/config.toml b/website/config.toml index f627d36a79..4834f7987c 100644 --- a/website/config.toml +++ b/website/config.toml @@ -205,7 +205,7 @@ url_latest_version = "https://sdk.operatorframework.io" [[params.versions]] version = "v1.24" - url = "https://v1-24-x.sdk.operatorframework.io" + url = "https://github.com/operator-framework/operator-sdk/tree/v1.24.x/website/content/en/docs" kube_version = "1.24.2" client_go_version = "v0.24.2" From 0eefc52889ff3dfe4af406038709e6c5ba7398e5 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 8 Jul 2025 15:12:49 -0700 Subject: [PATCH 09/31] Release v1.41.0 (#6969) Signed-off-by: Adam D. Cornett --- Makefile | 2 +- changelog/fragments/upgrade_kb_latest.yaml | 130 ------------------ changelog/generated/v1.41.0.md | 12 ++ testdata/go/v4/memcached-operator/Makefile | 2 +- .../bundle/tests/scorecard/config.yaml | 12 +- .../scorecard/patches/basic.config.yaml | 2 +- .../config/scorecard/patches/olm.config.yaml | 10 +- .../v4/monitoring/memcached-operator/Makefile | 2 +- .../bundle/tests/scorecard/config.yaml | 12 +- .../scorecard/patches/basic.config.yaml | 2 +- .../config/scorecard/patches/olm.config.yaml | 10 +- testdata/helm/memcached-operator/Dockerfile | 2 +- testdata/helm/memcached-operator/Makefile | 2 +- .../bundle/tests/scorecard/config.yaml | 12 +- .../scorecard/patches/basic.config.yaml | 2 +- .../config/scorecard/patches/olm.config.yaml | 10 +- website/config.toml | 14 +- .../content/en/docs/installation/_index.md | 2 +- .../en/docs/upgrading-sdk-version/v1.41.0.md | 116 ++++++++++++++++ 19 files changed, 180 insertions(+), 176 deletions(-) delete mode 100644 changelog/fragments/upgrade_kb_latest.yaml create mode 100644 changelog/generated/v1.41.0.md create mode 100644 website/content/en/docs/upgrading-sdk-version/v1.41.0.md diff --git a/Makefile b/Makefile index 0f345b1739..71846ebc41 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ SHELL = /bin/bash # This value must be updated to the release tag of the most recent release, a change that must # occur in the release commit. IMAGE_VERSION will be removed once each subproject that uses this # version is moved to a separate repo and release process. -export IMAGE_VERSION = v1.40.0 +export IMAGE_VERSION = v1.41.0 # Build-time variables to inject into binaries export SIMPLE_VERSION = $(shell (test "$(shell git describe --tags)" = "$(shell git describe --tags --abbrev=0)" && echo $(shell git describe --tags)) || echo $(shell git describe --tags --abbrev=0)+git) export GIT_VERSION = $(shell git describe --dirty --tags --always) diff --git a/changelog/fragments/upgrade_kb_latest.yaml b/changelog/fragments/upgrade_kb_latest.yaml deleted file mode 100644 index 1a86e8eb95..0000000000 --- a/changelog/fragments/upgrade_kb_latest.yaml +++ /dev/null @@ -1,130 +0,0 @@ -entries: - - description: > - For Go-based operators, upgrade the Go version from `1.23` to `1.24` - kind: "change" - breaking: true - migration: - header: Upgrade Go version to 1.24 - body: | - Update the Go version used to `1.24`. 
This affects: - - **Dockerfile:** - ```dockerfile - -FROM golang:1.23 AS builder - +FROM golang:1.24 AS builder - ``` - - **.devcontainer/devcontainer.json:** - ```json - - "image": "golang:1.23", - + "image": "golang:1.24", - ``` - - **go.mod:** - ```go - -go 1.23.0 - +go 1.24.0 - ``` - - - description: > - For Go-based operators, upgrade golangci-lint to `v2.1.0` and update `.golangci.yml` - to the v2 config format with enhanced structure and controls. - kind: "change" - breaking: false - migration: - header: Upgrade golangci-lint and use v2 config - body: | - Update golangci-lint usage across the project: - - **Makefile:** - ```makefile - -GOLANGCI_LINT_VERSION ?= v1.63.4 - +GOLANGCI_LINT_VERSION ?= v2.1.0 - - -$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) - +$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) - ``` - - **GitHub Actions Workflow:** - ```yaml - - uses: golangci/golangci-lint-action@v6 - + uses: golangci/golangci-lint-action@v8 - ``` - - **.golangci.yml:** - Convert to v2 layout with keys like `version`, `linters`, `settings`, `formatters`, `exclusions`. - You might want to copy and paste the file from the Memcached sample from the tag release `v1.40.0`: [testdata/go/v4/memcached-operator/.golangci.yml](https://github.com/operator-framework/operator-sdk/tree/v1.40.0/testdata/go/v4/memcached-operator/.golangci.yml) - - - description: > - For Go-based operators, upgrade controller-gen from `v0.17.2` to `v0.18.0`. - kind: "change" - breaking: false - migration: - header: Upgrade controller-gen to `v0.18.0` - body: | - Update controller-gen tooling and annotations: - - **Makefile:** - ```makefile - -CONTROLLER_TOOLS_VERSION ?= v0.17.2 - +CONTROLLER_TOOLS_VERSION ?= v0.18.0 - ``` - - Run `make generate` to regenerate code and manifests with the new version. - - - description: > - For Go-based operators, upgrade controller-runtime from `v0.20.4` to `v0.21.0` - and kubernetes dependencies to `v0.33`. - kind: "change" - breaking: false - migration: - header: Upgrade controller-runtime to `v0.21.0` - body: | - Update the `go.mod` import: - ```go - -sigs.k8s.io/controller-runtime v0.20.4 - +sigs.k8s.io/controller-runtime v0.21.0 - ``` - - Run `go mod tidy` to upgrade the k8s dependencies. - - - description: > - For Go-based operators, add new target to setup/teardown Kind cluster for E2E tests - and remove Kind setup from CI workflows. - kind: "addition" - breaking: false - migration: - header: Add cluster setup for e2e tests in Makefile and update CI workflow - body: | - Remove direct Kind commands in GitHub workflows: - - **Removed:** - ```yaml - - name: Create kind cluster - run: kind create cluster - ``` - - **Added to Makefile:** - ```makefile - KIND_CLUSTER ?= -test-e2e - - .PHONY: setup-test-e2e - setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist - @command -v $(KIND) >/dev/null 2>&1 || { \ - echo "Kind is not installed. Please install Kind manually."; \ - exit 1; \ - } - @case "$$($(KIND) get clusters)" in \ - *"$(KIND_CLUSTER)"*) \ - echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ - *) \ - echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ - $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ - esac - - .PHONY: cleanup-test-e2e - cleanup-test-e2e: - $(KIND) delete cluster --name $(KIND_CLUSTER) - ``` - - Update `test-e2e` target to call these appropriately. 
diff --git a/changelog/generated/v1.41.0.md b/changelog/generated/v1.41.0.md new file mode 100644 index 0000000000..947af9b572 --- /dev/null +++ b/changelog/generated/v1.41.0.md @@ -0,0 +1,12 @@ +## v1.41.0 + +### Additions + +- For Go-based operators, add new target to setup/teardown Kind cluster for E2E tests and remove Kind setup from CI workflows. ([#6954](https://github.com/operator-framework/operator-sdk/pull/6954)) + +### Changes + +- **Breaking change**: For Go-based operators, upgrade the Go version from `1.23` to `1.24`. ([#6954](https://github.com/operator-framework/operator-sdk/pull/6954)) +- For Go-based operators, upgrade golangci-lint to `v2.1.0` and update `.golangci.yml` to the v2 config format with enhanced structure and controls. ([#6954](https://github.com/operator-framework/operator-sdk/pull/6954)) +- For Go-based operators, upgrade controller-gen from `v0.17.2` to `v0.18.0`. ([#6954](https://github.com/operator-framework/operator-sdk/pull/6954)) +- For Go-based operators, upgrade controller-runtime from `v0.20.4` to `v0.21.0` and kubernetes dependencies to `v0.33`. ([#6954](https://github.com/operator-framework/operator-sdk/pull/6954)) diff --git a/testdata/go/v4/memcached-operator/Makefile b/testdata/go/v4/memcached-operator/Makefile index b43a379ad8..191fa26f3c 100644 --- a/testdata/go/v4/memcached-operator/Makefile +++ b/testdata/go/v4/memcached-operator/Makefile @@ -48,7 +48,7 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.40.0 +OPERATOR_SDK_VERSION ?= v1.41.0 # Image URL to use all building/pushing image targets IMG ?= controller:latest diff --git a/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml index f7368a430e..946cf910e2 100644 --- a/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: 
quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml index f7e0ed492b..22884c5382 100644 --- a/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml index 85895c9e93..d2f829662c 100644 --- a/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v4/monitoring/memcached-operator/Makefile b/testdata/go/v4/monitoring/memcached-operator/Makefile index 346da1ce6b..0a1d03b39f 100644 --- a/testdata/go/v4/monitoring/memcached-operator/Makefile +++ b/testdata/go/v4/monitoring/memcached-operator/Makefile @@ -48,7 +48,7 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. 
-OPERATOR_SDK_VERSION ?= v1.40.0 +OPERATOR_SDK_VERSION ?= v1.41.0 # Image URL to use all building/pushing image targets IMG ?= controller:latest diff --git a/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml index f7368a430e..946cf910e2 100644 --- a/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml index f7e0ed492b..22884c5382 100644 --- a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml index 85895c9e93..d2f829662c 100644 --- a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: 
olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/helm/memcached-operator/Dockerfile b/testdata/helm/memcached-operator/Dockerfile index 1a8f8ca280..dc997558af 100644 --- a/testdata/helm/memcached-operator/Dockerfile +++ b/testdata/helm/memcached-operator/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM quay.io/operator-framework/helm-operator:v1.40.0 +FROM quay.io/operator-framework/helm-operator:v1.41.0 ENV HOME=/opt/helm COPY watches.yaml ${HOME}/watches.yaml diff --git a/testdata/helm/memcached-operator/Makefile b/testdata/helm/memcached-operator/Makefile index 845dda7715..c644e15e68 100644 --- a/testdata/helm/memcached-operator/Makefile +++ b/testdata/helm/memcached-operator/Makefile @@ -150,7 +150,7 @@ ifeq (,$(shell which helm-operator 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(HELM_OPERATOR)) ;\ - curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.40.0/helm-operator_$(OS)_$(ARCH) ;\ + curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.41.0/helm-operator_$(OS)_$(ARCH) ;\ chmod +x $(HELM_OPERATOR) ;\ } else diff --git a/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml index f7368a430e..946cf910e2 100644 --- a/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml +++ b/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: 
quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml index f7e0ed492b..22884c5382 100644 --- a/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml +++ b/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: basic test: basic-check-spec-test diff --git a/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml index 85895c9e93..d2f829662c 100644 --- a/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml +++ b/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.40.0 + image: quay.io/operator-framework/scorecard-test:v1.41.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/website/config.toml b/website/config.toml index 4834f7987c..e6b2454ef3 100644 --- a/website/config.toml +++ b/website/config.toml @@ -93,20 +93,26 @@ url_latest_version = "https://sdk.operatorframework.io" version = "master" url = "https://master.sdk.operatorframework.io" ##LATEST_RELEASE_KUBE_VERSION## - kube_version = "1.32.0" + kube_version = "1.33.1" ##LATEST_RELEASE_CLIENT_GO_VERSION## - client_go_version = "v0.32.4" + client_go_version = "v0.33.2" [[params.versions]] version = "Latest Release" url = "https://sdk.operatorframework.io" ##LATEST_RELEASE_KUBE_VERSION## - kube_version = "1.32.0" + kube_version = "1.33.1" ##LATEST_RELEASE_CLIENT_GO_VERSION## - client_go_version = "v0.32.4" + client_go_version = "v0.33.2" ##RELEASE_ADDME## +[[params.versions]] + version = "v1.41" + url = "https://v1-41-x.sdk.operatorframework.io" + kube_version = "1.33.1" + client_go_version = "v0.33.2" + [[params.versions]] version = "v1.40" url = "https://v1-40-x.sdk.operatorframework.io" diff --git a/website/content/en/docs/installation/_index.md b/website/content/en/docs/installation/_index.md index d2b30a0d7c..bc513dc99a 100644 --- a/website/content/en/docs/installation/_index.md +++ 
b/website/content/en/docs/installation/_index.md
@@ -36,7 +36,7 @@ export OS=$(uname | awk '{print tolower($0)}')
 
 Download the binary for your platform:
 
 ```sh
-export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.40.0
+export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.41.0
 curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH}
 ```
diff --git a/website/content/en/docs/upgrading-sdk-version/v1.41.0.md b/website/content/en/docs/upgrading-sdk-version/v1.41.0.md
new file mode 100644
index 0000000000..8a3b54fc81
--- /dev/null
+++ b/website/content/en/docs/upgrading-sdk-version/v1.41.0.md
@@ -0,0 +1,116 @@
+---
+title: v1.41.0
+weight: 998959000
+---
+
+## Upgrade Go version to 1.24
+
+Update the Go version used to `1.24`. This affects:
+
+**Dockerfile:**
+```diff
+- FROM golang:1.23 AS builder
++ FROM golang:1.24 AS builder
+```
+
+**.devcontainer/devcontainer.json:**
+```diff
+- "image": "golang:1.23",
++ "image": "golang:1.24",
+```
+
+**go.mod:**
+```diff
+- go 1.23.0
++ go 1.24.0
+```
+
+_See [#6954](https://github.com/operator-framework/operator-sdk/pull/6954) for more details._
+
+## Upgrade golangci-lint and use v2 config
+
+Update golangci-lint usage across the project:
+
+**Makefile:**
+```diff
+- GOLANGCI_LINT_VERSION ?= v1.63.4
++ GOLANGCI_LINT_VERSION ?= v2.1.0
+
+- $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
++ $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
+```
+
+**GitHub Actions Workflow:**
+```diff
+- uses: golangci/golangci-lint-action@v6
++ uses: golangci/golangci-lint-action@v8
+```
+
+**.golangci.yml:**
+Convert to v2 layout with keys like `version`, `linters`, `settings`, `formatters`, `exclusions`.
+You might want to copy and paste the file from the Memcached sample from the tag release `v1.41.0`: [testdata/go/v4/memcached-operator/.golangci.yml](https://github.com/operator-framework/operator-sdk/tree/v1.41.0/testdata/go/v4/memcached-operator/.golangci.yml)
+
+_See [#6954](https://github.com/operator-framework/operator-sdk/pull/6954) for more details._
+
+## Upgrade controller-gen to `v0.18.0`
+
+Update controller-gen tooling and annotations:
+
+**Makefile:**
+```diff
+- CONTROLLER_TOOLS_VERSION ?= v0.17.2
++ CONTROLLER_TOOLS_VERSION ?= v0.18.0
+```
+
+Run `make generate` to regenerate code and manifests with the new version.
+
+_See [#6954](https://github.com/operator-framework/operator-sdk/pull/6954) for more details._
+
+## Upgrade controller-runtime to `v0.21.0`
+
+Update the `go.mod` import:
+```diff
+- sigs.k8s.io/controller-runtime v0.20.4
++ sigs.k8s.io/controller-runtime v0.21.0
+```
+
+Run `go mod tidy` to upgrade the k8s dependencies.
+
+_See [#6954](https://github.com/operator-framework/operator-sdk/pull/6954) for more details._
+
+## Add cluster setup for e2e tests in Makefile and update CI workflow
+
+Remove direct Kind commands in GitHub workflows:
+
+**Removed:**
+```yaml
+- name: Create kind cluster
+  run: kind create cluster
+```
+
+**Added to Makefile:**
+```makefile
+KIND_CLUSTER ?= <project-name>-test-e2e
+
+.PHONY: setup-test-e2e
+setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist
+ @command -v $(KIND) >/dev/null 2>&1 || { \
+ echo "Kind is not installed. 
Please install Kind manually."; \ + exit 1; \ + } + @case "$$($(KIND) get clusters)" in \ + *"$(KIND_CLUSTER)"*) \ + echo "Kind cluster '$(KIND_CLUSTER)' already exists. Skipping creation." ;; \ + *) \ + echo "Creating Kind cluster '$(KIND_CLUSTER)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER) ;; \ + esac + +.PHONY: cleanup-test-e2e +cleanup-test-e2e: + $(KIND) delete cluster --name $(KIND_CLUSTER) +``` + +Update `test-e2e` target to call these appropriately. + +_See [#6954](https://github.com/operator-framework/operator-sdk/pull/6954) for more details._ From 385b275f11bbc88db5e26423514dd95fa37367aa Mon Sep 17 00:00:00 2001 From: "Adam D. Cornett" Date: Tue, 8 Jul 2025 15:39:50 -0700 Subject: [PATCH 10/31] re-generate scaffolding post release Signed-off-by: Adam D. Cornett --- testdata/helm/memcached-operator/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testdata/helm/memcached-operator/Makefile b/testdata/helm/memcached-operator/Makefile index c644e15e68..03816b68f8 100644 --- a/testdata/helm/memcached-operator/Makefile +++ b/testdata/helm/memcached-operator/Makefile @@ -48,7 +48,7 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.40.0 +OPERATOR_SDK_VERSION ?= v1.41.0 # Container tool to use for building and pushing images CONTAINER_TOOL ?= docker From 4aeffa75d95d482d0af9cc44398f1389f406ddbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 10:11:28 -0700 Subject: [PATCH 11/31] Bump helm.sh/helm/v3 from 3.18.3 to 3.18.4 (#6970) Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.18.3 to 3.18.4. - [Release notes](https://github.com/helm/helm/releases) - [Commits](https://github.com/helm/helm/compare/v3.18.3...v3.18.4) --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-version: 3.18.4 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 90e0ee9ea1..de1cb80eb0 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( golang.org/x/text v0.26.0 golang.org/x/tools v0.34.0 gomodules.xyz/jsonpatch/v3 v3.0.1 - helm.sh/helm/v3 v3.18.3 + helm.sh/helm/v3 v3.18.4 k8s.io/api v0.33.2 k8s.io/apiextensions-apiserver v0.33.2 k8s.io/apimachinery v0.33.2 diff --git a/go.sum b/go.sum index b433a1df45..6e8d483867 100644 --- a/go.sum +++ b/go.sum @@ -837,8 +837,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.18.3 h1:+cvyGKgs7Jt7BN3Klmb4SsG4IkVpA7GAZVGvMz6VO4I= -helm.sh/helm/v3 v3.18.3/go.mod h1:wUc4n3txYBocM7S9RjTeZBN9T/b5MjffpcSsWEjSIpw= +helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ= +helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= From 35e78646a402b555d43794954ffc7b475c210cc6 Mon Sep 17 00:00:00 2001 From: Scott Trent <32449003+trent-s@users.noreply.github.com> Date: Sat, 12 Jul 2025 00:26:25 +0900 Subject: [PATCH 12/31] fix typo of filename kustomization.yaml (#6974) Signed-off-by: Scott Trent --- website/content/en/docs/upgrading-sdk-version/v1.40.0.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/en/docs/upgrading-sdk-version/v1.40.0.md b/website/content/en/docs/upgrading-sdk-version/v1.40.0.md index 1712e1975d..5fe00f7e55 100644 --- a/website/content/en/docs/upgrading-sdk-version/v1.40.0.md +++ b/website/content/en/docs/upgrading-sdk-version/v1.40.0.md @@ -278,7 +278,7 @@ _See [#6928](https://github.com/operator-framework/operator-sdk/pull/6928) for m **Changes required under the hood `config/prometheus/`** -- 1. Update the `config/prometheus/kutomization.yaml` add at the bottom: +- 1. Update the `config/prometheus/kustomization.yaml` add at the bottom: ```yaml # [PROMETHEUS-WITH-CERTS] The following patch configures the ServiceMonitor in ../prometheus From ed4df851fa749dd6eb9674f14d6d1f1e53728f2c Mon Sep 17 00:00:00 2001 From: Simon Shine Date: Thu, 24 Jul 2025 17:42:59 +0200 Subject: [PATCH 13/31] Fix missing +/- prefixes in migration diff for v1.39.0.md (#6975) Improves readability when migrating to v1.39.0 Signed-off-by: Simon Shine --- .../en/docs/upgrading-sdk-version/v1.39.0.md | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/content/en/docs/upgrading-sdk-version/v1.39.0.md b/website/content/en/docs/upgrading-sdk-version/v1.39.0.md index 9f0e4315e4..df90c8bb44 100644 --- a/website/content/en/docs/upgrading-sdk-version/v1.39.0.md +++ b/website/content/en/docs/upgrading-sdk-version/v1.39.0.md @@ -16,18 +16,18 @@ so this release should be easier to follow. 
2) [go/v4] Update your `go.mod` file to upgrade the dependencies and run `go mod tidy` to download them ```go - github.com/onsi/ginkgo/v2 v2.17.1 - github.com/onsi/gomega v1.32.0 - k8s.io/api v0.30.1 - k8s.io/apimachinery v0.30.1 - k8s.io/client-go v0.30.1 - sigs.k8s.io/controller-runtime v0.18.4 - github.com/onsi/ginkgo/v2 v2.19.0 - github.com/onsi/gomega v1.33.1 - k8s.io/api v0.31.0 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 - sigs.k8s.io/controller-runtime v0.19.0 + - github.com/onsi/ginkgo/v2 v2.17.1 + - github.com/onsi/gomega v1.32.0 + - k8s.io/api v0.30.1 + - k8s.io/apimachinery v0.30.1 + - k8s.io/client-go v0.30.1 + - sigs.k8s.io/controller-runtime v0.18.4 + + github.com/onsi/ginkgo/v2 v2.19.0 + + github.com/onsi/gomega v1.33.1 + + k8s.io/api v0.31.0 + + k8s.io/apimachinery v0.31.0 + + k8s.io/client-go v0.31.0 + + sigs.k8s.io/controller-runtime v0.19.0 ``` 3) [go/v4] Update your `Makefile` with the below changes: From bbee3935dfa43f484ec1b387b70ae3eca0d37f47 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 13:28:35 -0700 Subject: [PATCH 14/31] Bump github.com/docker/docker (#6976) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 28.2.2+incompatible to 28.3.3+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v28.2.2...v28.3.3) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-version: 28.3.3+incompatible dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index de1cb80eb0..a473a1be3b 100644 --- a/go.mod +++ b/go.mod @@ -93,7 +93,7 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v28.3.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v28.2.2+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect diff --git a/go.sum b/go.sum index 6e8d483867..6093cd45fd 100644 --- a/go.sum +++ b/go.sum @@ -121,8 +121,8 @@ github.com/docker/cli v28.3.1+incompatible h1:ZUdwOLDEBoE3TE5rdC9IXGY5HPHksJK3M+ github.com/docker/cli v28.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= -github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 
h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= From 9755c82cb76f150fbf8a0e079affd6fad6f9df6e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 08:53:13 -0700 Subject: [PATCH 15/31] Bump helm.sh/helm/v3 from 3.18.4 to 3.18.5 (#6986) Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.18.4 to 3.18.5. - [Release notes](https://github.com/helm/helm/releases) - [Commits](https://github.com/helm/helm/compare/v3.18.4...v3.18.5) --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-version: 3.18.5 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 34 +++++++++++++-------------- go.sum | 72 +++++++++++++++++++++++++++------------------------------- 2 files changed, 50 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index a473a1be3b..1dd57a6d7d 100644 --- a/go.mod +++ b/go.mod @@ -23,21 +23,21 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.14.0 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 + github.com/spf13/pflag v1.0.7 github.com/spf13/viper v1.20.1 github.com/stretchr/testify v1.10.0 github.com/thoas/go-funk v0.9.3 golang.org/x/mod v0.25.0 - golang.org/x/text v0.26.0 + golang.org/x/text v0.27.0 golang.org/x/tools v0.34.0 gomodules.xyz/jsonpatch/v3 v3.0.1 - helm.sh/helm/v3 v3.18.4 - k8s.io/api v0.33.2 - k8s.io/apiextensions-apiserver v0.33.2 - k8s.io/apimachinery v0.33.2 - k8s.io/cli-runtime v0.33.2 - k8s.io/client-go v0.33.2 - k8s.io/kubectl v0.33.2 + helm.sh/helm/v3 v3.18.5 + k8s.io/api v0.33.3 + k8s.io/apiextensions-apiserver v0.33.3 + k8s.io/apimachinery v0.33.3 + k8s.io/cli-runtime v0.33.3 + k8s.io/client-go v0.33.3 + k8s.io/kubectl v0.33.3 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/controller-tools v0.18.0 @@ -215,6 +215,7 @@ require ( github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sigstore/fulcio v1.7.1 // indirect @@ -232,9 +233,6 @@ require ( github.com/vbatts/tar-split v0.12.1 // indirect github.com/vbauerster/mpb/v8 v8.10.2 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.etcd.io/bbolt v1.4.2 // indirect go.mongodb.org/mongo-driver v1.17.4 // indirect @@ -267,13 +265,13 @@ require ( go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.3 // indirect - golang.org/x/crypto v0.39.0 // indirect + golang.org/x/crypto v0.40.0 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect golang.org/x/net v0.41.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/term v0.33.0 // indirect golang.org/x/time v0.12.0 // indirect 
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gomodules.xyz/orderedmap v0.1.0 // indirect @@ -287,8 +285,8 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.33.2 // indirect - k8s.io/component-base v0.33.2 // indirect + k8s.io/apiserver v0.33.3 // indirect + k8s.io/component-base v0.33.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect oras.land/oras-go/v2 v2.6.0 // indirect diff --git a/go.sum b/go.sum index 6093cd45fd..d1e0c51c61 100644 --- a/go.sum +++ b/go.sum @@ -117,6 +117,8 @@ github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v28.3.1+incompatible h1:ZUdwOLDEBoE3TE5rdC9IXGY5HPHksJK3M+hJEWhh2mc= github.com/docker/cli v28.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= @@ -495,8 +497,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= -github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= -github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= @@ -526,8 +528,9 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= @@ -564,13 +567,6 @@ github.com/vbauerster/mpb/v8 
v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4 github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -661,8 +657,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= @@ -713,8 +709,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -736,8 +732,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -747,8 +743,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -758,8 +754,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -837,30 +833,30 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.18.4 h1:pNhnHM3nAmDrxz6/UC+hfjDY4yeDATQCka2/87hkZXQ= -helm.sh/helm/v3 v3.18.4/go.mod h1:WVnwKARAw01iEdjpEkP7Ii1tT1pTPYfM1HsakFKM3LI= +helm.sh/helm/v3 v3.18.5 h1:Cc3Z5vd6kDrZq9wO9KxKLNEickiTho6/H/dBNRVSos4= +helm.sh/helm/v3 v3.18.5/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= -k8s.io/api v0.33.2/go.mod 
h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= -k8s.io/apiextensions-apiserver v0.33.2 h1:6gnkIbngnaUflR3XwE1mCefN3YS8yTD631JXQhsU6M8= -k8s.io/apiextensions-apiserver v0.33.2/go.mod h1:IvVanieYsEHJImTKXGP6XCOjTwv2LUMos0YWc9O+QP8= -k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= -k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.2 h1:KGTRbxn2wJagJowo29kKBp4TchpO1DRO3g+dB/KOJN4= -k8s.io/apiserver v0.33.2/go.mod h1:9qday04wEAMLPWWo9AwqCZSiIn3OYSZacDyu/AcoM/M= -k8s.io/cli-runtime v0.33.2 h1:koNYQKSDdq5AExa/RDudXMhhtFasEg48KLS2KSAU74Y= -k8s.io/cli-runtime v0.33.2/go.mod h1:gnhsAWpovqf1Zj5YRRBBU7PFsRc6NkEkwYNQE+mXL88= -k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E= -k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo= -k8s.io/component-base v0.33.2 h1:sCCsn9s/dG3ZrQTX/Us0/Sx2R0G5kwa0wbZFYoVp/+0= -k8s.io/component-base v0.33.2/go.mod h1:/41uw9wKzuelhN+u+/C59ixxf4tYQKW7p32ddkYNe2k= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= +k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= +k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= +k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= +k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= +k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= +k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8= k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.33.2 h1:7XKZ6DYCklu5MZQzJe+CkCjoGZwD1wWl7t/FxzhMz7Y= -k8s.io/kubectl v0.33.2/go.mod h1:8rC67FB8tVTYraovAGNi/idWIK90z2CHFNMmGJZJ3KI= +k8s.io/kubectl v0.33.3 h1:r/phHvH1iU7gO/l7tTjQk2K01ER7/OAJi8uFHHyWSac= +k8s.io/kubectl v0.33.3/go.mod h1:euj2bG56L6kUGOE/ckZbCoudPwuj4Kud7BR0GzyNiT0= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= From d96a80762d8dc65e320f0e90b055a3c91e38a2ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 08:54:13 -0700 Subject: [PATCH 16/31] Bump github.com/go-viper/mapstructure/v2 from 2.3.0 to 2.4.0 (#6989) Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.3.0 to 2.4.0. 
- [Release notes](https://github.com/go-viper/mapstructure/releases) - [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md) - [Commits](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) --- updated-dependencies: - dependency-name: github.com/go-viper/mapstructure/v2 dependency-version: 2.4.0 dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1dd57a6d7d..e9976934fc 100644 --- a/go.mod +++ b/go.mod @@ -126,7 +126,7 @@ require ( github.com/go-openapi/swag v0.23.1 // indirect github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/envy v1.6.5 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect diff --git a/go.sum b/go.sum index d1e0c51c61..c8f0dee306 100644 --- a/go.sum +++ b/go.sum @@ -214,8 +214,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= -github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= From ff5409450d98ff061fa977359cf53eea699aad1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Aug 2025 08:54:51 -0700 Subject: [PATCH 17/31] Bump actions/checkout from 4 to 5 (#6980) Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/deploy.yml | 8 ++++---- .github/workflows/freshen-images.yml | 4 ++-- .github/workflows/integration.yml | 4 ++-- .github/workflows/olm-check.yml | 2 +- .github/workflows/test-go.yml | 6 +++--- .github/workflows/test-helm.yml | 4 ++-- .github/workflows/test-sample-go.yml | 4 ++-- .github/workflows/test-sanity.yml | 6 +++--- 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 2abd308591..fd9e1cccbc 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -18,7 +18,7 @@ jobs: outputs: skip: ${{ steps.check_docs_only.outputs.skip }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - id: check_docs_only @@ -39,7 +39,7 @@ jobs: environment: deploy steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 @@ -92,7 +92,7 @@ jobs: # Check out repo before tag step for script. - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 @@ -138,7 +138,7 @@ jobs: # Check out repo before tag step for script. - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 diff --git a/.github/workflows/freshen-images.yml b/.github/workflows/freshen-images.yml index 477ec4787c..e2dcb4bb81 100644 --- a/.github/workflows/freshen-images.yml +++ b/.github/workflows/freshen-images.yml @@ -18,7 +18,7 @@ jobs: git_tags: ${{ steps.tags.outputs.git_tags }} steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 - id: tags @@ -45,7 +45,7 @@ jobs: password: ${{ secrets.QUAY_PASSWORD }} registry: quay.io - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 1 - name: build and push diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index d90e4f6372..fb55e74472 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -9,7 +9,7 @@ jobs: outputs: skip: ${{ steps.check_docs_only.outputs.skip }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - id: check_docs_only @@ -26,7 +26,7 @@ jobs: needs: check_docs_only if: needs.check_docs_only.outputs.skip != 'true' steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 diff --git a/.github/workflows/olm-check.yml b/.github/workflows/olm-check.yml index 8adfa2bfed..3f8fa54d6b 100644 --- a/.github/workflows/olm-check.yml +++ b/.github/workflows/olm-check.yml @@ -12,7 +12,7 @@ jobs: name: check-olm-minor-releases runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - id: run-check-olm run: ./hack/check-olm.sh diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml index ff4a13ea5c..98e7b42a92 100644 --- a/.github/workflows/test-go.yml +++ b/.github/workflows/test-go.yml @@ -12,7 +12,7 @@ jobs: outputs: skip: ${{ steps.check_docs_only.outputs.skip }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - id: check_docs_only @@ -29,7 +29,7 @@ jobs: needs: check_docs_only if: needs.check_docs_only.outputs.skip != 'true' steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 @@ -49,7 +49,7 @@ jobs: needs: check_docs_only if: needs.check_docs_only.outputs.skip != 'true' 
steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 diff --git a/.github/workflows/test-helm.yml b/.github/workflows/test-helm.yml index 79377865e7..94855fdc17 100644 --- a/.github/workflows/test-helm.yml +++ b/.github/workflows/test-helm.yml @@ -9,7 +9,7 @@ jobs: outputs: skip: ${{ steps.check_docs_only.outputs.skip }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - id: check_docs_only @@ -26,7 +26,7 @@ jobs: needs: check_docs_only if: needs.check_docs_only.outputs.skip != 'true' steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 diff --git a/.github/workflows/test-sample-go.yml b/.github/workflows/test-sample-go.yml index 166ef559ba..9067b7c653 100644 --- a/.github/workflows/test-sample-go.yml +++ b/.github/workflows/test-sample-go.yml @@ -9,7 +9,7 @@ jobs: outputs: skip: ${{ steps.check_docs_only.outputs.skip }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - id: check_docs_only @@ -26,7 +26,7 @@ jobs: needs: check_docs_only if: needs.check_docs_only.outputs.skip != 'true' steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 diff --git a/.github/workflows/test-sanity.yml b/.github/workflows/test-sanity.yml index c10159e4e2..8ed83811a1 100644 --- a/.github/workflows/test-sanity.yml +++ b/.github/workflows/test-sanity.yml @@ -9,7 +9,7 @@ jobs: outputs: skip: ${{ steps.check_docs_only.outputs.skip }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - id: check_docs_only @@ -26,7 +26,7 @@ jobs: needs: check_docs_only if: needs.check_docs_only.outputs.skip != 'true' steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 @@ -45,7 +45,7 @@ jobs: name: docs runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 submodules: recursive From 83d7c06bca858cc911813a992eabd420d9e5cd4c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 11:41:32 -0700 Subject: [PATCH 18/31] Bump github.com/ulikunitz/xz from 0.5.12 to 0.5.14 (#6993) Bumps [github.com/ulikunitz/xz](https://github.com/ulikunitz/xz) from 0.5.12 to 0.5.14. - [Commits](https://github.com/ulikunitz/xz/compare/v0.5.12...v0.5.14) --- updated-dependencies: - dependency-name: github.com/ulikunitz/xz dependency-version: 0.5.14 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e9976934fc..9cc93f9ffa 100644 --- a/go.mod +++ b/go.mod @@ -229,7 +229,7 @@ require ( github.com/stoewer/go-strcase v1.3.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/ulikunitz/xz v0.5.12 // indirect + github.com/ulikunitz/xz v0.5.14 // indirect github.com/vbatts/tar-split v0.12.1 // indirect github.com/vbauerster/mpb/v8 v8.10.2 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/go.sum b/go.sum index c8f0dee306..41dbc553f1 100644 --- a/go.sum +++ b/go.sum @@ -559,8 +559,8 @@ github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= -github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= +github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= From 25b1186770c6e301d991a8df956bda145320afe3 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 2 Sep 2025 11:55:32 -0700 Subject: [PATCH 19/31] use go version from builder image, instead of downloading in Dockerfile (#6996) Signed-off-by: Adam D. Cornett --- images/operator-sdk/Dockerfile | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/images/operator-sdk/Dockerfile b/images/operator-sdk/Dockerfile index 87ce50f424..e5ad444073 100644 --- a/images/operator-sdk/Dockerfile +++ b/images/operator-sdk/Dockerfile @@ -19,17 +19,12 @@ RUN GOOS=linux GOARCH=$TARGETARCH make build/operator-sdk # Final image. FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6 -# TODO: Figure out how to take the go binary from the builder image so this doesn't have to be maintained. -ENV GO_VERSION=1.23.4 - ARG TARGETARCH RUN microdnf install -y make gcc which tar gzip -RUN curl -sSLo /tmp/go.tar.gz https://golang.org/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz \ - && rm -rf /usr/local/go \ - && tar -C /usr/local -xzf /tmp/go.tar.gz \ - && ln -sf /usr/local/go/bin/* /usr/local/bin/ \ - && rm -f /tmp/go.tar.gz \ - && go version + +# Copy Go runtime from builder image +COPY --from=builder /usr/local/go /usr/local/go +RUN ln -sf /usr/local/go/bin/* /usr/local/bin/ && go version COPY --from=builder /workspace/build/operator-sdk /usr/local/bin/operator-sdk From e27207c069a3d1ad7343181d392a36379cb06b1c Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 2 Sep 2025 13:48:32 -0700 Subject: [PATCH 20/31] updating go and various dependencies (#6997) Signed-off-by: Adam D. 
Cornett --- go.mod | 106 ++++----- go.sum | 214 ++++++++---------- .../packagemanifestfakes/fake_generator.go | 2 - 3 files changed, 147 insertions(+), 175 deletions(-) diff --git a/go.mod b/go.mod index 9cc93f9ffa..6f1eca203d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/operator-framework/operator-sdk -go 1.24.3 +go 1.24.4 require ( github.com/blang/semver/v4 v4.0.0 @@ -10,50 +10,50 @@ require ( github.com/iancoleman/strcase v0.3.0 github.com/kr/text v0.2.0 github.com/markbates/inflect v1.0.4 - github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.23.4 - github.com/onsi/gomega v1.37.0 + github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3 + github.com/onsi/ginkgo/v2 v2.25.2 + github.com/onsi/gomega v1.38.2 github.com/operator-framework/ansible-operator-plugins v1.39.0 - github.com/operator-framework/api v0.32.0 + github.com/operator-framework/api v0.34.0 github.com/operator-framework/operator-lib v0.19.0 github.com/operator-framework/operator-manifest-tools v0.10.0 - github.com/operator-framework/operator-registry v1.56.0 - github.com/prometheus/client_golang v1.22.0 + github.com/operator-framework/operator-registry v1.57.0 + github.com/prometheus/client_golang v1.23.0 github.com/sergi/go-diff v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.14.0 - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 + github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.10 github.com/spf13/viper v1.20.1 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/thoas/go-funk v0.9.3 - golang.org/x/mod v0.25.0 - golang.org/x/text v0.27.0 - golang.org/x/tools v0.34.0 + golang.org/x/mod v0.27.0 + golang.org/x/text v0.28.0 + golang.org/x/tools v0.36.0 gomodules.xyz/jsonpatch/v3 v3.0.1 - helm.sh/helm/v3 v3.18.5 - k8s.io/api v0.33.3 - k8s.io/apiextensions-apiserver v0.33.3 - k8s.io/apimachinery v0.33.3 + helm.sh/helm/v3 v3.18.6 + k8s.io/api v0.33.4 + k8s.io/apiextensions-apiserver v0.33.4 + k8s.io/apimachinery v0.33.4 k8s.io/cli-runtime v0.33.3 - k8s.io/client-go v0.33.3 + k8s.io/client-go v0.33.4 k8s.io/kubectl v0.33.3 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/controller-tools v0.18.0 sigs.k8s.io/kubebuilder/v4 v4.6.0 - sigs.k8s.io/yaml v1.5.0 + sigs.k8s.io/yaml v1.6.0 ) require ( cel.dev/expr v0.24.0 // indirect - dario.cat/mergo v1.0.1 // indirect + dario.cat/mergo v1.0.2 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/BurntSushi/toml v1.5.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -68,7 +68,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect - github.com/containerd/containerd v1.7.27 // indirect + github.com/containerd/containerd v1.7.28 // indirect github.com/containerd/containerd/api v1.9.0 // indirect github.com/containerd/continuity v0.4.5 // indirect github.com/containerd/errdefs v1.0.0 // indirect @@ -78,11 +78,11 @@ require ( 
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.63.1 // indirect - github.com/containers/image/v5 v5.35.0 // indirect + github.com/containers/common v0.64.1 // indirect + github.com/containers/image/v5 v5.36.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.58.0 // indirect + github.com/containers/storage v1.59.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect @@ -91,7 +91,7 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/distribution/v3 v3.0.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v28.3.1+incompatible // indirect + github.com/docker/cli v28.3.3+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect @@ -112,19 +112,12 @@ require ( github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-git/go-git/v5 v5.16.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-jose/go-jose/v4 v4.1.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/analysis v0.23.0 // indirect - github.com/go-openapi/errors v0.22.1 // indirect github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/loads v0.22.0 // indirect - github.com/go-openapi/runtime v0.28.0 // indirect - github.com/go-openapi/spec v0.21.0 // indirect - github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.23.1 // indirect - github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/envy v1.6.5 // indirect @@ -135,11 +128,11 @@ require ( github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.25.0 // indirect + github.com/google/cel-go v0.26.0 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-containerregistry v0.20.6 // indirect - github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect + github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect @@ -173,12 +166,11 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mattn/go-sqlite3 v1.14.28 // indirect + github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect 
github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.5.0 // indirect @@ -193,7 +185,6 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect @@ -220,7 +211,6 @@ require ( github.com/shopspring/decimal v1.4.0 // indirect github.com/sigstore/fulcio v1.7.1 // indirect github.com/sigstore/protobuf-specs v0.4.3 // indirect - github.com/sigstore/rekor v1.3.10 // indirect github.com/sigstore/sigstore v1.9.5 // indirect github.com/smallstep/pkcs7 v0.2.1 // indirect github.com/sourcegraph/conc v0.3.0 // indirect @@ -234,14 +224,13 @@ require ( github.com/vbauerster/mpb/v8 v8.10.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/bbolt v1.4.2 // indirect - go.mongodb.org/mongo-driver v1.17.4 // indirect + go.etcd.io/bbolt v1.4.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 // indirect go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 // indirect @@ -254,39 +243,40 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect go.opentelemetry.io/otel/log v0.12.2 // indirect - go.opentelemetry.io/otel/metric v1.36.0 // indirect - go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect - go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect - go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.3 // indirect - golang.org/x/crypto v0.40.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.41.0 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/net v0.41.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.34.0 // indirect - golang.org/x/term v0.33.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect golang.org/x/time v0.12.0 // indirect + 
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gomodules.xyz/orderedmap v0.1.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/grpc v1.73.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.33.3 // indirect - k8s.io/component-base v0.33.3 // indirect + k8s.io/apiserver v0.33.4 // indirect + k8s.io/component-base v0.33.4 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect oras.land/oras-go/v2 v2.6.0 // indirect diff --git a/go.sum b/go.sum index 41dbc553f1..ae32d2a05c 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,8 @@ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= -dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= @@ -18,8 +18,8 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= @@ -63,8 +63,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= 
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= -github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= +github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= +github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0= github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= @@ -83,16 +83,16 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.63.1 h1:6g02gbW34PaRVH4Heb2Pk11x0SdbQ+8AfeKKeQGqYBE= -github.com/containers/common v0.63.1/go.mod h1:+3GCotSqNdIqM3sPs152VvW7m5+Mg8Kk+PExT3G9hZw= -github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8= -github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM= +github.com/containers/common v0.64.1 h1:E8vSiL+B84/UCsyVSb70GoxY9cu+0bseLujm4EKF6GE= +github.com/containers/common v0.64.1/go.mod h1:CtfQNHoCAZqWeXMwdShcsxmMJSeGRgKKMqAwRKmWrHE= +github.com/containers/image/v5 v5.36.1 h1:6zpXBqR59UcAzoKpa/By5XekeqFV+htWYfr65+Cgjqo= +github.com/containers/image/v5 v5.36.1/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= -github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= +github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs= +github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -119,8 +119,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v28.3.1+incompatible h1:ZUdwOLDEBoE3TE5rdC9IXGY5HPHksJK3M+hJEWhh2mc= -github.com/docker/cli v28.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo= 
+github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= @@ -172,8 +172,8 @@ github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77 github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= -github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= -github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -184,26 +184,12 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= -github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= -github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= -github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= -github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= -github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= -github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= -github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= 
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= @@ -249,8 +235,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.25.0 h1:jsFw9Fhn+3y2kBbltZR4VEz5xKkcIFRPDnuEzAGv5GY= -github.com/google/cel-go v0.25.0/go.mod h1:hjEb6r5SuOSlhCHmFoLzu8HGCERvIsDAbxDAyNU/MmI= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -268,8 +254,8 @@ github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -364,11 +350,11 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= -github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 h1:yVCLo4+ACVroOEr4iFU1iH46Ldlzz2rTuu18Ra7M8sU= -github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2/go.mod h1:VzB2VoMh1Y32/QqDfg9ZJYHj99oM4LiGtqPZydTiQSQ= +github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3 h1:Eaq36EIyJNp7b3qDhjV7jmDVq/yPeW2v4pTqzGbOGB4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3/go.mod 
h1:6KKUoQBZBW6PDXJtNfqeEjPXMj/ITTk+cWK9t9uS5+E= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= @@ -379,8 +365,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -415,14 +399,12 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= -github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -431,14 +413,14 @@ github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/operator-framework/ansible-operator-plugins v1.39.0 h1:JLlbdGdnGnF8q8WInq24Upde/jfWwRzIJ4gK4xjRLHc= github.com/operator-framework/ansible-operator-plugins v1.39.0/go.mod h1:XLMYrKfowmX5leL8V4trkgtxfsXYdLynzyHamOR5xJc= -github.com/operator-framework/api v0.32.0 h1:LZSZr7at3NrjsjwQVNsYD+04o5wMq75jrR0dMYiIIH8= -github.com/operator-framework/api v0.32.0/go.mod h1:OGJo6HUYxoQwpGaLr0lPJzSek51RiXajJSSa8Jzjvp8= +github.com/operator-framework/api v0.34.0 h1:REiEaYhG1CWmDoajdcAdZqtgoljWG+ixMY59vUX5pFI= +github.com/operator-framework/api v0.34.0/go.mod 
h1:eGncUNIYvWtfGCCKmLzGXvoi3P0TDf3Yd/Z0Sn9E6SQ= github.com/operator-framework/operator-lib v0.19.0 h1:az6ogYj21rtU0SF9uYctRLyKp2dtlqTsmpfehFy6Ce8= github.com/operator-framework/operator-lib v0.19.0/go.mod h1:KxycAjFnHt0DBtHmH3Jm7yHcY5sdrshPKTqM/HKAQ08= github.com/operator-framework/operator-manifest-tools v0.10.0 h1:+vtIElvGQ5e43gCD6fF65a0HNH3AD3LGnukUhpl9kjc= github.com/operator-framework/operator-manifest-tools v0.10.0/go.mod h1:eB/wnr0BOhMLNXPeceE+0p3vudP16zDNWP60Hvn3KaM= -github.com/operator-framework/operator-registry v1.56.0 h1:vbTyee/gahpnh7qw1hV1osnWy9YpTjIbEuHpwIdoEUs= -github.com/operator-framework/operator-registry v1.56.0/go.mod h1:NOmQyrgOGW0cwUxHG5ZqKxdObOzQNmO4Rxcf7JC32FU= +github.com/operator-framework/operator-registry v1.57.0 h1:mQ4c8A8VUxZPJ0QCFRNio+7JEsLX6eKxlDSl6ORCRdk= +github.com/operator-framework/operator-registry v1.57.0/go.mod h1:9rAZH/LZ/ttEuTvL1D4KApGqOtRDE6fJzzOrJNcBu7g= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= @@ -464,8 +446,8 @@ github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glE github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -511,8 +493,6 @@ github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= -github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -526,11 +506,11 @@ github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 
h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= @@ -551,8 +531,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= @@ -572,16 +552,14 @@ github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= -go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= +go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= +go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= -go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= -go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod 
h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -594,8 +572,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= @@ -620,18 +598,18 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwW go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= -go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= 
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= @@ -646,8 +624,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= -go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -657,8 +635,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= @@ -672,8 +650,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -692,8 +670,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net 
v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= @@ -732,8 +710,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -743,8 +721,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -754,8 +732,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -770,8 +748,12 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc 
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= +golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -782,6 +764,8 @@ gomodules.xyz/jsonpatch/v3 v3.0.1 h1:Te7hKxV52TKCbNYq3t84tzKav3xhThdvSsSp/W89IyI gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0 h1:fM/+TGh/O1KkqGR5xjTKg6bU8OKBkg7p0Y+x/J9m8Os= gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -789,17 +773,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -809,8 +793,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -833,24 +817,24 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.18.5 h1:Cc3Z5vd6kDrZq9wO9KxKLNEickiTho6/H/dBNRVSos4= -helm.sh/helm/v3 v3.18.5/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= +helm.sh/helm/v3 v3.18.6 h1:S/2CqcYnNfLckkHLI0VgQbxgcDaU3N4A/46E3n9wSNY= +helm.sh/helm/v3 v3.18.6/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= -k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= -k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= -k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= -k8s.io/apimachinery v0.33.3 
h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= -k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.3 h1:Wv0hGc+QFdMJB4ZSiHrCgN3zL3QRatu56+rpccKC3J4= -k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= +k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= +k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= +k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= +k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= +k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= +k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= +k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= -k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= -k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= -k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= -k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= +k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= +k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= +k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8= @@ -881,5 +865,5 @@ sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxO sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= -sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/generate/packagemanifest/packagemanifestfakes/fake_generator.go b/internal/generate/packagemanifest/packagemanifestfakes/fake_generator.go index b4800dee78..7da2c84b7a 100644 --- a/internal/generate/packagemanifest/packagemanifestfakes/fake_generator.go +++ b/internal/generate/packagemanifest/packagemanifestfakes/fake_generator.go @@ -93,8 +93,6 @@ func (fake *FakeGenerator) GenerateReturnsOnCall(i int, result1 error) { func (fake *FakeGenerator) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.generateMutex.RLock() - defer fake.generateMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value From 8c4d5513de451e5a317da03f6c4e604c5e299344 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Sep 2025 08:23:27 -0700 Subject: [PATCH 21/31] Bump actions/setup-go from 5 to 6 (#6998) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5 to 6. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/setup-go dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/deploy.yml | 2 +- .github/workflows/integration.yml | 2 +- .github/workflows/test-go.yml | 4 ++-- .github/workflows/test-helm.yml | 2 +- .github/workflows/test-sample-go.yml | 2 +- .github/workflows/test-sanity.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index fd9e1cccbc..425331f69e 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -44,7 +44,7 @@ jobs: fetch-depth: 0 - name: install - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: go-version-file: "go.mod" diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index fb55e74472..8f7dca304b 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -34,7 +34,7 @@ jobs: run: | .github/workflows/clean-unused-disk-space.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - run: make test-e2e-integration diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml index 98e7b42a92..1da8a0bfe8 100644 --- a/.github/workflows/test-go.yml +++ b/.github/workflows/test-go.yml @@ -37,7 +37,7 @@ jobs: run: | .github/workflows/clean-unused-disk-space.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - run: sudo rm -rf /usr/local/bin/kustomize @@ -57,7 +57,7 @@ jobs: run: | .github/workflows/clean-unused-disk-space.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - run: make test-unit diff --git a/.github/workflows/test-helm.yml b/.github/workflows/test-helm.yml index 94855fdc17..a492da1eb3 100644 --- a/.github/workflows/test-helm.yml +++ b/.github/workflows/test-helm.yml @@ -34,7 +34,7 @@ jobs: run: | .github/workflows/clean-unused-disk-space.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - run: sudo rm -rf /usr/local/bin/kustomize diff --git a/.github/workflows/test-sample-go.yml b/.github/workflows/test-sample-go.yml index 9067b7c653..ffdf26d4bd 100644 --- a/.github/workflows/test-sample-go.yml +++ b/.github/workflows/test-sample-go.yml @@ -34,7 +34,7 @@ jobs: run: | .github/workflows/clean-unused-disk-space.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" - run: sudo rm -rf /usr/local/bin/kustomize diff --git a/.github/workflows/test-sanity.yml b/.github/workflows/test-sanity.yml index 8ed83811a1..89677ce3a9 100644 --- a/.github/workflows/test-sanity.yml +++ b/.github/workflows/test-sanity.yml @@ -34,7 +34,7 @@ jobs: run: | .github/workflows/clean-unused-disk-space.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: go-version-file: "go.mod" id: go From 8eddc56936883cc8a02eedab0ed7d323e8274b17 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 9 Sep 2025 
13:12:16 -0700 Subject: [PATCH 22/31] update observability docs to align with prometheus's docs (#7000) Signed-off-by: Adam D. Cornett --- .../best-practices/observability-best-practices.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/website/content/en/docs/best-practices/observability-best-practices.md b/website/content/en/docs/best-practices/observability-best-practices.md index 328968a576..719b1920c5 100644 --- a/website/content/en/docs/best-practices/observability-best-practices.md +++ b/website/content/en/docs/best-practices/observability-best-practices.md @@ -88,7 +88,15 @@ See [Alerts, Metrics and Recording Rules Tests](#alerts-metrics-and-recording-ru As per [Prometheus](https://prometheus.io/docs/prometheus) documentation, [Recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) allow you to pre-compute frequently needed or computationally expensive expressions and save their result as a new set of time series. **Note:** The Prometheus recording rules appear in Prometheus UI as metrics. -In order to easily identify your operator recording rules, their names should usually follow the same naming guidelines as the metrics. +Recording rule names should follow the `level:metric:operations` format as specified in the [Prometheus recording rules best practices](https://prometheus.io/docs/practices/rules/). This naming convention makes it clear that the metric is a recording rule and helps consumers understand they need to examine the underlying query to fully understand what the metric provides. + +- **level:** represents the aggregation level and labels of the rule output +- **metric:** is the metric name +- **operations:** is a list of operations that were applied to the metric, newest operation first + +For example: `job:up:avg_over_time` or `instance:node_cpu_utilisation:rate5m` + +In addition to this format, your operator recording rules should also follow the same naming guidelines as metrics for consistency within your operator's observability stack. See [Alerts, Metrics and Recording Rules Tests](#alerts-metrics-and-recording-rules-tests) section for recording rules testing recommendations. From cef8969b03283721b68e0e5e9e49fdd185e43413 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 9 Sep 2025 13:13:28 -0700 Subject: [PATCH 23/31] add support to scaffold oci helm registry charts (#6999) Signed-off-by: Adam D. 
Cornett --- internal/plugins/helm/v1/api.go | 3 +++ internal/plugins/helm/v1/chartutil/chart.go | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/internal/plugins/helm/v1/api.go b/internal/plugins/helm/v1/api.go index 696d7bcca0..d38f795ec8 100644 --- a/internal/plugins/helm/v1/api.go +++ b/internal/plugins/helm/v1/api.go @@ -108,6 +108,9 @@ func (p *createAPISubcommand) UpdateMetadata(cliMeta plugin.CLIMetadata, subcmdM $ %[1]s create api \ --helm-chart=/path/to/local/chart-archives/app-1.2.3.tgz + + $ %[1]s create api \ + --helm-chart=oci://charts.mycompany.com/example-namespace/app:1.2.3 `, cliMeta.CommandName) } diff --git a/internal/plugins/helm/v1/chartutil/chart.go b/internal/plugins/helm/v1/chartutil/chart.go index 5587c2175e..59a3b8bafd 100644 --- a/internal/plugins/helm/v1/chartutil/chart.go +++ b/internal/plugins/helm/v1/chartutil/chart.go @@ -27,6 +27,7 @@ import ( "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/downloader" "helm.sh/helm/v3/pkg/getter" + "helm.sh/helm/v3/pkg/registry" "helm.sh/helm/v3/pkg/repo" ) @@ -126,11 +127,19 @@ func LoadChart(opts Options) (*chart.Chart, error) { func downloadChart(destDir string, opts Options) (string, error) { settings := cli.New() getters := getter.All(settings) + + // Create registry client for OCI registry support + registryClient, err := registry.NewClient() + if err != nil { + return "", fmt.Errorf("failed to create registry client: %w", err) + } + c := downloader.ChartDownloader{ Out: os.Stderr, Getters: getters, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + RegistryClient: registryClient, } if opts.Repo != "" { @@ -182,6 +191,12 @@ func fetchChartDependencies(chartPath string) error { settings := cli.New() getters := getter.All(settings) + // Create registry client for OCI registry support + registryClient, err := registry.NewClient() + if err != nil { + return fmt.Errorf("failed to create registry client: %w", err) + } + out := &bytes.Buffer{} man := &downloader.Manager{ Out: out, @@ -189,6 +204,7 @@ func fetchChartDependencies(chartPath string) error { Getters: getters, RepositoryConfig: settings.RepositoryConfig, RepositoryCache: settings.RepositoryCache, + RegistryClient: registryClient, } if err := man.Build(); err != nil { fmt.Println(out.String()) From 89b409badcc4f38d1816ddfa13b4d7bfda1a5ce4 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 9 Sep 2025 15:05:12 -0700 Subject: [PATCH 24/31] updating ansible plugin to v1.40.0 and corresponding dependencies (#7001) Signed-off-by: Adam D. 
Cornett --- go.mod | 15 ++++++++------- go.sum | 34 ++++++++++++++++++---------------- 2 files changed, 26 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 6f1eca203d..7911efbf6c 100644 --- a/go.mod +++ b/go.mod @@ -10,15 +10,15 @@ require ( github.com/iancoleman/strcase v0.3.0 github.com/kr/text v0.2.0 github.com/markbates/inflect v1.0.4 - github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3 - github.com/onsi/ginkgo/v2 v2.25.2 + github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 + github.com/onsi/ginkgo/v2 v2.25.3 github.com/onsi/gomega v1.38.2 - github.com/operator-framework/ansible-operator-plugins v1.39.0 + github.com/operator-framework/ansible-operator-plugins v1.40.0 github.com/operator-framework/api v0.34.0 github.com/operator-framework/operator-lib v0.19.0 github.com/operator-framework/operator-manifest-tools v0.10.0 github.com/operator-framework/operator-registry v1.57.0 - github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/client_golang v1.23.1 github.com/sergi/go-diff v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.14.0 @@ -28,7 +28,7 @@ require ( github.com/stretchr/testify v1.11.1 github.com/thoas/go-funk v0.9.3 golang.org/x/mod v0.27.0 - golang.org/x/text v0.28.0 + golang.org/x/text v0.29.0 golang.org/x/tools v0.36.0 gomodules.xyz/jsonpatch/v3 v3.0.1 helm.sh/helm/v3 v3.18.6 @@ -139,6 +139,7 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect github.com/h2non/filetype v1.1.3 // indirect @@ -197,7 +198,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/proglottis/gpgme v0.1.4 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/common v0.66.0 // indirect github.com/prometheus/procfs v0.16.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 // indirect @@ -258,7 +259,7 @@ require ( golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.16.0 // indirect + golang.org/x/sync v0.17.0 // indirect golang.org/x/sys v0.35.0 // indirect golang.org/x/term v0.34.0 // indirect golang.org/x/time v0.12.0 // indirect diff --git a/go.sum b/go.sum index ae32d2a05c..ed40f322a9 100644 --- a/go.sum +++ b/go.sum @@ -269,6 +269,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache 
v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= @@ -353,8 +355,8 @@ github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxU github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3 h1:Eaq36EIyJNp7b3qDhjV7jmDVq/yPeW2v4pTqzGbOGB4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.11.3/go.mod h1:6KKUoQBZBW6PDXJtNfqeEjPXMj/ITTk+cWK9t9uS5+E= +github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 h1:aOeI7xAOVdK+R6xbVsZuU9HmCZYmQVmZgPf9xJUd2Sg= +github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0/go.mod h1:0hZWbtfeCYUQeAQdPLUzETiBhUSns7O6LDj9vH88xKA= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= @@ -401,8 +403,8 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= -github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -411,8 +413,8 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/ansible-operator-plugins v1.39.0 h1:JLlbdGdnGnF8q8WInq24Upde/jfWwRzIJ4gK4xjRLHc= -github.com/operator-framework/ansible-operator-plugins v1.39.0/go.mod h1:XLMYrKfowmX5leL8V4trkgtxfsXYdLynzyHamOR5xJc= +github.com/operator-framework/ansible-operator-plugins v1.40.0 h1:9zwfsgcvfvyJYS2YnIlrCYSiLJ4Dxgpk8sRp4qKblMA= +github.com/operator-framework/ansible-operator-plugins v1.40.0/go.mod h1:ssqjn16fj00MTUQF7Xa0qr6yDFfyaivA4g0rf+e1dX0= github.com/operator-framework/api v0.34.0 h1:REiEaYhG1CWmDoajdcAdZqtgoljWG+ixMY59vUX5pFI= github.com/operator-framework/api v0.34.0/go.mod h1:eGncUNIYvWtfGCCKmLzGXvoi3P0TDf3Yd/Z0Sn9E6SQ= github.com/operator-framework/operator-lib v0.19.0 h1:az6ogYj21rtU0SF9uYctRLyKp2dtlqTsmpfehFy6Ce8= @@ -446,8 +448,8 @@ github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glE github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_golang v1.23.1 h1:w6gXMLQGgd0jXXlote9lRHMe0nG01EbnJT+C0EJru2Y= +github.com/prometheus/client_golang v1.23.1/go.mod h1:br8j//v2eg2K5Vvna5klK8Ku5pcU5r4ll73v6ik5dIQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -455,8 +457,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.0 h1:K/rJPHrG3+AoQs50r2+0t7zMnMzek2Vbv31OFVsMeVY= +github.com/prometheus/common v0.66.0/go.mod h1:Ux6NtV1B4LatamKE63tJBntoxD++xmtI/lK0VtEplN4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -612,8 +614,8 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -687,8 +689,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod 
h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -732,8 +734,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From a42936ad1bdb9e60212d7538c16f805664e567a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 08:52:51 -0700 Subject: [PATCH 25/31] Bump github.com/containerd/containerd from 1.7.28 to 1.7.29 (#7008) Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.7.28 to 1.7.29. - [Release notes](https://github.com/containerd/containerd/releases) - [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md) - [Commits](https://github.com/containerd/containerd/compare/v1.7.28...v1.7.29) --- updated-dependencies: - dependency-name: github.com/containerd/containerd dependency-version: 1.7.29 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7911efbf6c..44cab6ff21 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect - github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/containerd/api v1.9.0 // indirect github.com/containerd/continuity v0.4.5 // indirect github.com/containerd/errdefs v1.0.0 // indirect diff --git a/go.sum b/go.sum index ed40f322a9..ebf5c0509c 100644 --- a/go.sum +++ b/go.sum @@ -63,8 +63,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= -github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= +github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0= github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= From 5295051cf0f92243e1d2001629b4eab912326133 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 11 Nov 2025 13:48:37 -0700 Subject: [PATCH 26/31] updating ansible plugin to v1.42.0 and corresponding dependencies (#7010) Signed-off-by: Adam D. 
Cornett --- go.mod | 84 +++++++++++++------------ go.sum | 191 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 143 insertions(+), 132 deletions(-) diff --git a/go.mod b/go.mod index 44cab6ff21..5ae1add8e3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/operator-framework/operator-sdk -go 1.24.4 +go 1.24.6 require ( github.com/blang/semver/v4 v4.0.0 @@ -11,34 +11,34 @@ require ( github.com/kr/text v0.2.0 github.com/markbates/inflect v1.0.4 github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 - github.com/onsi/ginkgo/v2 v2.25.3 + github.com/onsi/ginkgo/v2 v2.27.2 github.com/onsi/gomega v1.38.2 - github.com/operator-framework/ansible-operator-plugins v1.40.0 + github.com/operator-framework/ansible-operator-plugins v1.42.0 github.com/operator-framework/api v0.34.0 github.com/operator-framework/operator-lib v0.19.0 github.com/operator-framework/operator-manifest-tools v0.10.0 - github.com/operator-framework/operator-registry v1.57.0 - github.com/prometheus/client_golang v1.23.1 + github.com/operator-framework/operator-registry v1.59.0 + github.com/prometheus/client_golang v1.23.2 github.com/sergi/go-diff v1.4.0 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/afero v1.14.0 + github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.10 - github.com/spf13/viper v1.20.1 + github.com/spf13/viper v1.21.0 github.com/stretchr/testify v1.11.1 github.com/thoas/go-funk v0.9.3 - golang.org/x/mod v0.27.0 - golang.org/x/text v0.29.0 - golang.org/x/tools v0.36.0 + golang.org/x/mod v0.29.0 + golang.org/x/text v0.30.0 + golang.org/x/tools v0.37.0 gomodules.xyz/jsonpatch/v3 v3.0.1 helm.sh/helm/v3 v3.18.6 - k8s.io/api v0.33.4 - k8s.io/apiextensions-apiserver v0.33.4 - k8s.io/apimachinery v0.33.4 - k8s.io/cli-runtime v0.33.3 - k8s.io/client-go v0.33.4 - k8s.io/kubectl v0.33.3 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/api v0.33.5 + k8s.io/apiextensions-apiserver v0.33.5 + k8s.io/apimachinery v0.33.5 + k8s.io/cli-runtime v0.33.5 + k8s.io/client-go v0.33.5 + k8s.io/kubectl v0.33.5 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/controller-tools v0.18.0 sigs.k8s.io/kubebuilder/v4 v4.6.0 @@ -75,15 +75,12 @@ require ( github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.64.1 // indirect - github.com/containers/image/v5 v5.36.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.59.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/go-systemd/v22 v22.6.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect @@ -91,11 +88,11 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/distribution/v3 v3.0.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v28.3.3+incompatible // indirect + 
github.com/docker/cli v28.4.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.3 // indirect - github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -124,7 +121,7 @@ require ( github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-migrate/migrate/v4 v4.18.3 // indirect + github.com/golang-migrate/migrate/v4 v4.19.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect @@ -139,7 +136,6 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect - github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect github.com/h2non/filetype v1.1.3 // indirect @@ -191,14 +187,14 @@ require ( github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect - github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/proglottis/gpgme v0.1.4 // indirect + github.com/proglottis/gpgme v0.1.5 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.0 // indirect + github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 // indirect @@ -206,21 +202,21 @@ require ( github.com/rivo/uniseg v0.4.7 // indirect github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sigstore/fulcio v1.7.1 // indirect github.com/sigstore/protobuf-specs v0.4.3 // indirect github.com/sigstore/sigstore v1.9.5 // indirect github.com/smallstep/pkcs7 v0.2.1 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/cast v1.7.1 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/cast v1.10.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/titanous/rocacheck 
v0.0.0-20171023193734-afe73141d399 // indirect - github.com/ulikunitz/xz v0.5.14 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/vbatts/tar-split v0.12.1 // indirect github.com/vbauerster/mpb/v8 v8.10.2 // indirect github.com/x448/float16 v0.8.4 // indirect @@ -250,18 +246,20 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.7.0 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect + go.podman.io/common v0.65.0 // indirect + go.podman.io/image/v5 v5.37.0 // indirect + go.podman.io/storage v1.60.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.41.0 // indirect + golang.org/x/crypto v0.43.0 // indirect golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/net v0.43.0 // indirect + golang.org/x/net v0.46.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/term v0.36.0 // indirect golang.org/x/time v0.12.0 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -269,15 +267,15 @@ require ( google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect - google.golang.org/grpc v1.75.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.33.4 // indirect - k8s.io/component-base v0.33.4 // indirect + k8s.io/apiserver v0.33.5 // indirect + k8s.io/component-base v0.33.5 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect oras.land/oras-go/v2 v2.6.0 // indirect diff --git a/go.sum b/go.sum index ebf5c0509c..774b9ee748 100644 --- a/go.sum +++ b/go.sum @@ -77,26 +77,20 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= -github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/stargz-snapshotter/estargz v0.17.0 h1:+TyQIsR/zSFI1Rm31EQBwpAA1ovYgIKHy7kctL3sLcE= +github.com/containerd/stargz-snapshotter/estargz v0.17.0/go.mod h1:s06tWAiJcXQo9/8AReBCIo/QxcXFZ2n4qfsRnpl71SM= github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= 
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.64.1 h1:E8vSiL+B84/UCsyVSb70GoxY9cu+0bseLujm4EKF6GE= -github.com/containers/common v0.64.1/go.mod h1:CtfQNHoCAZqWeXMwdShcsxmMJSeGRgKKMqAwRKmWrHE= -github.com/containers/image/v5 v5.36.1 h1:6zpXBqR59UcAzoKpa/By5XekeqFV+htWYfr65+Cgjqo= -github.com/containers/image/v5 v5.36.1/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs= -github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -119,16 +113,16 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo= -github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections 
v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 h1:EHZfspsnLAz8Hzccd67D5abwLiqoqym2jz/jOS39mCk= github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -162,6 +156,12 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -208,12 +208,13 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4B/swMiAmDLs= -github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY= +github.com/golang-migrate/migrate/v4 v4.19.0 h1:RcjOnCGz3Or6HQYEJ/EEVLfWnmw9KnoigPSjzhCuaSE= +github.com/golang-migrate/migrate/v4 v4.19.0/go.mod h1:9dyEcu+hO+G9hPSw8AIg50yg622pXJsoHItQnDGZkI0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= @@ -269,8 +270,6 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod 
h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= @@ -308,6 +307,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -344,6 +345,8 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= +github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -357,6 +360,8 @@ github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxU github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0 h1:aOeI7xAOVdK+R6xbVsZuU9HmCZYmQVmZgPf9xJUd2Sg= github.com/maxbrunsfeld/counterfeiter/v6 v6.12.0/go.mod h1:0hZWbtfeCYUQeAQdPLUzETiBhUSns7O6LDj9vH88xKA= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= @@ -403,8 +408,8 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= -github.com/onsi/ginkgo/v2 v2.25.3/go.mod 
h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -413,22 +418,22 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/ansible-operator-plugins v1.40.0 h1:9zwfsgcvfvyJYS2YnIlrCYSiLJ4Dxgpk8sRp4qKblMA= -github.com/operator-framework/ansible-operator-plugins v1.40.0/go.mod h1:ssqjn16fj00MTUQF7Xa0qr6yDFfyaivA4g0rf+e1dX0= +github.com/operator-framework/ansible-operator-plugins v1.42.0 h1:ahupKUXl7sYKILEUp1tiQNW9WiFxpGGyN1UQ/EfsNGY= +github.com/operator-framework/ansible-operator-plugins v1.42.0/go.mod h1:gGyNgCrNU1opGioTWbYdnbRTcJkJrFPS8Ysu/hKybnE= github.com/operator-framework/api v0.34.0 h1:REiEaYhG1CWmDoajdcAdZqtgoljWG+ixMY59vUX5pFI= github.com/operator-framework/api v0.34.0/go.mod h1:eGncUNIYvWtfGCCKmLzGXvoi3P0TDf3Yd/Z0Sn9E6SQ= github.com/operator-framework/operator-lib v0.19.0 h1:az6ogYj21rtU0SF9uYctRLyKp2dtlqTsmpfehFy6Ce8= github.com/operator-framework/operator-lib v0.19.0/go.mod h1:KxycAjFnHt0DBtHmH3Jm7yHcY5sdrshPKTqM/HKAQ08= github.com/operator-framework/operator-manifest-tools v0.10.0 h1:+vtIElvGQ5e43gCD6fF65a0HNH3AD3LGnukUhpl9kjc= github.com/operator-framework/operator-manifest-tools v0.10.0/go.mod h1:eB/wnr0BOhMLNXPeceE+0p3vudP16zDNWP60Hvn3KaM= -github.com/operator-framework/operator-registry v1.57.0 h1:mQ4c8A8VUxZPJ0QCFRNio+7JEsLX6eKxlDSl6ORCRdk= -github.com/operator-framework/operator-registry v1.57.0/go.mod h1:9rAZH/LZ/ttEuTvL1D4KApGqOtRDE6fJzzOrJNcBu7g= +github.com/operator-framework/operator-registry v1.59.0 h1:SQhT0qMTYJXqStNhBOYXmLAMpS3eszzbcXAg5NLgJu8= +github.com/operator-framework/operator-registry v1.59.0/go.mod h1:QE1RRQGe+iau8sfY10DbP3+eoahH0G0l+coYrnEzJgI= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -441,15 +446,13 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= -github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= +github.com/proglottis/gpgme v0.1.5 h1:KCGyOw8sQ+SI96j6G8D8YkOGn+1TwbQTT9/zQXoVlz0= +github.com/proglottis/gpgme v0.1.5/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.23.1 h1:w6gXMLQGgd0jXXlote9lRHMe0nG01EbnJT+C0EJru2Y= -github.com/prometheus/client_golang v1.23.1/go.mod h1:br8j//v2eg2K5Vvna5klK8Ku5pcU5r4ll73v6ik5dIQ= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -457,8 +460,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.66.0 h1:K/rJPHrG3+AoQs50r2+0t7zMnMzek2Vbv31OFVsMeVY= -github.com/prometheus/common v0.66.0/go.mod h1:Ux6NtV1B4LatamKE63tJBntoxD++xmtI/lK0VtEplN4= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -479,14 +482,14 @@ github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2N github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= 
+github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= -github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= -github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= @@ -502,19 +505,19 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= -github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= -github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod 
h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= @@ -539,10 +542,18 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= -github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= @@ -614,10 +625,12 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.podman.io/common v0.65.0 h1:8JNl25U4VpKDkFHSymSPm4te7ZQHJbfAB/l2FqtmYEg= +go.podman.io/common v0.65.0/go.mod h1:+lJu8KHeoDQsD9HDdiFaMaOUiqPLQnK406WuLnqM7Z0= +go.podman.io/image/v5 v5.37.0 h1:yzgQybwuWIIeK63hu+mQqna/wOh96XD5cpVc6j8Dg5M= +go.podman.io/image/v5 v5.37.0/go.mod h1:+s2Sx5dia/jVeT8tI3r2NAPrARMiDdbEq3QPIQogx3I= +go.podman.io/storage v1.60.0 h1:bWNSrR58nxg39VNFDSx3m0AswbvyzPGOo5XsUfomTao= +go.podman.io/storage v1.60.0/go.mod h1:NK+rsWJVuQeCM7ifv7cxD3abegWxwtW/3OkuSUJJoE4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -637,8 +650,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto 
v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= @@ -652,8 +665,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -672,8 +685,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= @@ -712,8 +725,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -723,8 +736,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -734,8 +747,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -750,8 +763,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -784,8 +797,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= -google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= 
+google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -795,8 +808,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -823,28 +836,28 @@ helm.sh/helm/v3 v3.18.6 h1:S/2CqcYnNfLckkHLI0VgQbxgcDaU3N4A/46E3n9wSNY= helm.sh/helm/v3 v3.18.6/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= -k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= -k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= -k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= -k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= -k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= -k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= -k8s.io/cli-runtime v0.33.3 h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= -k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= -k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= -k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= -k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= -k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/api v0.33.5 h1:YR+uhYj05jdRpcksv8kjSliW+v9hwXxn6Cv10aR8Juw= +k8s.io/api v0.33.5/go.mod h1:2gzShdwXKT5yPGiqrTrn/U/nLZ7ZyT4WuAj3XGDVgVs= +k8s.io/apiextensions-apiserver v0.33.5 h1:93NZh6rmrcamX/tfv/dZrTsMiQX69ufANmDcKPEgSeA= +k8s.io/apiextensions-apiserver v0.33.5/go.mod h1:JIbyQnNlu6nQa7b1vgFi51pmlXOk8mdn0WJwUJnz/7U= +k8s.io/apimachinery v0.33.5 h1:NiT64hln4TQXeYR18/ES39OrNsjGz8NguxsBgp+6QIo= +k8s.io/apimachinery v0.33.5/go.mod 
h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.5 h1:X1Gy33r4YkRLRqTjGjofk7X1/EjSLEVSJ/A+1qjoj60= +k8s.io/apiserver v0.33.5/go.mod h1:Q+b5Btbc8x0PqOCeh/xBTesKk+cXQRN+PF2wdrTKDeg= +k8s.io/cli-runtime v0.33.5 h1:wM7DoglOkrJDmddla864mVpueaEDX7/XGAkHGMWQkpc= +k8s.io/cli-runtime v0.33.5/go.mod h1:ZmUR+ybq97SqxSkkqGQdIhzCfk/+ETUhwKQq5EguaCw= +k8s.io/client-go v0.33.5 h1:I8BdmQGxInpkMEnJvV6iG7dqzP3JRlpZZlib3OMFc3o= +k8s.io/client-go v0.33.5/go.mod h1:W8PQP4MxbM4ypgagVE65mUUqK1/ByQkSALF9tzuQ6u0= +k8s.io/component-base v0.33.5 h1:4D3kxjEx1pJRy3WHAZsmX3+LCpmd4ftE+2J4v6naTnQ= +k8s.io/component-base v0.33.5/go.mod h1:Zma1YjBVuuGxIbspj1vGR3/5blzo2ARf1v0QTtog1to= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8= k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.33.3 h1:r/phHvH1iU7gO/l7tTjQk2K01ER7/OAJi8uFHHyWSac= -k8s.io/kubectl v0.33.3/go.mod h1:euj2bG56L6kUGOE/ckZbCoudPwuj4Kud7BR0GzyNiT0= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kubectl v0.33.5 h1:/wj5EjXXrVeSd8+FcZ2sIIP1PlQkq8HWsR9T1Nsl32c= +k8s.io/kubectl v0.33.5/go.mod h1:YrBGE7U+nz7+UatG+aNDocIQtdTyqN528dwFCv6+Kuw= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= From 8cf170b4dba65a334f085b27ecf50fb54f750d7e Mon Sep 17 00:00:00 2001 From: "Adam D. Cornett" Date: Tue, 11 Nov 2025 14:03:22 -0700 Subject: [PATCH 27/31] moving 1.25 website from netlify to gh Signed-off-by: Adam D. Cornett --- website/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/config.toml b/website/config.toml index e6b2454ef3..574799509f 100644 --- a/website/config.toml +++ b/website/config.toml @@ -205,7 +205,7 @@ url_latest_version = "https://sdk.operatorframework.io" [[params.versions]] version = "v1.25" - url = "https://v1-25-x.sdk.operatorframework.io" + url = "https://github.com/operator-framework/operator-sdk/tree/v1.25.x/website/content/en/docs" kube_version = "1.25.0" client_go_version = "v0.25.3" From 1b9cb8dc55a83d027e562d81dcb134ebc8700ba1 Mon Sep 17 00:00:00 2001 From: Adam Cornett Date: Tue, 11 Nov 2025 14:31:12 -0700 Subject: [PATCH 28/31] Release v1.42.0 (#7011) Signed-off-by: Adam D. 
Cornett --- Makefile | 2 +- changelog/generated/v1.42.0.md | 3 +++ .../bundle/tests/scorecard/config.yaml | 12 ++++++------ .../config/scorecard/patches/basic.config.yaml | 2 +- .../config/scorecard/patches/olm.config.yaml | 10 +++++----- .../bundle/tests/scorecard/config.yaml | 12 ++++++------ .../config/scorecard/patches/basic.config.yaml | 2 +- .../config/scorecard/patches/olm.config.yaml | 10 +++++----- testdata/helm/memcached-operator/Dockerfile | 2 +- testdata/helm/memcached-operator/Makefile | 2 +- .../bundle/tests/scorecard/config.yaml | 12 ++++++------ .../config/scorecard/patches/basic.config.yaml | 2 +- .../config/scorecard/patches/olm.config.yaml | 10 +++++----- website/config.toml | 10 ++++++++-- website/content/en/docs/installation/_index.md | 2 +- .../content/en/docs/upgrading-sdk-version/v1.42.0.md | 6 ++++++ 16 files changed, 57 insertions(+), 42 deletions(-) create mode 100644 changelog/generated/v1.42.0.md create mode 100644 website/content/en/docs/upgrading-sdk-version/v1.42.0.md diff --git a/Makefile b/Makefile index 71846ebc41..d17151491b 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ SHELL = /bin/bash # This value must be updated to the release tag of the most recent release, a change that must # occur in the release commit. IMAGE_VERSION will be removed once each subproject that uses this # version is moved to a separate repo and release process. -export IMAGE_VERSION = v1.41.0 +export IMAGE_VERSION = v1.42.0 # Build-time variables to inject into binaries export SIMPLE_VERSION = $(shell (test "$(shell git describe --tags)" = "$(shell git describe --tags --abbrev=0)" && echo $(shell git describe --tags)) || echo $(shell git describe --tags --abbrev=0)+git) export GIT_VERSION = $(shell git describe --dirty --tags --always) diff --git a/changelog/generated/v1.42.0.md b/changelog/generated/v1.42.0.md new file mode 100644 index 0000000000..421acbe50b --- /dev/null +++ b/changelog/generated/v1.42.0.md @@ -0,0 +1,3 @@ +## v1.42.0 + +No changes for this release! 
diff --git a/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml
index 946cf910e2..6a6e414080 100644
--- a/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml
+++ b/testdata/go/v4/memcached-operator/bundle/tests/scorecard/config.yaml
@@ -8,7 +8,7 @@ stages:
   - entrypoint:
     - scorecard-test
    - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: basic
       test: basic-check-spec-test
@@ -18,7 +18,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-bundle-validation-test
@@ -28,7 +28,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-validation-test
@@ -38,7 +38,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-resources-test
@@ -48,7 +48,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-spec-descriptors-test
@@ -58,7 +58,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-status-descriptors-test
diff --git a/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml
index 22884c5382..0b45f2fe95 100644
--- a/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml
+++ b/testdata/go/v4/memcached-operator/config/scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
     entrypoint:
     - scorecard-test
     - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: basic
       test: basic-check-spec-test
diff --git a/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml
index d2f829662c..8cc125899d 100644
--- a/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml
+++ b/testdata/go/v4/memcached-operator/config/scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
     entrypoint:
     - scorecard-test
     - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
     entrypoint:
     - scorecard-test
     - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
     entrypoint:
     - scorecard-test
     - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
     entrypoint:
     - scorecard-test
     - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
     entrypoint:
     - scorecard-test
     - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-status-descriptors-test
diff --git a/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml
index 946cf910e2..6a6e414080 100644
--- a/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml
+++ b/testdata/go/v4/monitoring/memcached-operator/bundle/tests/scorecard/config.yaml
@@ -8,7 +8,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: basic
       test: basic-check-spec-test
@@ -18,7 +18,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-bundle-validation-test
@@ -28,7 +28,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-validation-test
@@ -38,7 +38,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-resources-test
@@ -48,7 +48,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-spec-descriptors-test
@@ -58,7 +58,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-status-descriptors-test
diff --git a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml
index 22884c5382..0b45f2fe95 100644
--- a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml
+++ b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
     entrypoint:
     - scorecard-test
     - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: basic
       test: basic-check-spec-test
diff --git a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml
index d2f829662c..8cc125899d 100644
--- a/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml
+++ b/testdata/go/v4/monitoring/memcached-operator/config/scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
     entrypoint:
     - scorecard-test
     - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
     entrypoint:
     - scorecard-test
     - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
     entrypoint:
     - scorecard-test
     - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
     entrypoint:
     - scorecard-test
     - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
     entrypoint:
     - scorecard-test
     - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-status-descriptors-test
diff --git a/testdata/helm/memcached-operator/Dockerfile b/testdata/helm/memcached-operator/Dockerfile
index dc997558af..1334830083 100644
--- a/testdata/helm/memcached-operator/Dockerfile
+++ b/testdata/helm/memcached-operator/Dockerfile
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM quay.io/operator-framework/helm-operator:v1.41.0
+FROM quay.io/operator-framework/helm-operator:v1.42.0
 
 ENV HOME=/opt/helm
 COPY watches.yaml ${HOME}/watches.yaml
diff --git a/testdata/helm/memcached-operator/Makefile b/testdata/helm/memcached-operator/Makefile
index 03816b68f8..ae10162278 100644
--- a/testdata/helm/memcached-operator/Makefile
+++ b/testdata/helm/memcached-operator/Makefile
@@ -150,7 +150,7 @@ ifeq (,$(shell which helm-operator 2>/dev/null))
 	@{ \
 	set -e ;\
 	mkdir -p $(dir $(HELM_OPERATOR)) ;\
-	curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.41.0/helm-operator_$(OS)_$(ARCH) ;\
+	curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.42.0/helm-operator_$(OS)_$(ARCH) ;\
 	chmod +x $(HELM_OPERATOR) ;\
 	}
 else
diff --git a/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml b/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml
index 946cf910e2..6a6e414080 100644
--- a/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml
+++ b/testdata/helm/memcached-operator/bundle/tests/scorecard/config.yaml
@@ -8,7 +8,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: basic
       test: basic-check-spec-test
@@ -18,7 +18,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-bundle-validation-test
@@ -28,7 +28,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-validation-test
@@ -38,7 +38,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-resources-test
@@ -48,7 +48,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-spec-descriptors-test
@@ -58,7 +58,7 @@ stages:
   - entrypoint:
     - scorecard-test
     - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-status-descriptors-test
diff --git a/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml b/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml
index 22884c5382..0b45f2fe95 100644
--- a/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml
+++ b/testdata/helm/memcached-operator/config/scorecard/patches/basic.config.yaml
@@ -4,7 +4,7 @@
     entrypoint:
     - scorecard-test
     - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: basic
       test: basic-check-spec-test
diff --git a/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml b/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml
index d2f829662c..8cc125899d 100644
--- a/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml
+++ b/testdata/helm/memcached-operator/config/scorecard/patches/olm.config.yaml
@@ -4,7 +4,7 @@
     entrypoint:
     - scorecard-test
     - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
     entrypoint:
     - scorecard-test
     - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
     entrypoint:
     - scorecard-test
     - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
     entrypoint:
     - scorecard-test
     - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
     entrypoint:
     - scorecard-test
     - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.41.0
+    image: quay.io/operator-framework/scorecard-test:v1.42.0
     labels:
       suite: olm
       test: olm-status-descriptors-test
diff --git a/website/config.toml b/website/config.toml
index 574799509f..0dae361926 100644
--- a/website/config.toml
+++ b/website/config.toml
@@ -95,7 +95,7 @@ url_latest_version = "https://sdk.operatorframework.io"
   ##LATEST_RELEASE_KUBE_VERSION##
   kube_version = "1.33.1"
   ##LATEST_RELEASE_CLIENT_GO_VERSION##
-  client_go_version = "v0.33.2"
+  client_go_version = "v0.33.5"
 
 [[params.versions]]
   version = "Latest Release"
@@ -103,10 +103,16 @@ url_latest_version = "https://sdk.operatorframework.io"
   ##LATEST_RELEASE_KUBE_VERSION##
   kube_version = "1.33.1"
   ##LATEST_RELEASE_CLIENT_GO_VERSION##
-  client_go_version = "v0.33.2"
+  client_go_version = "v0.33.5"
 
 ##RELEASE_ADDME##
+[[params.versions]]
+  version = "v1.42"
+  url = "https://v1-42-x.sdk.operatorframework.io"
+  kube_version = "1.33.1"
+  client_go_version = "v0.33.5"
+
 [[params.versions]]
   version = "v1.41"
   url = "https://v1-41-x.sdk.operatorframework.io"
diff --git a/website/content/en/docs/installation/_index.md b/website/content/en/docs/installation/_index.md
index bc513dc99a..46aed3c50b 100644
--- a/website/content/en/docs/installation/_index.md
+++ b/website/content/en/docs/installation/_index.md
@@ -36,7 +36,7 @@ export OS=$(uname | awk '{print tolower($0)}')
 
 Download the binary for your platform:
 
 ```sh
-export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.41.0
+export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.42.0
 curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH}
 ```
diff --git a/website/content/en/docs/upgrading-sdk-version/v1.42.0.md b/website/content/en/docs/upgrading-sdk-version/v1.42.0.md
new file mode 100644
index 0000000000..11707db928
--- /dev/null
+++ b/website/content/en/docs/upgrading-sdk-version/v1.42.0.md
@@ -0,0 +1,6 @@
+---
+title: v1.42.0
+weight: 998958000
+---
+
+There are no migrations for this release! 🎉

From ab5563df5499cafa4ea9d40d4b36b51899a4718e Mon Sep 17 00:00:00 2001
From: Joe Lanford
Date: Thu, 13 Nov 2025 09:05:17 -0500
Subject: [PATCH 29/31] update signing keys (#7017)

Signed-off-by: Joe Lanford
---
 .ci/gpg/pubring.auto     | 134 +++++++++++++++++++++++++--------------
 .ci/gpg/secring.auto.gpg | Bin 5289 -> 6556 bytes
 2 files changed, 85 insertions(+), 49 deletions(-)

diff --git a/.ci/gpg/pubring.auto b/.ci/gpg/pubring.auto
index a8509e0fe4..340d5d78db 100644
--- a/.ci/gpg/pubring.auto
+++ b/.ci/gpg/pubring.auto
@@ -12,53 +12,89 @@ GRh5EVXydbyMxqEpq2Su+rHlzfzgPh+hORNQgrag+qdbTVMimCoD+datX4854Hkb
 nah+mq7RtI0k5Nn+ENm4ufbHEKiNb56qFTNgMkquG5vxpA6NOlZ0QfKUxiDU08+g
 Pix7+TY7lzNhGipD7QjqfuJJr+1k3p/GrIpoHlU8/8FvlNYBDG3oMUvxNwARAQAB
 tDJPcGVyYXRvciBTREsgKHJlbGVhc2UpIDxjbmNmLW9wZXJhdG9yLXNka0BjbmNm
-8/0OviqFnQi/mIbbBQJfnJYWAAoJECqFnQi/mIbbHdwP/0qoULqBuHM2ki9nbKuu -LWXVIisH48q8J5lI5gOXEMndiAC7/f53sTA6i8/wi4RvfjRHzDSiex1gMcIc4MKB -1r4+79a9bHHn/BI5vJ3SQc8znlEvHkLlQc92W37IonRDCIw8XyACpUNeRzZciCEE -gPjKQ/RXmt726DzuFV3YRieuQwNro36Ve2W/Om0VOMElV27et4ykvAepL8YCbGGD -AXqRU/FlEdwwgovB4s3YmAZ3ZoVohAGP47jddE6Evmiv4X8TrTBzhlNSn9rxpHEl -GvOUeA0UFtXqiSfPBNBFoHUIPRheMVkvaTmsQR+/J9eC4JMPJlOMmNi8I7QwPEv8 -oj1/ImlxT56e0ah3zc17z3ddcJLZKZpNjcu9D6/97322uHibtK2g5DzGbpvXSwjp -NuFuA85iC1M8vHDcGV1OtTkemHP6y4+twfJsqffnuXtt/3JhN8q2ysAo9U4w0XtA -R1HdSV2qgbOROjnmXiia8xL3e1ZIb7DjtbtAgBCPBOWoPKNhWDfaXxeV8O1xHST5 -jo5uC4QxTC2yKQr4W+pVLlkI8jK+/iQwIhrMyURahyx++sg2NDggjZb6hC/m8mY7 -JjfS08Qurfm1of2g4yO1jcpZlWY/PDr4MZFMDZENH7uqba/eWCWLj03OL+JIG8N5 -VnFIEdFGMf84vtMpY35bALYjCRAFKZbiogtcfuZoD/4hHpYVyF6E+mVblD4XZfWT -WlpjzAerAMiffjudYrmoFdUOMKjcaDecUEfIu7f4zaC+kWe2pAGI22AgokuSNR/U -GBIuW2ApLm8HdPr3JpzXixVvFYvTRpw0p3A0CXZGR3gXU3yClhKriwmN38Z3mbJ3 -qtwIF8al5ANHlcWEdI7+MWWdU7eFfGzRYtCS2TYapl3NqAGKpSgViUxqhb5ui/YM -6muB2NSy54p+jt9I3TURYEiLm+stHAqB1rw6StCvXjqKi81nD5E0c6DzhN5Wcy0H -blcl63V5/FLZdoW6VUAIpgVLLoenQ3yM2rDylvNQmq18L7hduQFbtnGn5Nb/d4Ch -jXkdN/0n3pM+Qynw4qwOzoaC6rKV1K4ZVsz/LsnoV5U9mJfnX2G8mCpmv+MByPmz -GRzmPfT8tBVrq4u0/SqLKirLlP9ugJqHytHlYQv31zyTyquwdhzTd12MO/9wIyx3 -JaFyx7L+X1v2Cqs66AmhWG+E8pDxYSbq0Tow65xhKgDdElqUNNqns4m/t6hEWU+1 -blgtzBf680IJhbwJKyGMJLID6mAsNwitnoZ6hAVcqmtzMrnJ+m0wWAFTbGs/KY+m -0Cc8IzQzYa70jlyPt/+iwUkpzspXmRDlaWJULylGdaxRbSYmRZFMr95NB/ef33cF -uNlJZ+sbXAWLfIUlDs7KVQ== -=CWwB +LmlvPokCVAQTAQgAPhYhBDsvFIHRRiOAgLNGuwUpluKiC1x+BQJfnJU0AhsBBQkD +w7iABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEAUpluKiC1x+P5IQAJXpQMA1 +kIr6S2N9A4TE6z+dhN0g3oPdZqOYwlKpX32H4nLdv219Ns1mwBHUfTFmcbUuQLwH +1TjF7cVya/tUoyh/P7bBBOy/vC0NvvaOuhRXxeJJD7Q8neuXyCpIoCW8x2Eq47ut +21AL79ZrzZEBpavJ80S2uNTx7HGKYug491OKkEWO3Y+FOmTV38WsN+lpM+atn1LP +gWkEhWaxwkfLrYUgZ/lDBAIhPZ7n3gYptmTQdCzlp4dSEwJXesV35aMWfJOM848M +fVJFyFcMNo6ww0tHD+7btrGc4fHSJC/dKZcYVoiSHmpuAqRBXHWMxKPfijgwWQs5 +6JjxCWt4bwouF0D2uE6SD/MYsxN05yZL6OGfzzQES5Ilt0DS3QRLktN8PdeuS+WN +jLVo7/Q2SUGZcANm+5/ul7Qwj9JeFSK3VloLKY0YFEbnyTHw2TU4oDqyffUWTn+h +Pt34Wy+OWRM+2ykxFP1VklgCN07ESRSZOTN6iUzqets50rKpY3okNiZeMPcblxQo +uQ5/NFmYV/de87JuSmOKXB2yy/xdr7oxkbw9uYZmBEvw4etxH2yyzVxr0BJ4r0DW +5DlSxOeHaNa7aUVQnlK+Xf27Pj1XyYvV6G7NWEZYZQ/pclO0rhFH21ZiGo3DHgSo +cAGv6SWU01nELYYHTn3QFdmdjxmbqjSC0t+EiQJUBBMBCAA+AhsBBQsJCAcCBhUK +CQgLAgQWAgMBAh4BAheAFiEEOy8UgdFGI4CAs0a7BSmW4qILXH4FAmkUpkMFCQ8b +q48ACgkQBSmW4qILXH7MSw/+MniPUuEPYy42zuAHP1NGeHRJCWWzDRGwI5QML1LE +xqDmhzLAxLClZrSjDHRNFMNIYkIjzo1X858jIFgl8l5IdVfy/NZ2VkI46YP8rKFB +5L/2KJC+zVUF0x57vG72Dyt5fq77ytnSPqNKt7+hB5DlsjA2ZpCPZ9t1QTrw04jD +NpPjl2OmZKUwN6xh7bebb7RA+p+XmwhP0xKCcJtYbqOl3IdqrVz0Vz4ppvRIPlim +cVBHS2ufowjPQNR/iS6HGKyVfzMRNFWqP4nz67uzh1rK0vdBnCSf1vFJLFyKi2S+ +OFUUQu2uKibq1n4jGgtY7YQO422GWr9jWyV9NrAgNT+FIKrozLoCjqgbvZpdglA5 +s4CQbBxLXtlRFwTRtv1mZKraou7ybyip+CdB8mySmVmpYpUDv8ABEGReZKVgrI/y +8D4K+1TmdSvEkoUtuSjm/N/u4PtDZDFOCozEGGTvAjdpvFjdp2Bw3WNmWMNSnkJE +kqurCAlsqQTF+ckqcn2MrP9oE2v+GPyipN/sx60wNuoIB+xKT2RwO8kP7KBZl1D8 +ppRq6oEMx+XJSoekAenR5vXWsV1ZYcG4I33FcwSOgRd842R5dEDG5mXJExBb8Jky +FbWqq/2uoRT0+AUitOUrd5so58EIvVBxeRYO2C+pJS0vkrbE916h9iVWwhOMsY5Y +FNa5Ag0EX5yWFgEQAKPVhCFIwrySnVagMvkCzbIWWWOZ2RdOti0wEuMueRRoqjEg +QZusH9p7KmH08LziIoYuscYLqs0F1WraacbIkJcop4kui8meinPf7ZtM6fKlo52T +VCGi8XDwJdExWGNF6EN709ZBheTZovuw++0uXlyXDaJQt2PGzYp8EbBKURXp5eMY +IVQZ8i8wRD+CNtMiJS2SGpowKwS7KsFqWI01+4Fiyac/mke+fvIj+QyI0Hx3QuYj +w8Jv7N3NlVu72YgPw/QTLLqKZt/DMi1/bU4/vyW8YFUbN3aYf8RjIGzAcRRuWoFx +v2ZpPPZjLzl1MVcpZTuf46MoIJik4rtJV99AV1HbKVS2/Ym8s4JH+AfVnRpzxkXY 
+MQj0L6jCxVL12oyT2Xindw15DOBGyE2qE714/2oZnUsiAQo57IublOBRw8uerj29 +Lj94Kj6n5BrUVwYSz5vquojFLPLE3Gy/aVGZRD8DcFOhXfbUN7bN+vnW7WsklWlg +gS9sw9HKJyJnOQO78Plnl9HWfK2gVxG3eiwUgsjSWKW/OlLPGUeaczjzQkAOJOUo +bPphOdgbaatkIU7GuYCwD/IM7i/jUiLs7vPRwk7eXeZnV9uqoCqrIdTQQZzFg4CZ +8zxrNKvGPQw4zaN8YMw9aDeDdJdNciXqrJY5VRZ8/j5AGwesHIdNKkYKDJNbABEB +AAGJBHIEGAEIACYWIQQ7LxSB0UYjgICzRrsFKZbiogtcfgUCX5yWFgIbAgUJA8O4 +gAJACRAFKZbiogtcfsF0IAQZAQgAHRYhBIYT24eluoJe8/0OviqFnQi/mIbbBQJf +nJYWAAoJECqFnQi/mIbbHdwP/0qoULqBuHM2ki9nbKuuLWXVIisH48q8J5lI5gOX +EMndiAC7/f53sTA6i8/wi4RvfjRHzDSiex1gMcIc4MKB1r4+79a9bHHn/BI5vJ3S +Qc8znlEvHkLlQc92W37IonRDCIw8XyACpUNeRzZciCEEgPjKQ/RXmt726DzuFV3Y +RieuQwNro36Ve2W/Om0VOMElV27et4ykvAepL8YCbGGDAXqRU/FlEdwwgovB4s3Y +mAZ3ZoVohAGP47jddE6Evmiv4X8TrTBzhlNSn9rxpHElGvOUeA0UFtXqiSfPBNBF +oHUIPRheMVkvaTmsQR+/J9eC4JMPJlOMmNi8I7QwPEv8oj1/ImlxT56e0ah3zc17 +z3ddcJLZKZpNjcu9D6/97322uHibtK2g5DzGbpvXSwjpNuFuA85iC1M8vHDcGV1O +tTkemHP6y4+twfJsqffnuXtt/3JhN8q2ysAo9U4w0XtAR1HdSV2qgbOROjnmXiia +8xL3e1ZIb7DjtbtAgBCPBOWoPKNhWDfaXxeV8O1xHST5jo5uC4QxTC2yKQr4W+pV +LlkI8jK+/iQwIhrMyURahyx++sg2NDggjZb6hC/m8mY7JjfS08Qurfm1of2g4yO1 +jcpZlWY/PDr4MZFMDZENH7uqba/eWCWLj03OL+JIG8N5VnFIEdFGMf84vtMpY35b +ALYjjBgP/iKjHX5Zwo+wj7vsdpQu+w+pr5tecptmJ2uypJ6Ns+cuDcz1qfaAOjR3 +rZDGuckuX06G6E5tc/9vMKOtwySD2FgEAdpNdtG1WVIqldgTK7A7odhDtbIeVj3L +2ysci9xa4Gukj0qB3byuRPhBkHlYkP8KCPiSwkB75OxKZxx3DfMbc5ZZWzge6NCd +aeH41sETtzJiploHIqIOMve8o/7Rl+Mz9MXyedjvOqBKWNDFJxtbv/9TcaIH0MqD +Wv1MQzOZJMO6DC/u7aQz+Jk+Or529Swqoq7JfbdYMpEvLO6wOohYqxGuR36DW8F/ +FJSFbDG3DPLLWsv+01xcYG4s7GNSK30uF2jCMVEb3dOzBpz9DxUY8QHleMGBE2Vb +29JiDD7nBMd/TVQFaARa0WGXpImcPq4COcwG126pcFeadxMDobPlcHWHvEzfakud +xYkYd+LFeIlw6Aapb4u/Vjd5o+oIU6bjJ9gN8Ev5Q3xjuxHdatXLVHpS3/xt/iwP ++LVTIE0pLN43mwGmLsqADrJjs+lUg3sftXVA3jUHD28AblZcI7y+ZjQagIxAAozm +nTiYy4GvqezDLVb3irq64g3CrqrXJx9hAnKgqFeJ8+F2JzAMxh65bto/tac8hvdX +B22UTIP7JwGxqTQmyr/7Kui/eleCt0t2UXY7i5Ovh+vApGXbx4YRiQRyBBgBCAAm +AhsCFiEEOy8UgdFGI4CAs0a7BSmW4qILXH4FAmkU5xUFCQ8b638CQMF0IAQZAQgA +HRYhBIYT24eluoJe8/0OviqFnQi/mIbbBQJfnJYWAAoJECqFnQi/mIbbHdwP/0qo +ULqBuHM2ki9nbKuuLWXVIisH48q8J5lI5gOXEMndiAC7/f53sTA6i8/wi4RvfjRH +zDSiex1gMcIc4MKB1r4+79a9bHHn/BI5vJ3SQc8znlEvHkLlQc92W37IonRDCIw8 +XyACpUNeRzZciCEEgPjKQ/RXmt726DzuFV3YRieuQwNro36Ve2W/Om0VOMElV27e +t4ykvAepL8YCbGGDAXqRU/FlEdwwgovB4s3YmAZ3ZoVohAGP47jddE6Evmiv4X8T +rTBzhlNSn9rxpHElGvOUeA0UFtXqiSfPBNBFoHUIPRheMVkvaTmsQR+/J9eC4JMP +JlOMmNi8I7QwPEv8oj1/ImlxT56e0ah3zc17z3ddcJLZKZpNjcu9D6/97322uHib +tK2g5DzGbpvXSwjpNuFuA85iC1M8vHDcGV1OtTkemHP6y4+twfJsqffnuXtt/3Jh +N8q2ysAo9U4w0XtAR1HdSV2qgbOROjnmXiia8xL3e1ZIb7DjtbtAgBCPBOWoPKNh +WDfaXxeV8O1xHST5jo5uC4QxTC2yKQr4W+pVLlkI8jK+/iQwIhrMyURahyx++sg2 +NDggjZb6hC/m8mY7JjfS08Qurfm1of2g4yO1jcpZlWY/PDr4MZFMDZENH7uqba/e +WCWLj03OL+JIG8N5VnFIEdFGMf84vtMpY35bALYjCRAFKZbiogtcfhHND/9A54vj +2qrbv58mlK0RLV3PQJPlYlD1uzWXqZi35197b+T8gm5YBJ7d2/Q50DolX2k04AcE +X4gklwaiV83EpZrMYKeqHrOot6H1FJ86oQmStRzVhgiWZjfCX8Ll4wDNOC0FsIus +O3kL6p4TNlPlg3rx+kELIYMSzGQipmBSziXC7Xm3/nFGPior1gfy+62SXlVV37zG +1LCJwVdx+0bgcv/OD9FQx7tOHvrvcJgRcwKq23ElnPQwTmlnqzp7B77Jy4E2QQ6U +fAo10fcjFu3yjyhTeuLkgei1uRJL7yKoWjok3kOGfxy6sJGY6BIpIHVtWDGWVK5r +SY+YR9MfnORd2PCEmaNF7mpOTf0Cn1JAXFrAFVYAWVXBQX4y1nx/9UlSi3C3gnpH +ExoFX5Kc/Cvwib4W9md5ZtGKszUAEDZv7Eb4G/o5pWP1C5dyuX8qP95+2Qlwxmom +40rm8Sk+RBlubBUxPeevxQXIATLPDEKStk/5guFzrOilbDeLaSJYS424eSpDGgvt +8fJjJxM2Rc+zmFob6vpKGFhGJCxSIFArRO9J3g0G1LGJj+38ZL+LWqcQbjqy2pEF +DaVoVyi9q09uZ/NOC/JDV+pbTOoShaWPYOoYE4F4A8EXkpRBhQxT6BYEvPLCgiCz +TsHpyxJ86L++bxtv9JhE6w19geq68spOgobXuw== +=pKuo -----END PGP PUBLIC KEY BLOCK----- diff --git a/.ci/gpg/secring.auto.gpg 
b/.ci/gpg/secring.auto.gpg index ed1881bee7d4269f6706e037441007fcb48bd9a2..e7f523e335884bf4d1af787fdedfaeab84a4956b 100644 GIT binary patch literal 6556 zcmV;N8Dr**4Fm}T2msfE=79GlkN?u@0pUw-Ous|iZ((W(j#8Huvq6koTXbYz1Kp6`K%Hah~S}|Ha3TA`XuZ(tnfaV3>+)wA+lH0a*4il3@F9TjI?k z^Cmvvlb|9vj9XTsZ}5*^0`&G{B1^gA0b!;d3&CJcc(|$hzNs<=Ou@8-PO+X&`A~4P zSkT>D|ENh(vmm`#w5sm#Do5FU&TIC(a5ki)MI`17e*k*!xs#qVn%oB{R9y9Hm37Ku zV*iLpUUo{5tmln()(uijSQLuGhYcDDJ8sE6s0$lU5IsZi9B^U+-ZiUqUcV_n0HJ<{ zF_9_WyS{imAYCU5qLM*bG*+FW=a2L6qZVl`5x$Ap!?>*0`6&_# zteqk-hK#amd0tbjn0k=FtzFqpqd$ZIf(jC==)Q^vA`>jB=LUaL!FXHcTE=)txvUFv z5`JsSk6sQI6X*>ptuEo>q__<%#;uTkngn`#!xL96V0}VC4#m21UZc>hqd!z{tT3Cb zP+!k`6L->km?6P5_~A0D`*t#`+y>B@g5i3vz-zR#XY`7^g38U-EhEnQ}3oeX;*N^-96Fyc&yN&B{_F9Sq1|VWVAmn z5uTMikjE768ggLrJE6d$w3X%!2TF75SZ|&%oNYGThGAZ3e4u1h3IWGf!*IbjRR&>I z75YE9S)&kaY?|6zXl0~Z1~udJ4T%O1cS$6Ff;8 z1FSl6sQ)i;X~k0%ooNW&pEIv3r!{k~1~}P_Xg-=^ck&ekDO9+iAi+M1O-1hB)+&T9 zV|419FS<{tNE^oB1<;VHpilZsQ^{iXjm+v8v*NZJValxwp^6Y=4_H~OV$7{`KpTNH!E7o5k7VKcnxF~^OJhT`mfPB0UfyLblZN+5Wmh9Mm2Q;<`{8)hV_IgOl3sDxbnVKPa>h#y9^+oXE52ihVs0tjk z%|oK)2cy!p>cjF*u@;Nqo*Py*c2*6mDqD{FvnYk>BHNfeJ27B-Izx@zvvcq5K9(Ah zzB8g{!c_gss9=xcVV-7M@K7@Vl4f&`MOY^3R*cHKJ2a|{$y)79IDZ+KD`$`oEyi7} zyJ?3(RxC@7pnG*Ji1d!yLGc*W4IyOi(9re;qdBxRrgXgUgMDO{Q^dBYQUUdw70sZ& z6F~79-0Kbv?ZENLga)g*D=as*Svc6l+W9q2az?J4m_Pyh1-kvK8a$k+nMIl9_XPci%4tNk@RI;rDM@(xR2x2&)s^+ zxqH<9tqBMnwu;NPAlgxyQJJ@%KO+0$ae+>_6)3YG*ieF(yp?eoIkA0>-v?qdwK!Hv zaO&);vn3ICO@l_k%`ARj6F3NTWEa-`=QSc&slh&3_WW^MM@-aW@N}~c7g5-@6|VGP zoTtA?6JDc%F%H(WbtbkWr;cUf7*YlHQNEjSu5Dl)m(se=98Se=*1P&u-eeS_2B`ko zx=C_~A0qx!?p>*D)*9v5LBAZTYz6)NgYlI&ud8k=ox=y;S>&W{KnN`8E1y7NvTo2W zi#s|#qPBG>79T)cllrRCN6;k*;oMFo%JSI?tdTqbB1iv~*->6uw2GqQz7$>AGxKu* z!}H3qlA2lN+Mv<&D;?d9CU-2s&zh)F}|^O5=k*A=d0~hau29LuRaARf+CUJ(%9c1J=|og$1%py~@L9 zECiL#d4R_Y&{o>_(!e_M4)0Wx+M_CUKq?ear!m4st?p@M`wC8#Ivp)_ZXhi+C$h{E ztd8i-eUdWK2*xy+_oIem#P}6DQt8ac0ay+)afRy8G?!jo-o91ES8(^p?!oFkAzJ+4 z-_nHvgCQpFuf9+Z5{LR#OG$g$e&y(g7FO2eWp=eQrCOI63<6d&T>&w}Q5OP^O1=|U z!>1o940ZJV*XYMHhH8fWE}Ih&tHnf#Yh>RcK*4LJWvyOZ`BYvBx%rv>HoJ^&iBSuJ z8J`-pT~OV!>70qz&F#6QUDB5&#tgrr_r>ROmtB|Dyq=RVo}qrWGD)46L+nKr5jAy1 zWG2kG!43uT5;9~&&XPLEgVpyOMxs0K+-kZAs1ACwA9o>$jdgMfPhM(y0XZ%%4 zW^?Zf8ip0#y_EnyDT{x<`QN@L`Di|m=a<<4Qw^EbmSU~Yh@W!{rCgr5phT-9adhlf zuiiw;1I|g&tGXP)b931oGSb2mZEAfacBhSF;0yftVL@sKzXq&Zome$GK8q84b=JC-Qq};P*&F>4 z2c7Ub7MKgS{Zl!XkD?6{UuMrfiE1fR$M~*Gyq0{$vL-iT)1rK!dpXc_3#WOeFWOLR;JYBpIb{u1N8$BL1-GA7)3>Ans*b#-SYIN z4Q>n$(Pau$ky}~j&j`dZL%MYkuR1gtRj%oxex(^VHWX_H*`|exq2B{Z0UnvcZlcbM$ z_Ew-a1Sep{BF!b^6(*=to~icSN_*b>-5N@}0@9Yw+PI!950awLLhm#O4vH{PPVwgw*w(0CRVo2V};PpHa#FSZMh! 
zhgG|MS;vT@&k^OpH|ZQ@=+8vM2>?=c{q@fUT}RzK)jwjqU+GN@a3TXRclv8FEC$$P zDVeGUlMvE;O8WX(_XPM1pC8#^MrULecr+joLE}-LOyk9Wu|IH37x<*ZrCZfr!F>{A z-g>xSEFoU;@h^G~ToB3Klv`?+{`*pUyGAqwmQD{Dsbi=Sp)@iDS75woimhCaUvztd zWS|8#)7m?*G&tw^R8;1Yn)IWlP=-)x;f0XrDAFg^p^$;9G+b*x?zs>a0wj}t; zlgcr&#h%^nW6E|yVSW%5m!7G|+6H*K2)yudU9dP-%JJClqSesz@pD+9P2U*qZGY5+ zE2H-N3;v*g4+53_shc&#+kfb`tRhyrNGSOV)_r3*cf8jv@ea?OFx&TpZfuzqR%=I2F29!_L{c)#+Dd6|Iz9u_fcBpe}{oCFF+SyhVf3 z*rxyKtm!~=fhT_@CY2RpLg6NK=KOt#7+~Cu@5gW?$@|@%^r&Qxi7wiKyY*lb@EkR` z@%!JXiOd+psg~aC*ZwZ%_~@eHYr%=@ck9`z{M?VQK9C=yyF{ zT?)0!e;wYiXj?z%DcWPiJv91>V)hy0hom-=19}Qm3#s$3Nt1V+TxS|L!ku)P zx@QJzw-uC<<)Ft%aXAv{w>qVKzJhE#T2cbY<4j@}$diG42$WM~tTbadMkQ z!$FY71x?Q`@n*(}vVKFi0W60kw*RG{hYTOCs>VwjSi7{FYg$dN^XI#ERUm*($*^&E z$^vwzAroBNI&#h#lk#Vy`I71ts~^85_`Z{GO(^VL@x% z00KKelCcI#XaM$b%}S5vQ-i4`!d;$9fi!M2$Z=>ZB|z)FtCFch-)-BOC~Y@Au77Zq zf~VV}L?zp@Opf6Dc%-tvfvSg+%hh>|rN+}C=F>(ptQb%Ws%!65c^L98BZCZjXEeY% z|9o7iEsAfG>Ty`>r>ubg1vz)*49jx%&2VrRdQpL?fxpk_YEp*&phrN zj#b+kENWYY-0VI-pDC-yxvc895RQhD52a?ZzAKj^19os3!tFwfpK3xq+GS~ld-`4& z$}i?}kBGnDpEhz*CuC{Plo5a2m#|&^f_s~aH23EYUmyyMiI+E#mz!{*AV3pnL}ww= z(9`L_ck~<`W(My7y&Wz|VIST7s_PfS$g1>Gk5VEgY>? zp^XN>nTIjCpxoAvCTJ9qXhUytstrkc)EhE><<4dY{;0FGQ((0Js;=tDs31PyC*ahD zQ12aIcV*Cm-N3PofaC(NF;TA_s{Vm)OWJ@)H^^{zo!fb2%Vpm;SE)k-!1$MRgEjUtT9rx_e zNV@G66UB4Ig)>PrC|-&I4{wc%B$7x@eG3iMdonp|%Ul{Ea@nr+5eUeUP_p&&tq*v7 zh-9+NN=h{GqPiE+*`jR|&<3nIQ5 zk9y93)^pPZtX?H!({-EgvsYg+eqIvrCA?*3COrxD@Cs#T#@Oy~vbk@oJWv4^6b9@-sG`{#E=%UZx=@dTGI>7vCPibu7MGft zo?Jb&FIUB8!ff{f%6)=^yg4LqUjuF#wm1wU#th_OGMot!>38oND97E99xurfZ~ zos2duNRtT!W?@{1n*g z!_$^L5udjn48B8(X3T&~)`A$iMI-}`SOUf8sv(3DZI4z0?lqV->1FN3ID-`4$?_p; z+Tn=h|Fqrg{Q;lk{)fBWX_9fKO^;VB(c9R#BKfC{0+NStWX|1jlNs54VLEl*DBPZN z`kNd-nNd?9v7q0lpT~$&AiDIgoby|1?xU519`cO)tPQoN(;;bjM?yoCUi}b57=J>+g(>w80 z6pBi)uDH7*N3X)Jo!|AAOsunvlH-D$6=9*jb-_aX{>mDxG+QkyPbO(HE*NbenKI*@ zE`3B7Tv*St%cPffPtTJ=ND-p#-k*cI4$Gc9ME_#;!iLsYewZtWCiul^Pws-+&l;b& zqrp*b|D&g&KgHb=pv;ABU*e1Cx!aUtxZ@7=4J54tt7M+=P=a(9n}(ZL{~=n5!_Z&P zfDHdRxTvPXX}wl^GvUv=p4#-_^) zRk!aruaNBK*3$Sqcc6i=)RI1c?AgqCFC9r{6Kr+)4!a9L_`i3w85LB{=$B$oIYPx2 z1OnPm9Im^k+T5x;pRwW;FxF4(CN!R4r^ohgWJX1~JKWQ@bjb_$U*BaDF}i=S7|0?y zd`H3{{P?!&;2S>@AE()sYC>1M^5@CG&kZ$aj#&GiQ{*Yhe{rR~R;P&Z@N59Ze3PFvq+KANj2s*fN;Thu& zI+aQYvcY1y1C7$o+r$KqYGjoC_hLK0;>+_T8Lz)xAgq#mork%$Sp^YG zV^9JK=8G561d*b5^L&lAcZ6y3wNG3EdiEm2r%r^oM+Zbi(VM6tu6(2tSkqM^W4yPX zJ|P%m$nZ^@ezFe^KxyFdJbdt{s_wjZ0GUZNFdzn{*CN2jHKV8u3YF}zgOumtn%f{; zq2yUK$t9p1%f@tpJ6jELQsq03mK~}Y(g<=(mfVRR2=a6vNv)w?JF;YXvF)c$wowzM*G zWB#i?n2SeY>n|G0WWfcJqV9L@Ym`nB@ou{HJGW;4GFQ9e(Q;UQ6#y4Lxm3K8R%Ghq zfP!_a6LN4K$EZ7$XlX41Mpz_I1&ZMx)MNCU&KsA*>@4vN*J-A!kkH@_dd+2x+Vm3` z1jbANyVC$9?qJ2vtl4Vre3ld5=X#5-zY22y!R+vzqpQ(!YbThOIhjYt zxjs5s2c$3SF%TkS6pQ=EvZ)d9Uziem>iKCb{+{xaISavIrf{UFs(NKyil!<>kbOA8 z!{{=z!DcHUoghv;g9yn4D$x&PlX2cINKclxfo^^%dt^k1;T|($bJxXkBWrJB)u}0Np9|7o7#h`qb z#MP*+jK&&+d|(|&yP^ln_N&K&RMtC2>>Badc~>MewPi)6Q7ZA0D0fKymBjuG^E)sT z2znJCC^F41c--GX|Ef$99<18>ck4%itn$4}VgE7>V!NL%JkPZmyd>AtxW8)@qrA@< z?OLRl&tr<{LR6*{Jx88$1-fi;3cQZ{$wDooBMxX1F4WktNT>%*pJAVV88J@MI`6pT z@@6>;iXZ=L*Pe0tt>qdG-?s{69Q*#m^9UXbF#Y+HNO0tJ`*Bhe(SwSe5)e*|VZ~NG z_0hC&6`qE_X4nM8tW+U2DF%aJ)p2do9S0LX0{Nv;8G1&VT=LLy?U;YTv0%$_}uu9k89lW3N$_TM{e z=aw)@gfr&$ph;nvGZ$o*-56M$ zh>El5Cl78tOE~U#0tnWn#?^iI(MB`hwYwqRwv(0%m>@>J zx-XAP#&rO|AO|45PffofDXkd@UZ_nqjnstLK`JLF+8~U4RD{U+`|Fe{C;=|VHBOS= z-nN94C!!g{B$YbS3R+(pWdYm_Kli8Y}Qwl zWJ9Q7o0R<$Az#Q1pQ}WHt%bMg`b$k;8YP0^lINST=HECH0d)n*gv#c#WY|P2L)!ve zVR>p$Zt}J ze}|2o4+Nl=>L~{g6rAGuGxA7}q?sNH6A=lOdp(z<{qP!j9Kp^FgT1&dEY!89oTv_< 
zzad-AA5%XmqG`agSMd44L>sKK4mwo5-AW#iLaSrceaewhS&%#7^p>V49&l_dO8x}wrl7gBMs3m6P7lsU_b?On{dRB!He2 z9$^M3@@P`!2+hx(Qb!`2R%PIa06oo7=|Z{B(LS9~EWflS_qH%)Zdc56O9aUW<0iiP z%K3s72B3~EVN3`@#er?0bTUByV&mF6%|WBm?G>{C@dY764Z5=#gfU&NoX23nQ;Orw zv12?nmuS0kHrsigV}dBMR7kTGux`>xSuWhJ>B|&0`mT^VIkU!>ZC&Ce3Q$SSTAYb@ zK%GKN0;7rT2?}Z-eijG`Zvzp?T{$wN|LNCb<%K>PrZoUDKGMpE#+HQuln=>M_i$(2 z<>a~%O>&tt;M@OrI&K%0dlVaNwAaVpUK-EmS6+w!S+6%pD9zptA1s|jYe5hmG2G@d z^(1^+&6t|?+8p2ID1j+)qejGJN8U%~Eg!ImZ6qCrTbSo_noW1bxCjCb?2-A+l1#|G z_omk3)&Uo6#vEta(4sk~OexD$Im=qLk&488a$cGGK+^99iv+eJG!HbY-J75ohJ2}3 zDTfZ6WRrZQ#*^Bey4n=eZ4gG*Y@s`YXO!8d`$lv8PzYb`)vYBPIDCHH1EjNOKq@eR ztBYTVYX{&mrDt6Pw-0?7lqgEGi9pY@A5hv7Tn6BM-$%7NkfV z&C{_>d-uCdY?R(4s}cB!@QH{`u0>SB$m~WYW~a^mN|X$$G4Z*<@{EdpX+;r)E`TN^ zldK1fx7NDxZ8Afl{lk>9uBrdN6HAvk)b3nzecku2qT6!ZIU&b`QGX8Du=`7YZy)MG zl=w53ywn`}kN7!(FzD)d#AG88DeBLX@K<{h9{%(KF0U@ld`wLcd6}>uHq})z~?Q zOtZa-&Og@JV(JCMK2$Rf)JSfH;~_VhN&p*#U?Q?X%PF@*!vR{u9CS&E)MkUR&w!OW zo6+X^>sxUAYcSo%K(`<-5H}7kuFx-aB#($58C;p-sw@_`i9ffBTdG)72kWsQ>eWPd zf6Qjb?C-x%3<|CX_13t<_MiX1lXCv2Dcu2Fa3S#43X4CO=FU)Nav<27%!yTPI)Mxr zWcWeJj<`^ige;Uw^WVc77CYp3NpY)2gyK$-L4_%syhnk&(M9|FTtol51cU;*b)IA+ z$G|t+Ta$34NpAoY9u^r>x4+Vd1xx}K?)Cn-S|b=&nntUzek{U)FqSRF55?{1D0<3# zI39ilZUj2ev5@&NPUPH+fWemX+MDu5^zD(@wL@7+?80$TU2^D2aN}}tRYOKNQ(;`L z^~;CbcZ`NhBk;W68N0Vf;u#Y*-%2T#n^sU_yk7gG|7phqkDg0LjE?ID2wQuig!GC$ zF4nSsX2|qcDj4BNETehYwv&viGBa)S>=~oz;x9sctdxQI*%j zg-0YMQQ=VxffWTk(y@qB4qZdC_gI@aOiaft2pq1*Q*0vpRI%tH`?*(iQcaU`rVR%P z=u}mpfriX7WD9XwCHhmbRwm)AwVlcs`$|7OZpS{pv*e&hb22a~d(20N+XANeAb6FG zrpd-NIH^*)8@<}Nt2SAE#2OF!*_wAkxTWLcD$#bBTxJcGH#h)?1*|@DN=;6S5e-E#N7+9*1 z|dq4mOog5jerH6UnjfPyW;KCUEjNW~2tJ+zv1>bI3v*119UUL%KN@&R8= z1hF&s_(;GMD2W6k&O-_<$_Ov1hJcy7mpMd8TZpY5z!S07Dam{5T=OfAPg{t6rWo{E zML;1+r0VHav5z>jvThhjI?N=#DA$ja7ugO zG3^+c=6oq+4w*j$&68Z(`@M~0^roziB$mPOQmH}^Wnu2uN5~Anykg2VujI06$oS_J zez46vuqjoE3J^cpXZpzMKXQjeoDg&Ri$6VpI}S29S_`4q-{*j z)W%ovQgX*;G@jeGmGxXkLUbngQt`UbTlb-~EdQ9rDzeDn@E>xQJb97_aJ=})aCE`9 zvaEo-*>DVf29xVG-mr%3c$6ON+w{S=LTbI$&TlJlYw zTqVwdqOIO>uB8co;X>v%e>kGBZ|$NV*i`00h?Y0+dlBvLhfZ-!r24bb!da@vA*Ql2 zjlnxcx4jfc5I{54rv1IHcQ9?H6GuqZ#j}Jk+^l`cv6ufu%>_zY5MFOaOTdda6^8;) zWZYN8S~nosb~uPCObto6sg`vDz>&vWrGOA4@*QcK?RGR>)u$2Ld!dB<#8G)QuwQ*x z6##XKtyPil)Q9RfrF`ENftQ25elU6wOOFU3YoVx*{+yV2V76W4G0R9DVXviNY>W-_ z;<$8yETGN(?$(VsXfMpHys_AQRtCec=5{d-829)z&+N2n zUXM~Wq@lIQ%~bf1nYh1^4%o24h}SJ35scs>%N(8VnJM~j&chl9BqTPGM>(YtR%IRg zIE!-C4w06J*}zAWgXuErUy7Jz16lo0{^(A*`-UoVj-Dvi$GUAYT9n~a?T;6 zWT@xyHf@+Gh6xby7}{x_p!PFmh}NX6+GRxF;y4}Ql~W%$jT1vDoBJ&^JTTs4u6}{g z0RoP;M`E3U5XW%U3RU?|OTU^buW@Y6&0-+Sxc_oF>hUmo^1Uk(x*W7St*Ad5`Kic&oV z@kCi4WEZt=0O~h_AI9oCs|{2jgbkP-fULl=znJz33vHr<+yjQ~DDtqr!=f;CkI4nE z_96aAGiJl2NxKCa0o6|_3gks3&$pA5@`R<84>rxqX#6rz|E;&d$!B!BBXMx0|8t?lgjmr<)# zEU;#p@7--nVV+1XOEk9v2(@3b^$sV&PrAgc76>!k8I3J)Co%SgoL*Pr$8RR7y@4B9^E~_8HsRf_tNjrQT$wmUhXhPZ zSwXQ6)`GQo)9`)zr(%y7QBlImm>Y1H@yb+;qRhXCZ7P7+D?O%*SCtj<3LKRKXM7Qu zCdITO&1bj7oHibG*K_&!?E|6{LBx4edyp$CNWbP@|GEx8MI1 zu=+5~s_5K-OvTz+K;8H#l%_X9{DEt%3QWlNpQ{1b*Ji-w&LzphSVN<9toT#qBh?|* zjCm(h!pLFImugGEj{(?I!NRGcEi=Cz7`Za21}bTYaH_nM8PyJdI@{PcbT1og_wNYw-ASfXZ}w0~oJl zo}#?M{%2EH8m+a*L(z#KWWA9)4l|Hd z=F@Gyx|oWQ43_I>>9Ouov=+>l1jl^g&GA{q9kyv{0tx?}n880)3EG? 
Date: Tue, 18 Nov 2025 19:06:24 +0530 Subject: [PATCH 30/31] Add replace directive for dario.cat/mergo to fix upstream merge --- go.mod | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go.mod b/go.mod index 938f5f5bab..b5ba1fc7a5 100644 --- a/go.mod +++ b/go.mod @@ -49,6 +49,9 @@ require ( // reevaluate when we bump to k8s v1.33.0 replace github.com/google/cel-go => github.com/google/cel-go v0.22.1 +// dario.cat/mergo domain is having issues, redirect to the actual repository +replace dario.cat/mergo => github.com/darccio/mergo v1.0.2 + require ( cel.dev/expr v0.23.1 // indirect dario.cat/mergo v1.0.1 // indirect From 1263e7e3cad3adeb93e130b36c27b452dd302772 Mon Sep 17 00:00:00 2001 From: Manish Pillai Date: Tue, 18 Nov 2025 19:08:32 +0530 Subject: [PATCH 31/31] UPSTREAM: : Update vendor directory --- go.mod | 3 + go.sum | 4 +- vendor/cel.dev/expr/eval.pb.go | 361 +- vendor/dario.cat/mergo/FUNDING.json | 7 + vendor/dario.cat/mergo/README.md | 5 - vendor/dario.cat/mergo/SECURITY.md | 4 +- .../AdaLogics/go-fuzz-headers/consumer.go | 48 +- .../Masterminds/semver/v3/CHANGELOG.md | 28 +- .../Masterminds/semver/v3/README.md | 18 +- .../Masterminds/semver/v3/constraints.go | 127 +- .../Masterminds/semver/v3/version.go | 173 +- .../Microsoft/hcsshim/.clang-format | 12 + .../Microsoft/hcsshim/.golangci.yml | 43 +- vendor/github.com/Microsoft/hcsshim/Makefile | 116 +- .../Microsoft/hcsshim/Makefile.bootfiles | 197 + vendor/github.com/Microsoft/hcsshim/README.md | 2 +- .../Microsoft/hcsshim/internal/hcs/process.go | 9 +- .../hcsshim/internal/hcs/schema2/chipset.go | 2 + .../hcs/schema2/{cim_mount.go => cimfs.go} | 8 - .../hcsshim/internal/hcs/schema2/firmware.go | 8 + .../hcsshim/internal/hcs/schema2/memory_2.go | 49 - .../hcs/schema2/memory_backing_type.go | 21 + .../hcsshim/internal/hcs/schema2/numa.go | 19 + .../hcsshim/internal/hcs/schema2/numa_node.go | 17 + .../internal/hcs/schema2/numa_node_memory.go | 19 + .../hcs/schema2/numa_node_processor.go | 17 + .../internal/hcs/schema2/numa_processors.go | 21 + .../internal/hcs/schema2/numa_setting.go | 21 + .../internal/hcs/schema2/processor_2.go | 23 - .../internal/hcs/schema2/properties.go | 2 + .../internal/hcs/schema2/property_type.go | 1 + .../hcsshim/internal/hcs/schema2/topology.go | 12 +- .../internal/hcs/schema2/virtual_machine.go | 39 +- .../hcs/schema2/virtual_machine_memory.go | 33 + .../hcs/schema2/virtual_machine_processor.go | 21 + .../hcs/schema2/virtual_pci_device.go | 3 +- .../internal/hcs/schema2/virtual_slit_type.go | 23 + .../hcs/schema2/windows_crash_reporting.go | 2 + .../Microsoft/hcsshim/internal/hcs/system.go | 21 +- .../hcsshim/internal/hns/hnsaccelnet.go | 4 +- .../hcsshim/internal/jobobject/jobobject.go | 20 +- .../hcsshim/internal/jobobject/limits.go | 1 + .../Microsoft/hcsshim/internal/log/context.go | 28 +- .../Microsoft/hcsshim/internal/log/format.go | 4 +- .../Microsoft/hcsshim/internal/log/scrub.go | 15 +- .../hcsshim/internal/vmcompute/vmcompute.go | 2 +- .../hcsshim/internal/wclayer/legacy.go | 1 + .../hcsshim/internal/winapi/cimfs.go | 11 + .../internal/winapi/zsyscall_windows.go | 146 + .../osversion/platform_compat_windows.go | 22 +- .../hcsshim/osversion/windowsbuilds.go | 7 + .../hcsshim/pkg/ociwclayer/import.go | 9 +- .../github.com/antlr4-go/antlr/v4/antlrdoc.go | 8 +- vendor/github.com/antlr4-go/antlr/v4/atn.go | 8 +- .../antlr4-go/antlr/v4/atn_config.go | 3 - .../antlr4-go/antlr/v4/input_stream.go | 2 +- .../github.com/antlr4-go/antlr/v4/jcollect.go | 5 +- vendor/github.com/antlr4-go/antlr/v4/lexer.go 
| 2 +- .../antlr4-go/antlr/v4/ll1_analyzer.go | 1 + vendor/github.com/antlr4-go/antlr/v4/mutex.go | 41 + .../antlr4-go/antlr/v4/mutex_nomutex.go | 32 + .../antlr/v4/parser_atn_simulator.go | 4 +- .../antlr4-go/antlr/v4/prediction_context.go | 60 +- .../antlr4-go/antlr/v4/recognizer.go | 2 +- .../antlr4-go/antlr/v4/statistics.go | 3 +- vendor/github.com/antlr4-go/antlr/v4/token.go | 82 +- vendor/github.com/antlr4-go/antlr/v4/utils.go | 53 + .../github.com/cenkalti/backoff/v4/context.go | 62 - .../cenkalti/backoff/v4/exponential.go | 216 - .../github.com/cenkalti/backoff/v4/retry.go | 146 - .../github.com/cenkalti/backoff/v4/tries.go | 38 - .../cenkalti/backoff/{v4 => v5}/.gitignore | 0 .../cenkalti/backoff/v5/CHANGELOG.md | 29 + .../cenkalti/backoff/{v4 => v5}/LICENSE | 0 .../cenkalti/backoff/{v4 => v5}/README.md | 15 +- .../cenkalti/backoff/{v4 => v5}/backoff.go | 14 +- .../github.com/cenkalti/backoff/v5/error.go | 46 + .../cenkalti/backoff/v5/exponential.go | 125 + .../github.com/cenkalti/backoff/v5/retry.go | 139 + .../cenkalti/backoff/{v4 => v5}/ticker.go | 18 +- .../cenkalti/backoff/{v4 => v5}/timer.go | 2 +- .../containerd/api/events/content.pb.go | 98 +- .../containerd/api/events/content.proto | 5 + .../api/events/content_fieldpath.pb.go | 14 + .../archive/compression/compression.go | 4 + .../containerd/log/context_deprecated.go | 149 - .../containerd/containerd/pkg/epoch/epoch.go | 25 +- .../containerd/remotes/docker/auth/fetch.go | 20 +- .../containerd/remotes/docker/authorizer.go | 27 +- .../containerd/containerd/version/version.go | 2 +- .../continuity/fs/stat_darwinbsd.go | 26 + .../containerd/continuity/fs/stat_unix.go | 4 +- .../stargz-snapshotter/estargz/build.go | 8 +- .../stargz-snapshotter/estargz/gzip.go | 6 +- .../stargz-snapshotter/estargz/testutil.go | 13 +- .../image/v5/signature/fulcio_cert_stub.go | 27 - .../v5/signature/internal/rekor_set_stub.go | 14 - .../go-systemd/v22/activation/files_unix.go | 10 +- .../cpuguy83/go-md2man/v2/md2man/md2man.go | 1 + .../cpuguy83/go-md2man/v2/md2man/roff.go | 15 +- .../distribution/v3/.golangci.yml | 3 +- .../distribution/distribution/v3/.mailmap | 5 + .../distribution/distribution/v3/AUTHORS | 6 + .../distribution/distribution/v3/BUILDING.md | 14 +- .../distribution/distribution/v3/Dockerfile | 2 +- .../distribution/distribution/v3/Makefile | 39 +- .../v3/configuration/configuration.go | 487 +- .../internal/client/transport/http_reader.go | 12 +- .../distribution/v3/registry/root.go | 3 + .../v3/registry/storage/garbagecollect.go | 38 +- .../v3/registry/storage/registry.go | 4 + .../distribution/v3/version/version.go | 2 +- vendor/github.com/docker/cli/AUTHORS | 31 +- .../docker/cli/cli/config/configfile/file.go | 98 +- .../cli/cli/config/memorystore/store.go | 126 + .../docker/cli/cli/config/types/authconfig.go | 4 +- .../docker/distribution/.dockerignore | 1 - .../github.com/docker/distribution/.gitignore | 38 - .../docker/distribution/.golangci.yml | 33 - .../github.com/docker/distribution/.mailmap | 54 - .../docker/distribution/BUILDING.md | 117 - .../docker/distribution/CONTRIBUTING.md | 148 - .../github.com/docker/distribution/Dockerfile | 60 - .../docker/distribution/MAINTAINERS | 243 - .../github.com/docker/distribution/Makefile | 102 - .../github.com/docker/distribution/README.md | 130 - .../github.com/docker/distribution/ROADMAP.md | 267 - .../github.com/docker/distribution/blobs.go | 265 - vendor/github.com/docker/distribution/doc.go | 7 - .../docker/distribution/docker-bake.hcl | 56 - 
.../github.com/docker/distribution/errors.go | 119 - .../docker/distribution/manifests.go | 125 - .../docker/distribution/metrics/prometheus.go | 13 - .../docker/distribution/registry.go | 118 - .../registry/client/auth/api_version.go | 58 - .../registry/client/auth/session.go | 530 - .../registry/client/blob_writer.go | 164 - .../distribution/registry/client/errors.go | 160 - .../registry/client/repository.go | 870 - .../registry/client/transport/http_reader.go | 249 - .../registry/client/transport/transport.go | 147 - .../registry/storage/cache/cache.go | 35 - .../cache/cachedblobdescriptorstore.go | 129 - .../registry/storage/cache/memory/memory.go | 179 - vendor/github.com/docker/distribution/tags.go | 27 - .../docker/distribution/vendor.conf | 52 - vendor/github.com/docker/docker/AUTHORS | 12 +- .../docker/docker/api/types/filters/errors.go | 24 - .../docker/docker/api/types/filters/parse.go | 336 - .../docker/api/types/registry/authconfig.go | 109 - .../docker/api/types/registry/authenticate.go | 21 - .../docker/api/types/registry/registry.go | 116 - .../docker/api/types/registry/search.go | 48 - .../docker/api/types/versions/compare.go | 2 +- .../github.com/docker/docker/errdefs/defs.go | 69 - .../github.com/docker/docker/errdefs/doc.go | 8 - .../docker/docker/errdefs/helpers.go | 305 - .../docker/docker/errdefs/http_helpers.go | 47 - vendor/github.com/docker/docker/errdefs/is.go | 123 - .../docker/internal/lazyregexp/lazyregexp.go | 90 - .../docker/docker/pkg/homedir/homedir.go | 28 - .../docker/pkg/homedir/homedir_linux.go | 105 - .../docker/pkg/homedir/homedir_others.go | 32 - .../github.com/docker/docker/registry/auth.go | 202 - .../docker/docker/registry/config.go | 481 - .../docker/docker/registry/config_unix.go | 16 - .../docker/docker/registry/config_windows.go | 20 - .../docker/docker/registry/errors.go | 36 - .../docker/docker/registry/registry.go | 144 - .../docker/docker/registry/search.go | 177 - .../docker/registry/search_endpoint_v1.go | 200 - .../docker/docker/registry/search_session.go | 247 - .../docker/docker/registry/service.go | 147 - .../docker/docker/registry/service_v2.go | 69 - .../docker/docker/registry/types.go | 24 - .../docker/go-connections/tlsconfig/config.go | 100 +- .../tlsconfig/config_client_ciphers.go | 14 - vendor/github.com/docker/go-events/README.md | 1 - .../github.com/docker/go-events/SECURITY.md | 36 + vendor/github.com/docker/go-events/vendor.mod | 5 + vendor/github.com/docker/go-events/vendor.sum | 16 + .../emicklei/go-restful/v3/CHANGES.md | 21 + .../emicklei/go-restful/v3/README.md | 3 +- .../emicklei/go-restful/v3/compress.go | 10 + .../emicklei/go-restful/v3/curly.go | 48 +- .../emicklei/go-restful/v3/jsr311.go | 19 +- .../emicklei/go-restful/v3/route.go | 2 + .../github.com/evanphx/json-patch/README.md | 4 +- vendor/github.com/evanphx/json-patch/patch.go | 101 +- vendor/github.com/fxamacker/cbor/v2/README.md | 408 +- .../fxamacker/cbor/v2/bytestring.go | 27 + vendor/github.com/fxamacker/cbor/v2/cache.go | 22 +- vendor/github.com/fxamacker/cbor/v2/decode.go | 102 +- vendor/github.com/fxamacker/cbor/v2/doc.go | 51 +- vendor/github.com/fxamacker/cbor/v2/encode.go | 191 +- .../fxamacker/cbor/v2/encode_map.go | 10 +- .../fxamacker/cbor/v2/encode_map_go117.go | 60 - .../fxamacker/cbor/v2/omitzero_go124.go | 8 + .../fxamacker/cbor/v2/omitzero_pre_go124.go | 8 + .../fxamacker/cbor/v2/simplevalue.go | 29 + vendor/github.com/fxamacker/cbor/v2/stream.go | 4 +- .../fxamacker/cbor/v2/structfields.go | 15 +- 
vendor/github.com/fxamacker/cbor/v2/tag.go | 35 +- .../go-jose/go-jose/v4/CHANGELOG.md | 5 + vendor/github.com/go-jose/go-jose/v4/jwe.go | 19 +- vendor/github.com/go-jose/go-jose/v4/jwk.go | 2 +- vendor/github.com/go-jose/go-jose/v4/jws.go | 74 +- .../go-jose/go-jose/v4/symmetric.go | 12 +- .../go-jose/go-jose/v4/symmetric_go124.go | 28 + .../go-jose/go-jose/v4/symmetric_legacy.go | 29 + vendor/github.com/go-logr/logr/.golangci.yaml | 16 +- vendor/github.com/go-logr/logr/funcr/funcr.go | 8 +- .../go-openapi/analysis/.codecov.yml | 5 - .../go-openapi/analysis/.gitattributes | 2 - .../github.com/go-openapi/analysis/.gitignore | 5 - .../go-openapi/analysis/.golangci.yml | 61 - .../go-openapi/analysis/CODE_OF_CONDUCT.md | 74 - .../github.com/go-openapi/analysis/README.md | 27 - .../go-openapi/analysis/analyzer.go | 1064 -- .../github.com/go-openapi/analysis/debug.go | 23 - vendor/github.com/go-openapi/analysis/doc.go | 43 - .../github.com/go-openapi/analysis/fixer.go | 79 - .../github.com/go-openapi/analysis/flatten.go | 814 - .../go-openapi/analysis/flatten_name.go | 308 - .../go-openapi/analysis/flatten_options.go | 79 - .../analysis/internal/debug/debug.go | 41 - .../internal/flatten/normalize/normalize.go | 87 - .../internal/flatten/operations/operations.go | 90 - .../internal/flatten/replace/replace.go | 458 - .../flatten/schutils/flatten_schema.go | 29 - .../analysis/internal/flatten/sortref/keys.go | 201 - .../internal/flatten/sortref/sort_ref.go | 141 - .../github.com/go-openapi/analysis/mixin.go | 515 - .../github.com/go-openapi/analysis/schema.go | 256 - .../go-openapi/errors/.gitattributes | 1 - .../github.com/go-openapi/errors/.gitignore | 2 - .../go-openapi/errors/.golangci.yml | 55 - .../go-openapi/errors/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/errors/LICENSE | 202 - vendor/github.com/go-openapi/errors/README.md | 8 - vendor/github.com/go-openapi/errors/api.go | 192 - vendor/github.com/go-openapi/errors/auth.go | 22 - vendor/github.com/go-openapi/errors/doc.go | 26 - .../github.com/go-openapi/errors/headers.go | 103 - .../go-openapi/errors/middleware.go | 50 - .../github.com/go-openapi/errors/parsing.go | 79 - vendor/github.com/go-openapi/errors/schema.go | 619 - .../go-openapi/jsonpointer/.golangci.yml | 31 +- .../go-openapi/jsonpointer/errors.go | 18 + .../go-openapi/jsonpointer/pointer.go | 49 +- .../github.com/go-openapi/loads/.editorconfig | 26 - vendor/github.com/go-openapi/loads/.gitignore | 4 - .../github.com/go-openapi/loads/.golangci.yml | 61 - .../github.com/go-openapi/loads/.travis.yml | 25 - .../go-openapi/loads/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/loads/LICENSE | 202 - vendor/github.com/go-openapi/loads/README.md | 6 - vendor/github.com/go-openapi/loads/doc.go | 18 - vendor/github.com/go-openapi/loads/loaders.go | 133 - vendor/github.com/go-openapi/loads/options.go | 61 - vendor/github.com/go-openapi/loads/spec.go | 275 - .../go-openapi/runtime/.editorconfig | 26 - .../go-openapi/runtime/.gitattributes | 1 - .../github.com/go-openapi/runtime/.gitignore | 5 - .../go-openapi/runtime/.golangci.yml | 62 - .../go-openapi/runtime/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/runtime/LICENSE | 202 - .../github.com/go-openapi/runtime/README.md | 10 - .../go-openapi/runtime/bytestream.go | 222 - .../go-openapi/runtime/client_auth_info.go | 30 - .../go-openapi/runtime/client_operation.go | 41 - .../go-openapi/runtime/client_request.go | 152 - .../go-openapi/runtime/client_response.go | 110 - .../go-openapi/runtime/constants.go | 49 - 
vendor/github.com/go-openapi/runtime/csv.go | 350 - .../go-openapi/runtime/csv_options.go | 121 - .../github.com/go-openapi/runtime/discard.go | 9 - vendor/github.com/go-openapi/runtime/file.go | 19 - .../github.com/go-openapi/runtime/headers.go | 45 - .../go-openapi/runtime/interfaces.go | 112 - vendor/github.com/go-openapi/runtime/json.go | 38 - .../github.com/go-openapi/runtime/request.go | 149 - .../github.com/go-openapi/runtime/statuses.go | 90 - vendor/github.com/go-openapi/runtime/text.go | 116 - .../github.com/go-openapi/runtime/values.go | 19 - vendor/github.com/go-openapi/runtime/xml.go | 36 - .../github.com/go-openapi/spec/.editorconfig | 26 - vendor/github.com/go-openapi/spec/.gitignore | 1 - .../github.com/go-openapi/spec/.golangci.yml | 61 - .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/spec/LICENSE | 202 - vendor/github.com/go-openapi/spec/README.md | 54 - vendor/github.com/go-openapi/spec/cache.go | 98 - .../go-openapi/spec/contact_info.go | 57 - vendor/github.com/go-openapi/spec/debug.go | 49 - vendor/github.com/go-openapi/spec/embed.go | 17 - vendor/github.com/go-openapi/spec/errors.go | 19 - vendor/github.com/go-openapi/spec/expander.go | 607 - .../go-openapi/spec/external_docs.go | 24 - vendor/github.com/go-openapi/spec/header.go | 203 - vendor/github.com/go-openapi/spec/info.go | 184 - vendor/github.com/go-openapi/spec/items.go | 234 - vendor/github.com/go-openapi/spec/license.go | 56 - .../github.com/go-openapi/spec/normalizer.go | 202 - .../go-openapi/spec/normalizer_nonwindows.go | 44 - .../go-openapi/spec/normalizer_windows.go | 154 - .../github.com/go-openapi/spec/operation.go | 400 - .../github.com/go-openapi/spec/parameter.go | 326 - .../github.com/go-openapi/spec/path_item.go | 87 - vendor/github.com/go-openapi/spec/paths.go | 97 - .../github.com/go-openapi/spec/properties.go | 91 - vendor/github.com/go-openapi/spec/ref.go | 193 - vendor/github.com/go-openapi/spec/resolver.go | 127 - vendor/github.com/go-openapi/spec/response.go | 152 - .../github.com/go-openapi/spec/responses.go | 140 - vendor/github.com/go-openapi/spec/schema.go | 645 - .../go-openapi/spec/schema_loader.go | 331 - .../spec/schemas/jsonschema-draft-04.json | 149 - .../go-openapi/spec/schemas/v2/schema.json | 1607 -- .../go-openapi/spec/security_scheme.go | 170 - vendor/github.com/go-openapi/spec/spec.go | 78 - vendor/github.com/go-openapi/spec/swagger.go | 448 - vendor/github.com/go-openapi/spec/tag.go | 75 - vendor/github.com/go-openapi/spec/url_go19.go | 11 - .../github.com/go-openapi/spec/validations.go | 215 - .../github.com/go-openapi/spec/xml_object.go | 68 - .../go-openapi/strfmt/.editorconfig | 26 - .../go-openapi/strfmt/.gitattributes | 2 - .../github.com/go-openapi/strfmt/.gitignore | 2 - .../go-openapi/strfmt/.golangci.yml | 61 - .../go-openapi/strfmt/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/strfmt/LICENSE | 202 - vendor/github.com/go-openapi/strfmt/README.md | 87 - vendor/github.com/go-openapi/strfmt/bson.go | 165 - vendor/github.com/go-openapi/strfmt/date.go | 187 - .../github.com/go-openapi/strfmt/default.go | 2051 --- vendor/github.com/go-openapi/strfmt/doc.go | 18 - .../github.com/go-openapi/strfmt/duration.go | 211 - vendor/github.com/go-openapi/strfmt/format.go | 327 - vendor/github.com/go-openapi/strfmt/time.go | 321 - vendor/github.com/go-openapi/strfmt/ulid.go | 230 - .../go-openapi/validate/.editorconfig | 26 - .../go-openapi/validate/.gitattributes | 2 - .../github.com/go-openapi/validate/.gitignore | 5 - 
.../go-openapi/validate/.golangci.yml | 61 - .../go-openapi/validate/BENCHMARK.md | 31 - .../go-openapi/validate/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/validate/LICENSE | 202 - .../github.com/go-openapi/validate/README.md | 36 - .../github.com/go-openapi/validate/context.go | 56 - .../github.com/go-openapi/validate/debug.go | 47 - .../go-openapi/validate/default_validator.go | 304 - vendor/github.com/go-openapi/validate/doc.go | 87 - .../go-openapi/validate/example_validator.go | 299 - .../github.com/go-openapi/validate/formats.go | 99 - .../github.com/go-openapi/validate/helpers.go | 333 - .../go-openapi/validate/object_validator.go | 431 - .../github.com/go-openapi/validate/options.go | 62 - .../github.com/go-openapi/validate/pools.go | 366 - .../go-openapi/validate/pools_debug.go | 1012 -- .../github.com/go-openapi/validate/result.go | 563 - vendor/github.com/go-openapi/validate/rexp.go | 71 - .../github.com/go-openapi/validate/schema.go | 354 - .../go-openapi/validate/schema_messages.go | 78 - .../go-openapi/validate/schema_option.go | 83 - .../go-openapi/validate/schema_props.go | 356 - .../go-openapi/validate/slice_validator.go | 150 - vendor/github.com/go-openapi/validate/spec.go | 852 - .../go-openapi/validate/spec_messages.go | 366 - vendor/github.com/go-openapi/validate/type.go | 213 - .../go-openapi/validate/update-fixtures.sh | 15 - .../go-openapi/validate/validator.go | 1051 -- .../github.com/go-openapi/validate/values.go | 450 - .../go-viper/mapstructure/v2/.editorconfig | 3 + .../go-viper/mapstructure/v2/.golangci.yaml | 55 +- .../go-viper/mapstructure/v2/README.md | 7 +- .../go-viper/mapstructure/v2/decode_hooks.go | 234 +- .../go-viper/mapstructure/v2/errors.go | 244 + .../go-viper/mapstructure/v2/flake.lock | 390 +- .../go-viper/mapstructure/v2/flake.nix | 45 +- .../go-viper/mapstructure/v2/mapstructure.go | 308 +- .../github.com/golang/protobuf/ptypes/any.go | 180 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../github.com/golang/protobuf/ptypes/doc.go | 10 - .../golang/protobuf/ptypes/duration.go | 76 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../golang/protobuf/ptypes/timestamp.go | 112 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - .../github.com/google/cel-go/cel/BUILD.bazel | 13 +- vendor/github.com/google/cel-go/cel/decls.go | 70 +- vendor/github.com/google/cel-go/cel/env.go | 238 +- .../github.com/google/cel-go/cel/folding.go | 60 +- vendor/github.com/google/cel-go/cel/io.go | 57 +- .../github.com/google/cel-go/cel/library.go | 477 +- vendor/github.com/google/cel-go/cel/macro.go | 30 +- .../github.com/google/cel-go/cel/options.go | 245 +- .../github.com/google/cel-go/cel/program.go | 274 +- vendor/github.com/google/cel-go/cel/prompt.go | 155 + .../cel-go/cel/templates/authoring.tmpl | 56 + .../github.com/google/cel-go/cel/validator.go | 70 +- .../google/cel-go/checker/checker.go | 11 + .../github.com/google/cel-go/checker/cost.go | 659 +- .../google/cel-go/checker/decls/decls.go | 37 +- .../google/cel-go/checker/errors.go | 4 + .../google/cel-go/common/BUILD.bazel | 2 + .../google/cel-go/common/ast/ast.go | 78 + .../google/cel-go/common/ast/factory.go | 29 +- .../google/cel-go/common/ast/navigable.go | 7 +- .../cel-go/common/containers/container.go | 14 +- .../google/cel-go/common/debug/debug.go | 2 +- .../google/cel-go/common/decls/BUILD.bazel | 2 + .../google/cel-go/common/decls/decls.go | 287 +- vendor/github.com/google/cel-go/common/doc.go | 154 + .../google/cel-go/common/env/BUILD.bazel | 50 + 
.../google/cel-go/common/env/env.go | 887 + .../github.com/google/cel-go/common/errors.go | 5 + .../google/cel-go/common/stdlib/BUILD.bazel | 1 + .../google/cel-go/common/stdlib/standard.go | 702 +- .../google/cel-go/common/types/BUILD.bazel | 1 + .../google/cel-go/common/types/bool.go | 9 + .../google/cel-go/common/types/bytes.go | 15 + .../google/cel-go/common/types/double.go | 22 + .../google/cel-go/common/types/duration.go | 5 + .../google/cel-go/common/types/err.go | 6 + .../google/cel-go/common/types/format.go | 42 + .../google/cel-go/common/types/int.go | 5 + .../google/cel-go/common/types/list.go | 20 +- .../google/cel-go/common/types/map.go | 36 + .../google/cel-go/common/types/null.go | 5 + .../google/cel-go/common/types/object.go | 31 +- .../google/cel-go/common/types/optional.go | 11 + .../google/cel-go/common/types/string.go | 4 + .../google/cel-go/common/types/timestamp.go | 4 + .../google/cel-go/common/types/types.go | 24 +- .../google/cel-go/common/types/uint.go | 6 + .../github.com/google/cel-go/ext/BUILD.bazel | 17 +- vendor/github.com/google/cel-go/ext/README.md | 64 +- .../github.com/google/cel-go/ext/bindings.go | 10 +- .../google/cel-go/ext/comprehensions.go | 58 +- .../github.com/google/cel-go/ext/encoders.go | 30 +- .../cel-go/ext/extension_option_factory.go | 75 + .../google/cel-go/ext/formatting.go | 27 +- .../google/cel-go/ext/formatting_v2.go | 788 + vendor/github.com/google/cel-go/ext/guards.go | 8 +- vendor/github.com/google/cel-go/ext/lists.go | 305 +- vendor/github.com/google/cel-go/ext/math.go | 47 + vendor/github.com/google/cel-go/ext/native.go | 22 +- vendor/github.com/google/cel-go/ext/protos.go | 25 +- vendor/github.com/google/cel-go/ext/regex.go | 332 + vendor/github.com/google/cel-go/ext/sets.go | 37 +- .../github.com/google/cel-go/ext/strings.go | 63 +- .../google/cel-go/interpreter/activation.go | 24 + .../cel-go/interpreter/attribute_patterns.go | 13 +- .../cel-go/interpreter/interpretable.go | 122 +- .../google/cel-go/interpreter/interpreter.go | 188 +- .../google/cel-go/interpreter/planner.go | 48 +- .../google/cel-go/interpreter/prune.go | 63 +- .../google/cel-go/interpreter/runtimecost.go | 253 +- .../github.com/google/cel-go/parser/errors.go | 6 +- .../google/cel-go/parser/gen/CEL.g4 | 13 +- .../google/cel-go/parser/gen/CEL.interp | 5 +- .../google/cel-go/parser/gen/CEL.tokens | 1 + .../google/cel-go/parser/gen/CELLexer.interp | 5 +- .../google/cel-go/parser/gen/CELLexer.tokens | 1 + .../cel-go/parser/gen/cel_base_listener.go | 28 +- .../cel-go/parser/gen/cel_base_visitor.go | 17 +- .../google/cel-go/parser/gen/cel_lexer.go | 595 +- .../google/cel-go/parser/gen/cel_listener.go | 29 +- .../google/cel-go/parser/gen/cel_parser.go | 2383 ++- .../google/cel-go/parser/gen/cel_visitor.go | 19 +- .../github.com/google/cel-go/parser/helper.go | 5 + .../github.com/google/cel-go/parser/macro.go | 204 +- .../google/cel-go/parser/options.go | 23 + .../github.com/google/cel-go/parser/parser.go | 85 +- .../google/cel-go/parser/unescape.go | 42 +- .../google/cel-go/parser/unparser.go | 42 +- .../gnostic-models/compiler/extensions.go | 8 +- .../gnostic-models/extensions/extension.pb.go | 96 +- .../gnostic-models/extensions/extensions.go | 6 +- .../gnostic-models/openapiv2/OpenAPIv2.pb.go | 1349 +- .../gnostic-models/openapiv3/OpenAPIv3.pb.go | 1763 +- .../openapiv3/annotations.pb.go | 182 + .../openapiv3/annotations.proto | 56 + .../go-containerregistry/pkg/crane/copy.go | 2 +- .../go-containerregistry/pkg/crane/pull.go | 2 +- 
 .../go-containerregistry/pkg/name/digest.go | 21 +-
 .../go-containerregistry/pkg/name/registry.go | 37 +
 .../pkg/name/repository.go | 37 +
 .../go-containerregistry/pkg/name/tag.go | 40 +-
 .../go-containerregistry/pkg/v1/hash.go | 23 +-
 .../pkg/v1/layout/write.go | 6 +-
 .../pkg/v1/mutate/index.go | 28 +-
 .../pkg/v1/mutate/mutate.go | 14 +-
 .../pkg/v1/remote/pusher.go | 2 +-
 .../pkg/v1/remote/write.go | 4 +-
 vendor/github.com/google/gofuzz/.travis.yml | 10 -
 .../github.com/google/gofuzz/CONTRIBUTING.md | 67 -
 vendor/github.com/google/gofuzz/LICENSE | 202 -
 vendor/github.com/google/gofuzz/fuzz.go | 605 -
 .../github.com/google/pprof/profile/merge.go | 11 +-
 .../google/pprof/profile/profile.go | 20 +-
 .../github.com/google/pprof/profile/prune.go | 9 +-
 vendor/github.com/gorilla/websocket/README.md | 17 +-
 vendor/github.com/gorilla/websocket/client.go | 245 +-
 .../gorilla/websocket/compression.go | 6 +-
 vendor/github.com/gorilla/websocket/conn.go | 112 +-
 vendor/github.com/gorilla/websocket/proxy.go | 53 +-
 vendor/github.com/gorilla/websocket/server.go | 122 +-
 .../gorilla/websocket/tls_handshake.go | 21 -
 .../gorilla/websocket/tls_handshake_116.go | 21 -
 vendor/github.com/gorilla/websocket/util.go | 15 +
 .../gorilla/websocket/x_net_proxy.go | 473 -
 .../grpc-gateway/v2/runtime/errors.go | 32 +-
 .../grpc-gateway/v2/runtime/handler.go | 12 +-
 .../grpc-gateway/v2/runtime/mux.go | 8 +
 .../grpc-gateway/v2/runtime/query.go | 16 +-
 .../hashicorp/golang-lru/v2/simplelru/lru.go | 3 -
 .../letsencrypt/boulder/core/interfaces.go | 6 +-
 .../letsencrypt/boulder/core/objects.go | 99 +-
 .../letsencrypt/boulder/core/proto/core.pb.go | 1164 ++
 .../letsencrypt/boulder/core/proto/core.proto | 139 +
 .../letsencrypt/boulder/core/util.go | 29 +-
 .../letsencrypt/boulder/goodkey/blocked.go | 95 -
 .../letsencrypt/boulder/goodkey/good_key.go | 67 +-
 .../letsencrypt/boulder/goodkey/weak.go | 66 -
 .../boulder/identifier/identifier.go | 194 +-
 .../letsencrypt/boulder/probs/probs.go | 91 +-
 .../letsencrypt/boulder/strictyaml/yaml.go | 46 -
 vendor/github.com/mattn/go-sqlite3/README.md | 2 +
 .../mattn/go-sqlite3/sqlite3-binding.c | 4747 ++++--
 .../mattn/go-sqlite3/sqlite3-binding.h | 316 +-
 .../mattn/go-sqlite3/sqlite3_opt_userauth.go | 153 +-
 .../github.com/mattn/go-sqlite3/sqlite3ext.h | 4 +
 .../maxbrunsfeld/counterfeiter/v6/.gitignore | 1 +
 .../maxbrunsfeld/counterfeiter/v6/README.md | 35 +-
 .../counterfeiter/v6/generator/fake.go | 66 +
 .../v6/generator/interface_template.go | 19 +-
 .../counterfeiter/v6/generator/loader.go | 4 +
 .../mitchellh/mapstructure/CHANGELOG.md | 96 -
 .../mitchellh/mapstructure/README.md | 46 -
 .../mitchellh/mapstructure/decode_hooks.go | 279 -
 .../mitchellh/mapstructure/error.go | 50 -
 .../mitchellh/mapstructure/mapstructure.go | 1540 --
 .../moby/sys/sequential/sequential_unix.go | 27 +-
 .../moby/sys/sequential/sequential_windows.go | 89 +-
 vendor/github.com/oklog/ulid/.gitignore | 29 -
 vendor/github.com/oklog/ulid/.travis.yml | 16 -
 vendor/github.com/oklog/ulid/AUTHORS.md | 2 -
 vendor/github.com/oklog/ulid/CHANGELOG.md | 33 -
 vendor/github.com/oklog/ulid/CONTRIBUTING.md | 17 -
 vendor/github.com/oklog/ulid/Gopkg.lock | 15 -
 vendor/github.com/oklog/ulid/Gopkg.toml | 26 -
 vendor/github.com/oklog/ulid/LICENSE | 201 -
 vendor/github.com/oklog/ulid/README.md | 150 -
 vendor/github.com/oklog/ulid/ulid.go | 614 -
 vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 103 +
 vendor/github.com/onsi/ginkgo/v2/README.md | 10 +
 vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 106 +-
 .../onsi/ginkgo/v2/decorator_dsl.go | 50 +
 .../onsi/ginkgo/v2/ginkgo/automaxprocs.go | 8 +
 .../ginkgo/v2/ginkgo/automaxprocs/README.md | 3 +
 .../v2/ginkgo/automaxprocs/automaxprocs.go | 71 +
 .../ginkgo/v2/ginkgo/automaxprocs}/cgroup.go | 2 +-
 .../ginkgo/v2/ginkgo/automaxprocs}/cgroups.go | 2 +-
 .../v2/ginkgo/automaxprocs}/cgroups2.go | 2 +-
 .../ginkgo/automaxprocs}/cpu_quota_linux.go | 10 +-
 .../automaxprocs}/cpu_quota_unsupported.go | 2 +-
 .../ginkgo/v2/ginkgo/automaxprocs}/errors.go | 2 +-
 .../v2/ginkgo/automaxprocs}/mountpoint.go | 2 +-
 .../ginkgo/v2/ginkgo/automaxprocs}/runtime.go | 2 +-
 .../ginkgo/v2/ginkgo/automaxprocs}/subsys.go | 2 +-
 .../ginkgo/v2/ginkgo/build/build_command.go | 1 -
 .../ginkgo/internal/profiles_and_reports.go | 3 +
 .../onsi/ginkgo/v2/ginkgo/internal/run.go | 6 +
 .../github.com/onsi/ginkgo/v2/ginkgo/main.go | 1 -
 .../ginkgo/v2/ginkgo/watch/dependencies.go | 15 +-
 .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 9 +
 .../onsi/ginkgo/v2/internal/around_node.go | 34 +
 .../onsi/ginkgo/v2/internal/focus.go | 9 +-
 .../onsi/ginkgo/v2/internal/group.go | 58 +-
 .../onsi/ginkgo/v2/internal/node.go | 196 +-
 .../onsi/ginkgo/v2/internal/ordering.go | 35 +-
 .../ginkgo/v2/internal/progress_report.go | 2 +-
 .../ginkgo/v2/internal/reporters/gojson.go | 158 +
 .../internal/reporters/gojson_event_writer.go | 111 +
 .../v2/internal/reporters/gojson_reporter.go | 45 +
 .../onsi/ginkgo/v2/internal/spec_context.go | 27 +
 .../onsi/ginkgo/v2/internal/suite.go | 59 +-
 .../internal/testingtproxy/testing_t_proxy.go | 6 +
 .../ginkgo/v2/reporters/default_reporter.go | 56 +-
 .../onsi/ginkgo/v2/reporters/gojson_report.go | 61 +
 .../onsi/ginkgo/v2/reporters/junit_report.go | 9 +
 .../ginkgo/v2/reporters/teamcity_report.go | 8 +
 .../onsi/ginkgo/v2/reporting_dsl.go | 56 +-
 vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 4 +-
 .../onsi/ginkgo/v2/types/around_node.go | 56 +
 .../github.com/onsi/ginkgo/v2/types/config.go | 21 +-
 .../github.com/onsi/ginkgo/v2/types/errors.go | 18 +
 .../onsi/ginkgo/v2/types/semver_filter.go | 60 +
 .../github.com/onsi/ginkgo/v2/types/types.go | 193 +-
 .../onsi/ginkgo/v2/types/version.go | 2 +-
 vendor/github.com/onsi/gomega/CHANGELOG.md | 29 +
 vendor/github.com/onsi/gomega/gomega_dsl.go | 4 +-
 .../onsi/gomega/internal/async_assertion.go | 2 +-
 .../matchers/be_comparable_to_matcher.go | 3 +-
 .../gomega/matchers/match_yaml_matcher.go | 2 +-
 .../internal/version/version.go | 2 +-
 .../api/pkg/manifests/bundleloader.go | 2 +-
 .../api/pkg/validation/internal/multiarch.go | 2 +-
 .../operator-registry/alpha/declcfg/write.go | 149 +-
 .../operator-registry/alpha/model/model.go | 4 +
 .../pkg/image/containerdregistry/registry.go | 2 +-
 .../pkg/image/containerdregistry/resolver.go | 6 +-
 .../image/containersimageregistry/registry.go | 20 +-
 .../pelletier/go-toml/v2/.goreleaser.yaml | 2 +-
 .../pelletier/go-toml/v2/unmarshaler.go | 2 +-
 vendor/github.com/proglottis/gpgme/gpgme.go | 6 +
 .../client_golang/prometheus/desc.go | 3 +-
 .../prometheus/internal/difflib.go | 4 +-
 .../prometheus/internal/go_runtime_metrics.go | 2 +-
 .../client_golang/prometheus/labels.go | 3 +-
 .../client_golang/prometheus/metric.go | 25 +-
 .../prometheus/process_collector_darwin.go | 6 +-
 .../process_collector_mem_nocgo_darwin.go | 2 +-
 .../process_collector_procfsenabled.go | 8 +-
 .../prometheus/promhttp/instrument_server.go | 2 +-
 .../client_golang/prometheus/vec.go | 10 +-
 .../client_golang/prometheus/wrap.go | 36 +-
 .../prometheus/common/expfmt/decode.go | 39 +-
 .../prometheus/common/expfmt/encode.go | 10 +-
 .../prometheus/common/expfmt/expfmt.go | 12 +-
 .../prometheus/common/expfmt/fuzz.go | 9 +-
 .../common/expfmt/openmetrics_create.go | 11 +-
 .../prometheus/common/expfmt/text_create.go | 8 +-
 .../prometheus/common/expfmt/text_parse.go | 48 +-
 .../prometheus/common/model/alert.go | 2 +-
 .../prometheus/common/model/labels.go | 36 +-
 .../prometheus/common/model/labelset.go | 10 +-
 .../prometheus/common/model/metric.go | 224 +-
 .../prometheus/common/model/time.go | 37 +-
 .../prometheus/common/model/value.go | 15 +-
 .../common/model/value_histogram.go | 10 +-
 .../prometheus/common/model/value_type.go | 4 +-
 .../prometheus/procfs/.golangci.yml | 63 +-
 .../prometheus/procfs/Makefile.common | 10 +-
 vendor/github.com/prometheus/procfs/README.md | 6 +-
 vendor/github.com/prometheus/procfs/arp.go | 4 +-
 vendor/github.com/prometheus/procfs/fs.go | 10 +-
 .../prometheus/procfs/fs_statfs_notype.go | 4 +-
 .../github.com/prometheus/procfs/fscache.go | 6 +-
 .../prometheus/procfs/internal/fs/fs.go | 3 +
 .../prometheus/procfs/internal/util/parse.go | 14 +
 .../procfs/internal/util/sysreadfile.go | 20 +
 .../prometheus/procfs/mountstats.go | 27 +-
 .../prometheus/procfs/net_dev_snmp6.go | 96 +
 .../prometheus/procfs/net_ip_socket.go | 8 +-
 .../prometheus/procfs/net_protocols.go | 21 +-
 .../github.com/prometheus/procfs/net_tcp.go | 4 +
 .../github.com/prometheus/procfs/net_unix.go | 8 +-
 vendor/github.com/prometheus/procfs/proc.go | 8 +-
 .../prometheus/procfs/proc_cgroup.go | 2 +-
 .../github.com/prometheus/procfs/proc_io.go | 2 +-
 .../prometheus/procfs/proc_netstat.go | 224 +-
 .../prometheus/procfs/proc_smaps.go | 4 +-
 .../github.com/prometheus/procfs/proc_snmp.go | 120 +-
 .../prometheus/procfs/proc_snmp6.go | 150 +-
 .../prometheus/procfs/proc_status.go | 18 +-
 .../github.com/prometheus/procfs/proc_sys.go | 2 +-
 .../github.com/prometheus/procfs/softirqs.go | 22 +-
 .../go-redis/extra/rediscmd/v9/rediscmd.go | 18 -
 .../go-redis/extra/redisotel/v9/config.go | 2 +-
 .../go-redis/extra/redisotel/v9/metrics.go | 27 +-
 .../go-redis/extra/redisotel/v9/tracing.go | 65 +-
 .../github.com/redis/go-redis/v9/.gitignore | 7 +-
 .../redis/go-redis/v9/.golangci.yml | 31 +
 .../redis/go-redis/v9/CONTRIBUTING.md | 27 +-
 vendor/github.com/redis/go-redis/v9/Makefile | 63 +-
 vendor/github.com/redis/go-redis/v9/README.md | 204 +-
 .../redis/go-redis/v9/RELEASE-NOTES.md | 163 +
 .../redis/go-redis/v9/acl_commands.go | 54 +
 .../github.com/redis/go-redis/v9/auth/auth.go | 61 +
 .../v9/auth/reauth_credentials_listener.go | 47 +
 .../redis/go-redis/v9/cluster_commands.go | 7 +
 .../github.com/redis/go-redis/v9/command.go | 177 +-
 .../github.com/redis/go-redis/v9/commands.go | 18 +-
 .../redis/go-redis/v9/docker-compose.yml | 106 +
 vendor/github.com/redis/go-redis/v9/error.go | 22 +
 .../redis/go-redis/v9/gears_commands.go | 149 -
 .../redis/go-redis/v9/hash_commands.go | 175 +-
 .../redis/go-redis/v9/internal/pool/conn.go | 10 +
 .../redis/go-redis/v9/internal/pool/pool.go | 34 +-
 .../go-redis/v9/internal/proto/writer.go | 53 +
 .../redis/go-redis/v9/internal/util.go | 17 +-
 .../go-redis/v9/internal/util/convert.go | 30 +
 .../github.com/redis/go-redis/v9/options.go | 142 +-
 .../redis/go-redis/v9/osscluster.go | 91 +-
 .../redis/go-redis/v9/probabilistic.go | 72 +-
 vendor/github.com/redis/go-redis/v9/pubsub.go | 5 +-
 vendor/github.com/redis/go-redis/v9/redis.go | 161 +-
 vendor/github.com/redis/go-redis/v9/result.go | 8 +
 vendor/github.com/redis/go-redis/v9/ring.go | 55 +-
 .../redis/go-redis/v9/search_commands.go | 331 +-
 .../github.com/redis/go-redis/v9/sentinel.go | 232 +-
 vendor/github.com/redis/go-redis/v9/tx.go | 7 +-
 .../github.com/redis/go-redis/v9/universal.go | 31 +-
 .../redis/go-redis/v9/vectorset_commands.go | 348 +
 .../github.com/redis/go-redis/v9/version.go | 2 +-
 .../rubenv/sql-migrate/.golangci.yaml | 204 +-
 .../github.com/rubenv/sql-migrate/migrate.go | 12 +-
 .../github.com/sagikazarmark/locafero/.envrc | 4 +-
 .../sagikazarmark/locafero/.golangci.yaml | 48 +-
 .../sagikazarmark/locafero/README.md | 4 +-
 .../sagikazarmark/locafero/file_type.go | 12 +-
 .../sagikazarmark/locafero/finder.go | 102 +-
 .../sagikazarmark/locafero/flake.lock | 401 +-
 .../sagikazarmark/locafero/flake.nix | 66 +-
 .../sagikazarmark/locafero/justfile | 7 +-
 .../santhosh-tekuri/jsonschema/v6/.gitmodules | 4 +
 .../jsonschema/v6/.golangci.yml | 7 +
 .../jsonschema/v6/.pre-commit-hooks.yaml | 7 +
 .../jsonschema/v6/LICENSE} | 29 +-
 .../santhosh-tekuri/jsonschema/v6/README.md | 88 +
 .../santhosh-tekuri/jsonschema/v6/compiler.go | 332 +
 .../santhosh-tekuri/jsonschema/v6/content.go | 51 +
 .../santhosh-tekuri/jsonschema/v6/draft.go | 360 +
 .../santhosh-tekuri/jsonschema/v6/format.go | 708 +
 .../santhosh-tekuri/jsonschema/v6/go.work | 8 +
 .../santhosh-tekuri/jsonschema/v6/go.work.sum | 4 +
 .../jsonschema/v6/kind/kind.go | 651 +
 .../santhosh-tekuri/jsonschema/v6/loader.go | 266 +
 .../jsonschema/v6/metaschemas/draft-04/schema | 151 +
 .../jsonschema/v6/metaschemas/draft-06/schema | 150 +
 .../jsonschema/v6/metaschemas/draft-07/schema | 172 +
 .../metaschemas/draft/2019-09/meta/applicator | 55 +
 .../v6/metaschemas/draft/2019-09/meta/content | 15 +
 .../v6/metaschemas/draft/2019-09/meta/core | 56 +
 .../v6/metaschemas/draft/2019-09/meta/format | 13 +
 .../metaschemas/draft/2019-09/meta/meta-data | 35 +
 .../metaschemas/draft/2019-09/meta/validation | 97 +
 .../v6/metaschemas/draft/2019-09/schema | 41 +
 .../metaschemas/draft/2020-12/meta/applicator | 47 +
 .../v6/metaschemas/draft/2020-12/meta/content | 15 +
 .../v6/metaschemas/draft/2020-12/meta/core | 50 +
 .../draft/2020-12/meta/format-annotation | 13 +
 .../draft/2020-12/meta/format-assertion | 13 +
 .../metaschemas/draft/2020-12/meta/meta-data | 35 +
 .../draft/2020-12/meta/unevaluated | 14 +
 .../metaschemas/draft/2020-12/meta/validation | 97 +
 .../v6/metaschemas/draft/2020-12/schema | 57 +
 .../jsonschema/v6/objcompiler.go | 549 +
 .../santhosh-tekuri/jsonschema/v6/output.go | 216 +
 .../santhosh-tekuri/jsonschema/v6/position.go | 142 +
 .../santhosh-tekuri/jsonschema/v6/root.go | 202 +
 .../santhosh-tekuri/jsonschema/v6/roots.go | 286 +
 .../santhosh-tekuri/jsonschema/v6/schema.go | 254 +
 .../santhosh-tekuri/jsonschema/v6/util.go | 464 +
 .../jsonschema/v6/validator.go | 975 ++
 .../santhosh-tekuri/jsonschema/v6/vocab.go | 111 +
 .../sergi/go-diff/diffmatchpatch/diff.go | 39 +-
 .../sergi/go-diff/diffmatchpatch/index.go | 32 +
 .../go-diff/diffmatchpatch/stringutil.go | 4 +-
 .../gen/pb-go/common/v1/sigstore_common.pb.go | 76 +-
 .../github.com/sigstore/rekor/CONTRIBUTORS.md | 122 -
 vendor/github.com/sigstore/rekor/LICENSE | 202 -
 .../rekor/pkg/generated/models/alpine.go | 210 -
 .../pkg/generated/models/alpine_schema.go | 29 -
 .../generated/models/alpine_v001_schema.go | 455 -
 .../pkg/generated/models/consistency_proof.go | 118 -
 .../rekor/pkg/generated/models/cose.go | 210 -
 .../rekor/pkg/generated/models/cose_schema.go | 29 -
 .../pkg/generated/models/cose_v001_schema.go | 521 -
 .../rekor/pkg/generated/models/dsse.go | 210 -
 .../rekor/pkg/generated/models/dsse_schema.go | 29 -
 .../pkg/generated/models/dsse_v001_schema.go | 685 -
 .../rekor/pkg/generated/models/error.go | 69 -
 .../pkg/generated/models/hashedrekord.go | 210 -
 .../generated/models/hashedrekord_schema.go | 29 -
 .../models/hashedrekord_v001_schema.go | 519 -
 .../rekor/pkg/generated/models/helm.go | 210 -
 .../rekor/pkg/generated/models/helm_schema.go | 29 -
 .../pkg/generated/models/helm_v001_schema.go | 662 -
 .../models/inactive_shard_log_info.go | 153 -
 .../pkg/generated/models/inclusion_proof.go | 179 -
 .../rekor/pkg/generated/models/intoto.go | 210 -
 .../pkg/generated/models/intoto_schema.go | 29 -
 .../generated/models/intoto_v001_schema.go | 514 -
 .../generated/models/intoto_v002_schema.go | 757 -
 .../rekor/pkg/generated/models/jar.go | 210 -
 .../rekor/pkg/generated/models/jar_schema.go | 29 -
 .../pkg/generated/models/jar_v001_schema.go | 569 -
 .../rekor/pkg/generated/models/log_entry.go | 445 -
 .../rekor/pkg/generated/models/log_info.go | 221 -
 .../pkg/generated/models/proposed_entry.go | 195 -
 .../rekor/pkg/generated/models/rekord.go | 210 -
 .../pkg/generated/models/rekord_schema.go | 29 -
 .../generated/models/rekord_v001_schema.go | 611 -
 .../rekor/pkg/generated/models/rfc3161.go | 210 -
 .../pkg/generated/models/rfc3161_schema.go | 29 -
 .../generated/models/rfc3161_v001_schema.go | 183 -
 .../rekor/pkg/generated/models/rpm.go | 210 -
 .../rekor/pkg/generated/models/rpm_schema.go | 29 -
 .../pkg/generated/models/rpm_v001_schema.go | 450 -
 .../pkg/generated/models/search_index.go | 341 -
 .../pkg/generated/models/search_log_query.go | 297 -
 .../rekor/pkg/generated/models/tuf.go | 210 -
 .../rekor/pkg/generated/models/tuf_schema.go | 29 -
 .../pkg/generated/models/tuf_v001_schema.go | 304 -
 vendor/github.com/smallstep/pkcs7/pkcs7.go | 7 +
 vendor/github.com/smallstep/pkcs7/sign.go | 47 +-
 vendor/github.com/smallstep/pkcs7/verify.go | 32 +-
 vendor/github.com/sourcegraph/conc/Makefile | 24 +
 .../internal/multierror/multierror_go119.go | 10 -
 .../internal/multierror/multierror_go120.go | 10 -
 .../github.com/sourcegraph/conc/iter/iter.go | 85 -
 .../github.com/sourcegraph/conc/iter/map.go | 65 -
 .../sourcegraph/conc/pool/context_pool.go | 104 +
 .../sourcegraph/conc/pool/error_pool.go | 100 +
 .../github.com/sourcegraph/conc/pool/pool.go | 174 +
 .../conc/pool/result_context_pool.go | 85 +
 .../conc/pool/result_error_pool.go | 80 +
 .../sourcegraph/conc/pool/result_pool.go | 142 +
 vendor/github.com/spf13/afero/.editorconfig | 3 +
 vendor/github.com/spf13/afero/.golangci.yaml | 62 +-
 vendor/github.com/spf13/afero/README.md | 679 +-
 .../github.com/spf13/afero/copyOnWriteFs.go | 9 +-
 vendor/github.com/spf13/afero/iofs.go | 9 +-
 vendor/github.com/spf13/afero/lstater.go | 4 +-
 vendor/github.com/spf13/afero/mem/file.go | 22 +-
 vendor/github.com/spf13/afero/unionFile.go | 5 +-
 vendor/github.com/spf13/afero/util.go | 4 +-
 vendor/github.com/spf13/cast/.editorconfig | 15 +
 vendor/github.com/spf13/cast/.golangci.yaml | 39 +
 vendor/github.com/spf13/cast/README.md | 12 +-
 vendor/github.com/spf13/cast/alias.go | 69 +
 vendor/github.com/spf13/cast/basic.go | 131 +
 vendor/github.com/spf13/cast/cast.go | 232 +-
 vendor/github.com/spf13/cast/caste.go | 1510 --
 vendor/github.com/spf13/cast/indirect.go | 37 +
 vendor/github.com/spf13/cast/internal/time.go | 79 +
 .../cast/internal/timeformattype_string.go | 27 +
 vendor/github.com/spf13/cast/map.go | 212 +
 vendor/github.com/spf13/cast/number.go | 549 +
 vendor/github.com/spf13/cast/slice.go | 106 +
 vendor/github.com/spf13/cast/time.go | 116 +
 .../spf13/cast/timeformattype_string.go | 27 -
 vendor/github.com/spf13/cast/zz_generated.go | 261 +
 vendor/github.com/spf13/cobra/.golangci.yml | 28 +-
 vendor/github.com/spf13/cobra/README.md | 24 +-
 vendor/github.com/spf13/cobra/SECURITY.md | 105 +
 vendor/github.com/spf13/cobra/command.go | 11 +-
 vendor/github.com/spf13/cobra/completions.go | 19 +-
 vendor/github.com/spf13/cobra/doc/man_docs.go | 4 +-
 vendor/github.com/spf13/cobra/doc/md_docs.go | 8 +-
 .../github.com/spf13/cobra/doc/rest_docs.go | 8 +-
 .../github.com/spf13/cobra/doc/yaml_docs.go | 2 +-
 vendor/github.com/spf13/pflag/README.md | 27 +
 vendor/github.com/spf13/pflag/bool_func.go | 40 +
 vendor/github.com/spf13/pflag/count.go | 2 +-
 vendor/github.com/spf13/pflag/errors.go | 149 +
 vendor/github.com/spf13/pflag/flag.go | 121 +-
 vendor/github.com/spf13/pflag/func.go | 37 +
 vendor/github.com/spf13/pflag/golangflag.go | 56 +
 vendor/github.com/spf13/pflag/ipnet_slice.go | 2 +-
 .../spf13/pflag/string_to_string.go | 10 +-
 vendor/github.com/spf13/pflag/text.go | 81 +
 vendor/github.com/spf13/pflag/time.go | 124 +
 vendor/github.com/spf13/viper/.editorconfig | 3 +
 vendor/github.com/spf13/viper/.golangci.yaml | 209 +-
 vendor/github.com/spf13/viper/README.md | 4 +-
 .../spf13/viper/{UPDATES.md => UPGRADE.md} | 21 +
 vendor/github.com/spf13/viper/flake.lock | 401 +-
 vendor/github.com/spf13/viper/flake.nix | 76 +-
 .../viper/internal/encoding/yaml/codec.go | 2 +-
 vendor/github.com/spf13/viper/remote.go | 5 +-
 vendor/github.com/spf13/viper/util.go | 5 +-
 vendor/github.com/spf13/viper/viper.go | 42 +-
 .../stoewer/go-strcase/.golangci.yml | 37 +-
 vendor/github.com/stoewer/go-strcase/camel.go | 3 +
 .../github.com/stoewer/go-strcase/helper.go | 6 +
 .../testify/assert/assertion_compare.go | 22 +-
 .../testify/assert/assertion_format.go | 51 +-
 .../testify/assert/assertion_forward.go | 102 +-
 .../testify/assert/assertion_order.go | 2 +-
 .../stretchr/testify/assert/assertions.go | 367 +-
 .../github.com/stretchr/testify/assert/doc.go | 4 +
 .../testify/assert/http_assertions.go | 4 +-
 .../testify/assert/yaml/yaml_custom.go | 1 -
 .../testify/assert/yaml/yaml_default.go | 1 -
 .../stretchr/testify/assert/yaml/yaml_fail.go | 1 -
 .../stretchr/testify/require/doc.go | 2 +
 .../stretchr/testify/require/require.go | 108 +-
 .../testify/require/require_forward.go | 102 +-
 vendor/github.com/ulikunitz/xz/TODO.md | 17 +-
 vendor/github.com/ulikunitz/xz/lzma/header.go | 55 +-
 vendor/github.com/ulikunitz/xz/lzma/reader.go | 123 +-
 vendor/github.com/ulikunitz/xz/lzma/writer.go | 28 +-
 vendor/github.com/vbauerster/mpb/v8/README.md | 10 +-
 vendor/github.com/vbauerster/mpb/v8/bar.go | 20 +-
 .../vbauerster/mpb/v8/bar_filler_bar.go | 134 +-
 .../vbauerster/mpb/v8/bar_filler_spinner.go | 18 +-
 .../vbauerster/mpb/v8/bar_option.go | 19 +
 .../vbauerster/mpb/v8/decor/size_type.go | 6 +-
 .../vbauerster/mpb/v8/heap_manager.go | 8 +-
 .../vbauerster/mpb/v8/priority_queue.go | 22 +-
 .../github.com/vbauerster/mpb/v8/progress.go | 33 +-
 .../xeipuuv/gojsonpointer/README.md | 41 -
 .../xeipuuv/gojsonpointer/pointer.go | 211 -
 .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 -
 .../xeipuuv/gojsonreference/README.md | 10 -
 .../xeipuuv/gojsonreference/reference.go | 147 -
 .../xeipuuv/gojsonschema/.gitignore | 3 -
 .../xeipuuv/gojsonschema/.travis.yml | 9 -
 .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 -
 .../github.com/xeipuuv/gojsonschema/README.md | 466 -
 .../github.com/xeipuuv/gojsonschema/draft.go | 125 -
 .../github.com/xeipuuv/gojsonschema/errors.go | 364 -
 .../xeipuuv/gojsonschema/format_checkers.go | 368 -
 .../xeipuuv/gojsonschema/glide.yaml | 13 -
 .../xeipuuv/gojsonschema/internalLog.go | 37 -
 .../xeipuuv/gojsonschema/jsonContext.go | 73 -
 .../xeipuuv/gojsonschema/jsonLoader.go | 386 -
 .../xeipuuv/gojsonschema/locales.go | 472 -
 .../github.com/xeipuuv/gojsonschema/result.go | 220 -
 .../github.com/xeipuuv/gojsonschema/schema.go | 1087 --
 .../xeipuuv/gojsonschema/schemaLoader.go | 206 -
 .../xeipuuv/gojsonschema/schemaPool.go | 215 -
 .../gojsonschema/schemaReferencePool.go | 68 -
 .../xeipuuv/gojsonschema/schemaType.go | 83 -
 .../xeipuuv/gojsonschema/subSchema.go | 149 -
 .../github.com/xeipuuv/gojsonschema/types.go | 62 -
 .../github.com/xeipuuv/gojsonschema/utils.go | 197 -
 .../xeipuuv/gojsonschema/validation.go | 858 -
 vendor/go.etcd.io/bbolt/.go-version | 2 +-
 vendor/go.etcd.io/bbolt/bolt_386.go | 7 -
 vendor/go.etcd.io/bbolt/bolt_aix.go | 4 +-
 vendor/go.etcd.io/bbolt/bolt_amd64.go | 7 -
 vendor/go.etcd.io/bbolt/bolt_android.go | 4 +-
 vendor/go.etcd.io/bbolt/bolt_arm.go | 7 -
 vendor/go.etcd.io/bbolt/bolt_arm64.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_loong64.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_mips64x.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_mipsx.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_ppc.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_ppc64.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_riscv64.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_s390x.go | 9 -
 vendor/go.etcd.io/bbolt/bolt_solaris.go | 4 +-
 vendor/go.etcd.io/bbolt/bolt_unix.go | 3 +-
 vendor/go.etcd.io/bbolt/bolt_windows.go | 3 +-
 vendor/go.etcd.io/bbolt/db.go | 16 +-
 .../bbolt/internal/common/bolt_386.go | 7 +
 .../bbolt/internal/common/bolt_amd64.go | 7 +
 .../bbolt/internal/common/bolt_arm.go | 7 +
 .../bbolt/internal/common/bolt_arm64.go | 9 +
 .../bbolt/internal/common/bolt_loong64.go | 9 +
 .../bbolt/internal/common/bolt_mips64x.go | 9 +
 .../bbolt/internal/common/bolt_mipsx.go | 9 +
 .../bbolt/internal/common/bolt_ppc.go | 9 +
 .../bbolt/internal/common/bolt_ppc64.go | 9 +
 .../bbolt/internal/common/bolt_ppc64le.go | 9 +
 .../bbolt/internal/common/bolt_riscv64.go | 9 +
 .../bbolt/internal/common/bolt_s390x.go | 9 +
 .../go.etcd.io/bbolt/internal/common/types.go | 3 -
 .../bbolt/internal/common/unsafe.go | 2 +-
 vendor/go.etcd.io/bbolt/tx.go | 75 +-
 vendor/go.mongodb.org/mongo-driver/LICENSE | 201 -
 .../go.mongodb.org/mongo-driver/bson/bson.go | 50 -
 .../bson/bsoncodec/array_codec.go | 55 -
 .../mongo-driver/bson/bsoncodec/bsoncodec.go | 382 -
 .../bson/bsoncodec/byte_slice_codec.go | 138 -
 .../bson/bsoncodec/codec_cache.go | 166 -
 .../bson/bsoncodec/cond_addr_codec.go | 63 -
 .../bson/bsoncodec/default_value_decoders.go | 1807 --
 .../bson/bsoncodec/default_value_encoders.go | 856 -
 .../mongo-driver/bson/bsoncodec/doc.go | 95 -
 .../bson/bsoncodec/empty_interface_codec.go | 173 -
 .../mongo-driver/bson/bsoncodec/map_codec.go | 343 -
 .../mongo-driver/bson/bsoncodec/mode.go | 65 -
 .../bson/bsoncodec/pointer_codec.go | 108 -
 .../mongo-driver/bson/bsoncodec/proxy.go | 14 -
 .../mongo-driver/bson/bsoncodec/registry.go | 524 -
 .../bson/bsoncodec/slice_codec.go | 214 -
 .../bson/bsoncodec/string_codec.go | 140 -
 .../bson/bsoncodec/struct_codec.go | 736 -
 .../bson/bsoncodec/struct_tag_parser.go | 148 -
 .../mongo-driver/bson/bsoncodec/time_codec.go | 151 -
 .../mongo-driver/bson/bsoncodec/types.go | 58 -
 .../mongo-driver/bson/bsoncodec/uint_codec.go | 198 -
 .../bsonoptions/byte_slice_codec_options.go | 49 -
 .../mongo-driver/bson/bsonoptions/doc.go | 8 -
 .../empty_interface_codec_options.go | 49 -
 .../bson/bsonoptions/map_codec_options.go | 82 -
 .../bson/bsonoptions/slice_codec_options.go | 49 -
 .../bson/bsonoptions/string_codec_options.go | 52 -
 .../bson/bsonoptions/struct_codec_options.go | 107 -
 .../bson/bsonoptions/time_codec_options.go | 49 -
 .../bson/bsonoptions/uint_codec_options.go | 49 -
 .../mongo-driver/bson/bsonrw/copier.go | 489 -
 .../mongo-driver/bson/bsonrw/doc.go | 9 -
 .../bson/bsonrw/extjson_parser.go | 806 -
 .../bson/bsonrw/extjson_reader.go | 653 -
 .../bson/bsonrw/extjson_tables.go | 223 -
 .../bson/bsonrw/extjson_wrappers.go | 492 -
 .../bson/bsonrw/extjson_writer.go | 751 -
 .../mongo-driver/bson/bsonrw/json_scanner.go | 528 -
 .../mongo-driver/bson/bsonrw/mode.go | 108 -
 .../mongo-driver/bson/bsonrw/reader.go | 65 -
 .../mongo-driver/bson/bsonrw/value_reader.go | 890 -
 .../mongo-driver/bson/bsonrw/value_writer.go | 640 -
 .../mongo-driver/bson/bsonrw/writer.go | 87 -
 .../mongo-driver/bson/bsontype/bsontype.go | 116 -
 .../mongo-driver/bson/decoder.go | 208 -
 .../go.mongodb.org/mongo-driver/bson/doc.go | 139 -
 .../mongo-driver/bson/encoder.go | 199 -
 .../mongo-driver/bson/marshal.go | 453 -
 .../mongo-driver/bson/primitive/decimal.go | 434 -
 .../mongo-driver/bson/primitive/objectid.go | 206 -
 .../mongo-driver/bson/primitive/primitive.go | 231 -
 .../mongo-driver/bson/primitive_codecs.go | 122 -
 .../go.mongodb.org/mongo-driver/bson/raw.go | 101 -
 .../mongo-driver/bson/raw_element.go | 48 -
 .../mongo-driver/bson/raw_value.go | 320 -
 .../mongo-driver/bson/registry.go | 35 -
 .../go.mongodb.org/mongo-driver/bson/types.go | 50 -
 .../mongo-driver/bson/unmarshal.go | 177 -
 .../mongo-driver/x/bsonx/bsoncore/array.go | 164 -
 .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 -
 .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 -
 .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 846 -
 .../mongo-driver/x/bsonx/bsoncore/doc.go | 29 -
 .../mongo-driver/x/bsonx/bsoncore/document.go | 386 -
 .../x/bsonx/bsoncore/document_sequence.go | 189 -
 .../mongo-driver/x/bsonx/bsoncore/element.go | 152 -
 .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 -
 .../mongo-driver/x/bsonx/bsoncore/value.go | 965 --
 .../contrib/bridges/prometheus/BENCHMARKS.md | 2 +-
 .../contrib/bridges/prometheus/producer.go | 5 +-
 .../contrib/exporters/autoexport/logs.go | 6 +
 .../net/http/otelhttp/config.go | 4 +
 .../net/http/otelhttp/handler.go | 9 +-
 .../otelhttp/internal/request/body_wrapper.go | 7 +-
 .../net/http/otelhttp/internal/request/gen.go | 10 +
 .../internal/request/resp_writer_wrapper.go | 5 +-
 .../net/http/otelhttp/internal/semconv/env.go | 121 +-
 .../net/http/otelhttp/internal/semconv/gen.go | 16 +-
 .../otelhttp/internal/semconv/httpconv.go | 76 +-
 .../http/otelhttp/internal/semconv/util.go | 28 +-
 .../http/otelhttp/internal/semconv/v1.20.0.go | 27 +-
 .../otelhttp/internal/semconvutil/httpconv.go | 53 +-
 .../otelhttp/internal/semconvutil/netconv.go | 13 +-
 .../net/http/otelhttp/labeler.go | 6 +-
 .../net/http/otelhttp/version.go | 9 +-
 .../go.opentelemetry.io/otel/.clomonitor.yml | 3 +
 vendor/go.opentelemetry.io/otel/.gitignore | 1 +
 vendor/go.opentelemetry.io/otel/.golangci.yml | 529 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 154 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 30 +-
 vendor/go.opentelemetry.io/otel/Makefile | 60 +-
 vendor/go.opentelemetry.io/otel/README.md | 20 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 52 +-
 .../otel/attribute/filter.go | 4 +-
 .../internal}/attribute.go | 2 +-
 .../otel/attribute/rawhelpers.go | 37 +
 .../otel/attribute/value.go | 15 +-
 .../otel/dependencies.Dockerfile | 4 +
 .../otlp/otlplog/otlploggrpc/config.go | 33 +-
 .../otlploggrpc/internal/retry/retry.go | 28 +-
 .../otlploggrpc/internal/transform/log.go | 5 +-
 .../otlp/otlplog/otlploggrpc/version.go | 2 +-
 .../otlp/otlplog/otlploghttp/client.go | 92 +-
 .../otlp/otlplog/otlploghttp/config.go | 48 +-
 .../otlploghttp/internal/retry/retry.go | 28 +-
 .../otlploghttp/internal/transform/log.go | 5 +-
 .../otlp/otlplog/otlploghttp/version.go | 2 +-
 .../otlp/otlpmetric/otlpmetricgrpc/config.go | 5 +-
 .../otlpmetric/otlpmetricgrpc/exporter.go | 3 +-
 .../internal/envconfig/envconfig.go | 4 +-
 .../otlpmetric/otlpmetricgrpc/internal/gen.go | 1 +
 .../internal/oconf/envconfig.go | 36 +-
 .../otlpmetricgrpc/internal/oconf/options.go | 22 +-
 .../internal/oconf/optiontypes.go | 2 +-
 .../otlpmetricgrpc/internal/oconf/tls.go | 2 +-
 .../otlpmetricgrpc/internal/partialsuccess.go | 2 +-
 .../otlpmetricgrpc/internal/retry/retry.go | 28 +-
 .../internal/transform/attribute.go | 2 +-
 .../internal/transform/error.go | 2 +-
 .../internal/transform/metricdata.go | 14 +-
 .../otlp/otlpmetric/otlpmetricgrpc/version.go | 2 +-
 .../otlp/otlpmetric/otlpmetrichttp/client.go | 96 +-
 .../otlp/otlpmetric/otlpmetrichttp/config.go | 16 +
 .../otlpmetric/otlpmetrichttp/exporter.go | 3 +-
 .../internal/envconfig/envconfig.go | 4 +-
 .../otlpmetric/otlpmetrichttp/internal/gen.go | 1 +
 .../internal/oconf/envconfig.go | 36 +-
 .../otlpmetrichttp/internal/oconf/options.go | 22 +-
 .../internal/oconf/optiontypes.go | 2 +-
 .../otlpmetrichttp/internal/oconf/tls.go | 2 +-
 .../otlpmetrichttp/internal/partialsuccess.go | 2 +-
 .../otlpmetrichttp/internal/retry/retry.go | 28 +-
 .../internal/transform/attribute.go | 2 +-
 .../internal/transform/error.go | 2 +-
 .../internal/transform/metricdata.go | 14 +-
 .../otlp/otlpmetric/otlpmetrichttp/version.go | 2 +-
 .../internal/tracetransform/attribute.go | 2 +
 .../otlp/otlptrace/otlptracegrpc/client.go | 2 +-
 .../internal/envconfig/envconfig.go | 4 +-
 .../otlptrace/otlptracegrpc/internal/gen.go | 1 +
 .../internal/otlpconfig/envconfig.go | 14 +-
 .../internal/otlpconfig/options.go | 16 +-
 .../internal/otlpconfig/optiontypes.go | 2 +-
 .../otlptracegrpc/internal/otlpconfig/tls.go | 2 +-
 .../otlptracegrpc/internal/partialsuccess.go | 2 +-
 .../otlptracegrpc/internal/retry/retry.go | 28 +-
 .../otlp/otlptrace/otlptracegrpc/options.go | 5 +-
 .../otlp/otlptrace/otlptracehttp/client.go | 32 +-
 .../internal/envconfig/envconfig.go | 4 +-
 .../otlptrace/otlptracehttp/internal/gen.go | 1 +
 .../internal/otlpconfig/envconfig.go | 14 +-
 .../internal/otlpconfig/options.go | 14 +-
 .../internal/otlpconfig/optiontypes.go | 2 +-
 .../otlptracehttp/internal/otlpconfig/tls.go | 2 +-
 .../otlptracehttp/internal/partialsuccess.go | 2 +-
 .../otlptracehttp/internal/retry/retry.go | 28 +-
 .../otlp/otlptrace/otlptracehttp/options.go | 16 +
 .../otel/exporters/otlp/otlptrace/version.go | 2 +-
 .../otel/exporters/prometheus/config.go | 11 +-
 .../otel/exporters/prometheus/exporter.go | 179 +-
 .../otel/exporters/stdout/stdoutlog/record.go | 2 +
 .../exporters/stdout/stdoutmetric/exporter.go | 4 +-
 .../go.opentelemetry.io/otel/get_main_pkgs.sh | 30 -
 .../go.opentelemetry.io/otel/internal/gen.go | 18 -
 .../otel/internal/global/handler.go | 1 +
 .../otel/internal/global/meter.go | 45 +-
 .../otel/internal/global/trace.go | 13 +-
 .../otel/internal/rawhelpers.go | 48 -
 vendor/go.opentelemetry.io/otel/log/DESIGN.md | 77 +-
 vendor/go.opentelemetry.io/otel/log/doc.go | 17 +-
 .../otel/log/embedded/embedded.go | 18 +-
 .../go.opentelemetry.io/otel/log/keyvalue.go | 58 +-
 vendor/go.opentelemetry.io/otel/log/logger.go | 33 +-
 .../go.opentelemetry.io/otel/log/noop/noop.go | 6 +-
 vendor/go.opentelemetry.io/otel/log/record.go | 14 +
 .../otel/metric/asyncfloat64.go | 12 +-
 .../otel/metric/asyncint64.go | 8 +-
 .../otel/metric/instrument.go | 16 +-
 .../go.opentelemetry.io/otel/metric/meter.go | 10 +-
 .../otel/metric/noop/noop.go | 25 +-
 .../otel/propagation/baggage.go | 36 +-
 .../otel/propagation/propagation.go | 30 +-
 vendor/go.opentelemetry.io/otel/renovate.json | 11 +-
 .../go.opentelemetry.io/otel/requirements.txt | 2 +-
 .../otel/sdk/internal/env/env.go | 2 +
 .../go.opentelemetry.io/otel/sdk/log/batch.go | 28 +-
 .../go.opentelemetry.io/otel/sdk/log/doc.go | 5 +-
 .../otel/sdk/log/exporter.go | 11 +-
 .../otel/sdk/log/filter_processor.go | 60 +
 .../otel/sdk/log/internal/x/README.md | 35 -
 .../otel/sdk/log/internal/x/x.go | 47 -
 .../otel/sdk/log/logger.go | 18 +-
 .../otel/sdk/log/processor.go | 9 +-
 .../otel/sdk/log/provider.go | 33 +-
 .../otel/sdk/log/record.go | 108 +-
 .../otel/sdk/metric/exemplar.go | 5 +-
 .../metric/exemplar/fixed_size_reservoir.go | 36 +-
 .../otel/sdk/metric/instrument.go | 8 +-
 .../metric/internal/aggregate/aggregate.go | 10 +-
 .../aggregate/exponential_histogram.go | 45 +-
 .../internal/aggregate/filtered_reservoir.go | 5 +-
 .../metric/internal/aggregate/histogram.go | 21 +-
 .../metric/internal/aggregate/lastvalue.go | 5 +-
 .../otel/sdk/metric/internal/aggregate/sum.go | 6 +-
 .../otel/sdk/metric/internal/reuse_slice.go | 1 +
 .../otel/sdk/metric/manual_reader.go | 4 +-
 .../otel/sdk/metric/meter.go | 74 +-
 .../otel/sdk/metric/metricdata/data.go | 1 +
 .../otel/sdk/metric/metricdata/temporality.go | 2 +-
 .../otel/sdk/metric/periodic_reader.go | 10 +-
 .../otel/sdk/metric/pipeline.go | 41 +-
 .../otel/sdk/metric/reader.go | 5 +-
 .../otel/sdk/metric/version.go | 2 +-
 .../otel/sdk/resource/builtin.go | 2 +-
 .../otel/sdk/resource/container.go | 2 +-
 .../otel/sdk/resource/env.go | 2 +-
 .../otel/sdk/resource/host_id.go | 2 +-
 .../otel/sdk/resource/os.go | 2 +-
 .../otel/sdk/resource/os_release_darwin.go | 3 +-
 .../otel/sdk/resource/process.go | 2 +-
 .../otel/sdk/resource/resource.go | 25 +-
 .../otel/sdk/trace/batch_span_processor.go | 6 +-
 .../otel/sdk/trace/id_generator.go | 26 +-
 .../otel/sdk/trace/provider.go | 12 +-
 .../otel/sdk/trace/sampling.go | 8 +-
 .../otel/sdk/trace/simple_span_processor.go | 2 +-
 .../otel/sdk/trace/span.go | 2 +-
 .../otel/sdk/trace/tracer.go | 13 +-
 .../go.opentelemetry.io/otel/sdk/version.go | 3 +-
 .../otel/semconv/internal/http.go | 21 +-
 .../otel/semconv/v1.10.0/README.md | 3 -
 .../otel/semconv/v1.10.0/http.go | 103 -
 .../otel/semconv/v1.10.0/resource.go | 970 --
 .../otel/semconv/v1.10.0/trace.go | 1689 --
 .../otel/semconv/v1.12.0/README.md | 3 -
 .../otel/semconv/v1.12.0/http.go | 103 -
 .../otel/semconv/v1.12.0/resource.go | 1031 --
 .../otel/semconv/v1.12.0/trace.go | 1693 --
 .../otel/semconv/v1.24.0/README.md | 3 +
 .../otel/semconv/v1.24.0/attribute_group.go | 4387 +++++
 .../otel/semconv/{v1.10.0 => v1.24.0}/doc.go | 6 +-
 .../otel/semconv/v1.24.0/event.go | 200 +
 .../semconv/{v1.12.0 => v1.24.0}/exception.go | 2 +-
 .../otel/semconv/v1.24.0/metric.go | 1071 ++
 .../otel/semconv/v1.24.0/resource.go | 2545 +++
 .../semconv/{v1.12.0 => v1.24.0}/schema.go | 4 +-
 .../otel/semconv/v1.24.0/trace.go | 1323 ++
 .../otel/semconv/v1.34.0/MIGRATION.md | 4 +
 .../otel/semconv/v1.34.0/README.md | 3 +
 .../otel/semconv/v1.34.0/attribute_group.go | 13851 ++++++++++++++++
 .../otel/semconv/{v1.12.0 => v1.34.0}/doc.go | 6 +-
 .../semconv/{v1.10.0 => v1.34.0}/exception.go | 2 +-
 .../semconv/{v1.10.0 => v1.34.0}/schema.go | 4 +-
 vendor/go.opentelemetry.io/otel/trace/auto.go | 662 +
 .../otel/trace/internal/telemetry/attr.go | 58 +
 .../otel/trace/internal/telemetry/doc.go | 8 +
 .../otel/trace/internal/telemetry/id.go | 103 +
 .../otel/trace/internal/telemetry/number.go | 67 +
 .../otel/trace/internal/telemetry/resource.go | 66 +
 .../otel/trace/internal/telemetry/scope.go | 67 +
 .../otel/trace/internal/telemetry/span.go | 472 +
 .../otel/trace/internal/telemetry/status.go | 42 +
 .../otel/trace/internal/telemetry/traces.go | 189 +
 .../otel/trace/internal/telemetry/value.go | 453 +
 vendor/go.opentelemetry.io/otel/trace/noop.go | 22 +-
 .../otel/verify_readmes.sh | 21 -
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 10 +-
 .../collector/logs/v1/logs_service_grpc.pb.go | 4 -
 .../metrics/v1/metrics_service_grpc.pb.go | 4 -
 .../trace/v1/trace_service_grpc.pb.go | 4 -
 .../proto/otlp/common/v1/common.pb.go | 138 +-
 .../proto/otlp/logs/v1/logs.pb.go | 138 +-
 .../proto/otlp/metrics/v1/metrics.pb.go | 23 +-
 .../proto/otlp/resource/v1/resource.pb.go | 56 +-
 .../proto/otlp/trace/v1/trace.pb.go | 6 +-
 .../common/LICENSE | 0
 .../common/pkg/auth/auth.go | 20 +-
 .../common/pkg/auth/cli.go | 14 +-
 .../common/pkg/capabilities/capabilities.go | 2 +-
 .../common/pkg/completion/completion.go | 18 +-
 .../common/pkg/password/password_supported.go | 0
 .../common/pkg/password/password_windows.go | 0
 .../image/v5/LICENSE | 0
 .../image/v5/copy/blob.go | 6 +-
 .../image/v5/copy/compression.go | 12 +-
 .../image/v5/copy/copy.go | 28 +-
 .../image/v5/copy/digesting_reader.go | 0
 .../image/v5/copy/encryption.go | 2 +-
 .../image/v5/copy/manifest.go | 10 +-
 .../image/v5/copy/multiple.go | 12 +-
 .../image/v5/copy/progress_bars.go | 4 +-
 .../image/v5/copy/progress_channel.go | 2 +-
 .../image/v5/copy/sign.go | 14 +-
 .../image/v5/copy/single.go | 23 +-
 .../v5/directory/explicitfilepath/path.go | 2 +-
 .../image/v5/docker/body_reader.go | 0
 .../image/v5/docker/cache.go | 4 +-
 .../image/v5/docker/distribution_error.go | 0
 .../image/v5/docker/docker_client.go | 31 +-
 .../image/v5/docker/docker_image.go | 8 +-
 .../image/v5/docker/docker_image_dest.go | 34 +-
 .../image/v5/docker/docker_image_src.go | 22 +-
 .../image/v5/docker/docker_transport.go | 8 +-
 .../image/v5/docker/errors.go | 0
 .../image/v5/docker/paths_common.go | 0
 .../image/v5/docker/paths_freebsd.go | 0
 .../v5/docker/policyconfiguration/naming.go | 2 +-
 .../image/v5/docker/reference/README.md | 0
 .../image/v5/docker/reference/helpers.go | 0
 .../image/v5/docker/reference/normalize.go | 0
 .../image/v5/docker/reference/reference.go | 0
 .../v5/docker/reference/regexp-additions.go | 0
 .../image/v5/docker/reference/regexp.go | 2 +-
 .../image/v5/docker/registries_d.go | 12 +-
 .../image/v5/docker/wwwauthenticate.go | 0
 .../image/v5/image/docker_schema2.go | 2 +-
 .../image/v5/image/sourced.go | 4 +-
 .../image/v5/image/unparsed.go | 8 +-
 .../internal/blobinfocache/blobinfocache.go | 2 +-
 .../image/v5/internal/blobinfocache/types.go | 4 +-
 .../image/v5/internal/image/docker_list.go | 4 +-
 .../image/v5/internal/image/docker_schema1.go | 10 +-
 .../image/v5/internal/image/docker_schema2.go | 10 +-
 .../image/v5/internal/image/manifest.go | 6 +-
 .../image/v5/internal/image/memory.go | 2 +-
 .../image/v5/internal/image/oci.go | 12 +-
 .../image/v5/internal/image/oci_index.go | 4 +-
 .../image/v5/internal/image/sourced.go | 2 +-
 .../image/v5/internal/image/unparsed.go | 12 +-
 .../internal/imagedestination/impl/compat.go | 8 +-
 .../internal/imagedestination/impl/helpers.go | 4 +-
 .../imagedestination/impl/properties.go | 2 +-
 .../stubs/original_oci_config.go | 0
 .../stubs/put_blob_partial.go | 4 +-
 .../imagedestination/stubs/signatures.go | 2 +-
 .../internal/imagedestination/stubs/stubs.go | 0
 .../v5/internal/imagedestination/wrapper.go | 8 +-
 .../v5/internal/imagesource/impl/compat.go | 4 +-
 .../internal/imagesource/impl/layer_infos.go | 2 +-
 .../internal/imagesource/impl/properties.go | 0
 .../internal/imagesource/impl/signatures.go | 2 +-
 .../internal/imagesource/stubs/get_blob_at.go | 4 +-
 .../v5/internal/imagesource/stubs/stubs.go | 0
 .../image/v5/internal/imagesource/wrapper.go | 8 +-
 .../image/v5/internal/iolimits/iolimits.go | 0
 .../image/v5/internal/manifest/common.go | 0
 .../v5/internal/manifest/docker_schema2.go | 0
 .../internal/manifest/docker_schema2_list.go | 6 +-
 .../image/v5/internal/manifest/errors.go | 0
 .../image/v5/internal/manifest/list.go | 4 +-
 .../image/v5/internal/manifest/manifest.go | 2 +-
 .../image/v5/internal/manifest/oci_index.go | 6 +-
 .../image/v5/internal/multierr/multierr.go | 0
 .../internal/pkg/platform/platform_matcher.go | 2 +-
 .../image/v5/internal/private/private.go | 11 +-
 .../internal/putblobdigest/put_blob_digest.go | 2 +-
 .../image/v5/internal/rootless/rootless.go | 0
 .../image/v5/internal/set/set.go | 0
 .../image/v5/internal/signature/signature.go | 0
 .../image/v5/internal/signature/sigstore.go | 0
 .../image/v5/internal/signature/simple.go | 0
 .../image/v5/internal/signer/signer.go | 4 +-
 .../v5/internal/streamdigest/stream_digest.go | 6 +-
 .../image/v5/internal/tmpdir/tmpdir.go | 4 +-
 .../v5/internal/unparsedimage/wrapper.go | 6 +-
 .../v5/internal/uploadreader/upload_reader.go | 0
 .../image/v5/internal/useragent/useragent.go | 2 +-
 .../image/v5/manifest/common.go | 4 +-
 .../image/v5/manifest/docker_schema1.go | 12 +-
 .../image/v5/manifest/docker_schema2.go | 8 +-
 .../image/v5/manifest/docker_schema2_list.go | 2 +-
 .../image/v5/manifest/list.go | 2 +-
 .../image/v5/manifest/manifest.go | 4 +-
 .../image/v5/manifest/oci.go | 6 +-
 .../image/v5/manifest/oci_index.go | 2 +-
 .../image/v5/oci/internal/oci_util.go | 0
 .../image/v5/oci/layout/oci_delete.go | 4 +-
 .../image/v5/oci/layout/oci_dest.go | 14 +-
 .../image/v5/oci/layout/oci_src.go | 24 +-
 .../image/v5/oci/layout/oci_transport.go | 14 +-
 .../image/v5/oci/layout/reader.go | 2 +-
 .../image/v5/pkg/blobinfocache/default.go | 8 +-
 .../internal/prioritize/prioritize.go | 8 +-
 .../v5/pkg/blobinfocache/memory/memory.go | 8 +-
 .../image/v5/pkg/blobinfocache/none/none.go | 4 +-
 .../v5/pkg/blobinfocache/sqlite/sqlite.go | 6 +-
 .../image/v5/pkg/compression/compression.go | 6 +-
 .../v5/pkg/compression/internal/types.go | 0
 .../image/v5/pkg/compression/types/types.go | 2 +-
 .../image/v5/pkg/compression/zstd.go | 0
 .../image/v5/pkg/docker/config/config.go | 16 +-
 .../image/v5/pkg/strslice/README.md | 0
 .../image/v5/pkg/strslice/strslice.go | 0
 .../v5/pkg/sysregistriesv2/paths_common.go | 0
 .../v5/pkg/sysregistriesv2/paths_freebsd.go | 0
 .../v5/pkg/sysregistriesv2/shortnames.go | 12 +-
 .../sysregistriesv2/system_registries_v2.go | 14 +-
 .../v5/pkg/tlsclientconfig/tlsclientconfig.go | 0
 .../image/v5/signature/docker.go | 18 +-
 .../image/v5/signature/fulcio_cert.go | 4 +-
 .../image/v5/signature/internal/errors.go | 0
 .../image/v5/signature/internal/json.go | 2 +-
 .../v5/signature/internal/rekor_api_types.go | 95 +
 .../image/v5/signature/internal/rekor_set.go | 30 +-
 .../v5/signature/internal/sequoia/gosequoia.c | 200 +
 .../v5/signature/internal/sequoia/gosequoia.h | 54 +
 .../internal/sequoia/gosequoiafuncs.h | 21 +
 .../v5/signature/internal/sequoia/sequoia.go | 223 +
 .../v5/signature/internal/sequoia/sequoia.h | 85 +
 .../v5/signature/internal/sigstore_payload.go | 2 +-
 .../image/v5/signature/mechanism.go | 15 +-
 .../image/v5/signature/mechanism_gpgme.go | 101 +-
 .../v5/signature/mechanism_gpgme_only.go | 64 +
 .../image/v5/signature/mechanism_openpgp.go | 15 +-
 .../image/v5/signature/mechanism_sequoia.go | 84 +
 .../image/v5/signature/pki_cert.go | 2 +-
 .../image/v5/signature/policy_config.go | 16 +-
 .../v5/signature/policy_config_sigstore.go | 2 +-
 .../image/v5/signature/policy_eval.go | 6 +-
 .../v5/signature/policy_eval_baselayer.go | 2 +-
 .../v5/signature/policy_eval_signedby.go | 18 +-
 .../v5/signature/policy_eval_sigstore.go | 10 +-
 .../image/v5/signature/policy_eval_simple.go | 4 +-
 .../image/v5/signature/policy_paths_common.go | 0
 .../v5/signature/policy_paths_freebsd.go | 0
 .../v5/signature/policy_reference_match.go | 6 +-
 .../image/v5/signature/policy_types.go | 0
 .../image/v5/signature/signer/signer.go | 2 +-
 .../image/v5/signature/sigstore/copied.go | 0
 .../image/v5/signature/sigstore/generate.go | 0
 .../v5/signature/sigstore/internal/signer.go | 8 +-
 .../image/v5/signature/sigstore/signer.go | 6 +-
 .../image/v5/signature/simple.go | 40 +-
 .../v5/signature/simplesigning/signer.go | 10 +-
 .../image/v5/transports/stub.go | 2 +-
 .../image/v5/transports/transports.go | 6 +-
 .../image/v5/types/types.go | 6 +-
 .../image/v5/version/version.go | 2 +-
 .../storage/AUTHORS | 0
 .../storage/LICENSE | 0
 .../storage/NOTICE | 0
 .../internal/rawfilelock/rawfilelock.go | 64 +
 .../internal/rawfilelock/rawfilelock_unix.go | 49 +
 .../rawfilelock/rawfilelock_windows.go | 48 +
 .../storage/pkg/archive/README.md | 0
 .../storage/pkg/archive/archive.go | 407 +-
 .../storage/pkg/archive/archive_110.go | 0
 .../storage/pkg/archive/archive_19.go | 0
 .../storage/pkg/archive/archive_bsd.go | 0
 .../storage/pkg/archive/archive_linux.go | 4 +-
 .../storage/pkg/archive/archive_other.go | 0
 .../storage/pkg/archive/archive_unix.go | 6 +-
 .../storage/pkg/archive/archive_windows.go | 6 +-
 .../storage/pkg/archive/archive_zstd.go | 0
 .../storage/pkg/archive/changes.go | 16 +-
 .../storage/pkg/archive/changes_linux.go | 4 +-
 .../storage/pkg/archive/changes_other.go | 4 +-
 .../storage/pkg/archive/changes_unix.go | 4 +-
 .../storage/pkg/archive/changes_windows.go | 2 +-
 .../storage/pkg/archive/copy.go | 2 +-
 .../storage/pkg/archive/copy_unix.go | 0
 .../storage/pkg/archive/copy_windows.go | 0
 .../storage/pkg/archive/diff.go | 8 +-
 .../storage/pkg/archive/fflags_bsd.go | 2 +-
 .../storage/pkg/archive/fflags_unsupported.go | 0
 .../storage/pkg/archive/filter.go | 0
 .../storage/pkg/archive/time_linux.go | 0
 .../storage/pkg/archive/time_unsupported.go | 0
 .../storage/pkg/archive/whiteouts.go | 0
 .../storage/pkg/archive/wrap.go | 0
 .../pkg/chunked/compressor/compressor.go | 69 +-
 .../storage/pkg/chunked/compressor/rollsum.go | 0
 .../chunked/internal/minimal/compression.go | 17 +-
 .../storage/pkg/chunked/toc/toc.go | 2 +-
 .../storage/pkg/fileutils/exists_freebsd.go | 0
 .../storage/pkg/fileutils/exists_unix.go | 0
 .../storage/pkg/fileutils/exists_windows.go | 0
 .../storage/pkg/fileutils/fileutils.go | 0
 .../storage/pkg/fileutils/fileutils_darwin.go | 0
 .../pkg/fileutils/fileutils_solaris.go | 0
 .../storage/pkg/fileutils/fileutils_unix.go | 0
 .../pkg/fileutils/fileutils_windows.go | 0
 .../storage/pkg/fileutils/reflink_linux.go | 0
 .../pkg/fileutils/reflink_unsupported.go | 0
 .../storage/pkg/homedir/homedir.go | 0
 .../storage/pkg/homedir/homedir_unix.go | 2 +-
 .../storage/pkg/homedir/homedir_windows.go | 0
 .../storage/pkg/idtools/idtools.go | 2 +-
 .../storage/pkg/idtools/idtools_supported.go | 0
 .../storage/pkg/idtools/idtools_unix.go | 4 +-
 .../pkg/idtools/idtools_unsupported.go | 0
 .../storage/pkg/idtools/idtools_windows.go | 0
 .../storage/pkg/idtools/parser.go | 0
 .../storage/pkg/idtools/usergroupadd_linux.go | 2 +-
 .../pkg/idtools/usergroupadd_unsupported.go | 0
 .../storage/pkg/idtools/utils_unix.go | 0
 .../storage/pkg/ioutils/buffer.go | 0
 .../storage/pkg/ioutils/bytespipe.go | 0
 .../storage/pkg/ioutils/fswriters.go | 0
 .../storage/pkg/ioutils/fswriters_linux.go | 0
 .../storage/pkg/ioutils/fswriters_other.go | 0
 .../storage/pkg/ioutils/readers.go | 0
 .../storage/pkg/ioutils/temp_unix.go | 0
 .../storage/pkg/ioutils/temp_windows.go | 2 +-
 .../storage/pkg/ioutils/writeflusher.go | 0
 .../storage/pkg/ioutils/writers.go | 0
 .../storage/pkg/lockfile/lastwrite.go | 0
 .../storage/pkg/lockfile/lockfile.go | 97 +-
 .../storage/pkg/lockfile/lockfile_unix.go | 42 +-
 .../storage/pkg/lockfile/lockfile_windows.go | 36 -
 .../storage/pkg/longpath/longpath.go | 0
 .../storage/pkg/mount/flags.go | 0
 .../storage/pkg/mount/flags_freebsd.go | 0
 .../storage/pkg/mount/flags_linux.go | 0
 .../storage/pkg/mount/flags_unsupported.go | 0
 .../storage/pkg/mount/mount.go | 0
 .../storage/pkg/mount/mounter_freebsd.go | 0
 .../storage/pkg/mount/mounter_linux.go | 0
 .../storage/pkg/mount/mounter_unsupported.go | 0
 .../storage/pkg/mount/mountinfo.go | 0
 .../storage/pkg/mount/mountinfo_linux.go | 0
 .../storage/pkg/mount/sharedsubtree_linux.go | 0
 .../storage/pkg/mount/unmount_unix.go | 0
 .../storage/pkg/mount/unmount_unsupported.go | 0
 .../storage/pkg/pools/pools.go | 2 +-
 .../storage/pkg/promise/promise.go | 0
 .../storage/pkg/reexec/README.md | 0
 .../storage/pkg/reexec/command_freebsd.go | 0
 .../storage/pkg/reexec/command_linux.go | 0
 .../storage/pkg/reexec/command_unix.go | 0
 .../storage/pkg/reexec/command_unsupported.go | 0
 .../storage/pkg/reexec/command_windows.go | 0
 .../storage/pkg/reexec/reexec.go | 0
 .../storage/pkg/regexp/regexp.go | 0
 .../pkg/regexp/regexp_dontprecompile.go | 0
 .../storage/pkg/regexp/regexp_precompile.go | 0
 .../storage/pkg/system/chmod.go | 0
 .../storage/pkg/system/chtimes.go | 0
 .../storage/pkg/system/chtimes_unix.go | 0
 .../storage/pkg/system/chtimes_windows.go | 0
 .../storage/pkg/system/errors.go | 0
 .../storage/pkg/system/exitcode.go | 0
 .../storage/pkg/system/extattr_freebsd.go | 0
 .../storage/pkg/system/extattr_unsupported.go | 0
 .../storage/pkg/system/init.go | 0
 .../storage/pkg/system/init_windows.go | 0
 .../storage/pkg/system/lchflags_bsd.go | 0
 .../storage/pkg/system/lchown.go | 0
 .../storage/pkg/system/lcow_unix.go | 0
 .../storage/pkg/system/lcow_windows.go | 0
 .../storage/pkg/system/lstat_unix.go | 0
 .../storage/pkg/system/lstat_windows.go | 0
 .../storage/pkg/system/meminfo.go | 0
 .../storage/pkg/system/meminfo_freebsd.go | 0
 .../storage/pkg/system/meminfo_linux.go | 0
 .../storage/pkg/system/meminfo_solaris.go | 0
 .../storage/pkg/system/meminfo_unsupported.go | 0
 .../storage/pkg/system/meminfo_windows.go | 0
 .../storage/pkg/system/mknod.go | 0
 .../storage/pkg/system/mknod_freebsd.go | 0
 .../storage/pkg/system/mknod_windows.go | 0
 .../storage/pkg/system/path.go | 0
 .../storage/pkg/system/path_unix.go | 0
 .../storage/pkg/system/path_windows.go | 0
 .../storage/pkg/system/process_unix.go | 0
 .../storage/pkg/system/rm.go | 2 +-
 .../storage/pkg/system/rm_common.go | 0
 .../storage/pkg/system/rm_freebsd.go | 0
 .../storage/pkg/system/stat_common.go | 0
 .../storage/pkg/system/stat_darwin.go | 0
 .../storage/pkg/system/stat_freebsd.go | 0
 .../storage/pkg/system/stat_linux.go | 0
 .../storage/pkg/system/stat_netbsd.go | 0
 .../storage/pkg/system/stat_openbsd.go | 0
 .../storage/pkg/system/stat_solaris.go | 0
 .../storage/pkg/system/stat_unix.go | 0
 .../storage/pkg/system/stat_windows.go | 0
 .../storage/pkg/system/syscall_unix.go | 0
 .../storage/pkg/system/syscall_windows.go | 0
 .../storage/pkg/system/umask.go | 0
 .../storage/pkg/system/umask_windows.go | 0
 .../storage/pkg/system/utimes_freebsd.go | 0
 .../storage/pkg/system/utimes_linux.go | 0
 .../storage/pkg/system/utimes_unsupported.go | 0
 .../storage/pkg/system/xattrs_darwin.go | 0
 .../storage/pkg/system/xattrs_freebsd.go | 0
 .../storage/pkg/system/xattrs_linux.go | 0
 .../storage/pkg/system/xattrs_unsupported.go | 0
 .../storage/pkg/unshare/getenv_linux_cgo.go | 0
 .../storage/pkg/unshare/getenv_linux_nocgo.go | 0
 .../storage/pkg/unshare/unshare.c | 0
 .../storage/pkg/unshare/unshare.go | 0
 .../storage/pkg/unshare/unshare_cgo.go | 0
 .../storage/pkg/unshare/unshare_darwin.go | 2 +-
 .../storage/pkg/unshare/unshare_freebsd.c | 0
 .../storage/pkg/unshare/unshare_freebsd.go | 2 +-
 .../storage/pkg/unshare/unshare_gccgo.go | 0
 .../storage/pkg/unshare/unshare_linux.go | 4 +-
 .../pkg/unshare/unshare_unsupported.go | 2 +-
 .../pkg/unshare/unshare_unsupported_cgo.go | 0
 vendor/go.uber.org/automaxprocs/.codecov.yml | 14 -
 vendor/go.uber.org/automaxprocs/.gitignore | 33 -
 vendor/go.uber.org/automaxprocs/CHANGELOG.md | 52 -
 .../automaxprocs/CODE_OF_CONDUCT.md | 75 -
 .../go.uber.org/automaxprocs/CONTRIBUTING.md | 81 -
 vendor/go.uber.org/automaxprocs/LICENSE | 19 -
 vendor/go.uber.org/automaxprocs/Makefile | 46 -
 vendor/go.uber.org/automaxprocs/README.md | 71 -
 .../go.uber.org/automaxprocs/automaxprocs.go | 33 -
 .../automaxprocs/internal/cgroups/doc.go | 23 -
 .../automaxprocs/maxprocs/maxprocs.go | 139 -
 .../automaxprocs/maxprocs/version.go | 24 -
 vendor/go.yaml.in/yaml/v2/.travis.yml | 17 +
 .../goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE | 0
 .../yaml/v2}/LICENSE.libyaml | 0
 .../goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE | 0
 vendor/go.yaml.in/yaml/v2/README.md | 131 +
 .../goyaml.v2 => go.yaml.in/yaml/v2}/apic.go | 0
 .../yaml/v2}/decode.go | 0
 .../yaml/v2}/emitterc.go | 0
 .../yaml/v2}/encode.go | 0
 .../yaml/v2}/parserc.go | 0
 .../yaml/v2}/readerc.go | 0
 .../yaml/v2}/resolve.go | 0
 .../yaml/v2}/scannerc.go | 0
 .../yaml/v2}/sorter.go | 0
 .../yaml/v2}/writerc.go | 0
 .../goyaml.v2 => go.yaml.in/yaml/v2}/yaml.go | 2 +-
 .../goyaml.v2 => go.yaml.in/yaml/v2}/yamlh.go | 0
 .../yaml/v2}/yamlprivateh.go | 0
 .../goyaml.v3 => go.yaml.in/yaml/v3}/LICENSE | 0
 .../goyaml.v3 => go.yaml.in/yaml/v3}/NOTICE | 0
 vendor/go.yaml.in/yaml/v3/README.md | 171 +
 .../goyaml.v3 => go.yaml.in/yaml/v3}/apic.go | 8 +-
 .../yaml/v3}/decode.go | 24 +-
 .../yaml/v3}/emitterc.go | 19 +-
 .../yaml/v3}/encode.go | 0
 .../yaml/v3}/parserc.go | 140 +-
 .../yaml/v3}/readerc.go | 8 +-
 .../yaml/v3}/resolve.go | 0
 .../yaml/v3}/scannerc.go | 42 +-
 .../yaml/v3}/sorter.go | 0
 .../yaml/v3}/writerc.go | 8 +-
 .../goyaml.v3 => go.yaml.in/yaml/v3}/yaml.go | 85 +-
 .../goyaml.v3 => go.yaml.in/yaml/v3}/yamlh.go | 10 +-
 .../yaml/v3}/yamlprivateh.go | 20 +-
 vendor/golang.org/x/crypto/acme/acme.go | 90 +-
 vendor/golang.org/x/crypto/acme/rfc8555.go | 4 +-
 vendor/golang.org/x/crypto/acme/types.go | 7 +-
 vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 2 +-
 .../x/crypto/salsa20/salsa/hsalsa20.go | 4 +
 vendor/golang.org/x/exp/maps/maps.go | 30 +-
 vendor/golang.org/x/exp/slices/slices.go | 41 +-
 vendor/golang.org/x/exp/slices/sort.go | 25 +-
 vendor/golang.org/x/mod/modfile/rule.go | 126 +-
 vendor/golang.org/x/mod/modfile/work.go | 8 +-
 vendor/golang.org/x/mod/module/module.go | 19 +-
 vendor/golang.org/x/mod/semver/semver.go | 30 +-
 vendor/golang.org/x/net/context/context.go | 35 +-
 vendor/golang.org/x/net/html/escape.go | 2 +-
 vendor/golang.org/x/net/html/parse.go | 57 +-
 vendor/golang.org/x/net/html/render.go | 2 +-
 vendor/golang.org/x/net/http2/config.go | 63 +-
 vendor/golang.org/x/net/http2/config_go124.go | 61 -
 vendor/golang.org/x/net/http2/config_go125.go | 15 +
 vendor/golang.org/x/net/http2/config_go126.go | 15 +
 .../x/net/http2/config_pre_go124.go | 16 -
 vendor/golang.org/x/net/http2/frame.go | 41 +-
 vendor/golang.org/x/net/http2/gotrack.go | 17 +-
 vendor/golang.org/x/net/http2/http2.go | 37 +-
 vendor/golang.org/x/net/http2/server.go | 143 +-
 vendor/golang.org/x/net/http2/timer.go | 20 -
 vendor/golang.org/x/net/http2/transport.go | 100 +-
 vendor/golang.org/x/net/http2/writesched.go | 2 +
 ...rity.go => writesched_priority_rfc7540.go} | 104 +-
 .../net/http2/writesched_priority_rfc9128.go | 209 +
 .../x/net/http2/writesched_roundrobin.go | 2 +-
 .../x/net/internal/httpcommon/request.go | 4 +-
 .../golang.org/x/net/internal/socks/socks.go | 2 +-
 vendor/golang.org/x/net/trace/events.go | 2 +-
 vendor/golang.org/x/oauth2/internal/doc.go | 2 +-
 vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +-
 vendor/golang.org/x/oauth2/internal/token.go | 50 +-
 .../golang.org/x/oauth2/internal/transport.go | 4 +-
 vendor/golang.org/x/oauth2/oauth2.go | 55 +-
 vendor/golang.org/x/oauth2/pkce.go | 15 +-
 vendor/golang.org/x/oauth2/token.go | 15 +-
 vendor/golang.org/x/oauth2/transport.go | 24 +-
 vendor/golang.org/x/sync/errgroup/errgroup.go | 121 +-
 .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 -
 vendor/golang.org/x/sys/plan9/pwd_plan9.go | 14 +-
 .../golang.org/x/sys/unix/affinity_linux.go | 9 +-
 vendor/golang.org/x/sys/unix/fdset.go | 4 +-
 vendor/golang.org/x/sys/unix/ifreq_linux.go | 4 +-
 vendor/golang.org/x/sys/unix/mkall.sh | 1 +
 vendor/golang.org/x/sys/unix/mkerrors.sh | 3 +
 .../golang.org/x/sys/unix/syscall_darwin.go | 56 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 4 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go | 17 +
 .../golang.org/x/sys/unix/syscall_solaris.go | 2 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 63 +-
 .../x/sys/unix/zerrors_linux_386.go | 3 +
 .../x/sys/unix/zerrors_linux_amd64.go | 3 +
 .../x/sys/unix/zerrors_linux_arm.go | 3 +
 .../x/sys/unix/zerrors_linux_arm64.go | 3 +
 .../x/sys/unix/zerrors_linux_loong64.go | 3 +
 .../x/sys/unix/zerrors_linux_mips.go | 3 +
 .../x/sys/unix/zerrors_linux_mips64.go | 3 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 3 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 3 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 3 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 3 +
 .../x/sys/unix/zerrors_linux_s390x.go | 3 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 3 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 8 +-
 .../x/sys/unix/zsysnum_linux_386.go | 1 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 1 +
 .../x/sys/unix/zsysnum_linux_arm.go | 1 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 1 +
 .../x/sys/unix/zsysnum_linux_loong64.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 1 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 1 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 1 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 1 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 180 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 18 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 16 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 20 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 16 +
 .../x/sys/unix/ztypes_linux_loong64.go | 16 +
 .../x/sys/unix/ztypes_linux_mips.go | 18 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 16 +
 .../x/sys/unix/ztypes_linux_mips64le.go | 16 +
 .../x/sys/unix/ztypes_linux_mipsle.go | 18 +-
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 20 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 16 +
 .../x/sys/unix/ztypes_linux_ppc64le.go | 16 +
 .../x/sys/unix/ztypes_linux_riscv64.go | 16 +
 .../x/sys/unix/ztypes_linux_s390x.go | 16 +
 .../x/sys/unix/ztypes_linux_sparc64.go | 16 +
 .../sys/windows/registry/zsyscall_windows.go | 16 +-
 .../x/sys/windows/syscall_windows.go | 2 +
 .../golang.org/x/sys/windows/types_windows.go | 22 +
 .../x/sys/windows/zsyscall_windows.go | 984 +-
 vendor/golang.org/x/term/term_windows.go | 4 +-
 vendor/golang.org/x/term/terminal.go | 9 +-
 vendor/golang.org/x/text/unicode/bidi/core.go | 11 +-
 vendor/golang.org/x/time/rate/sometimes.go | 4 +-
 .../x/tools/go/ast/astutil/enclosing.go | 23 +-
 .../x/tools/go/ast/astutil/rewrite.go | 4 +
 .../{internal/astutil => go/ast}/edge/edge.go | 0
 .../x/tools/go/ast/inspector/cursor.go | 502 +
 .../x/tools/go/ast/inspector/inspector.go | 49 +-
 .../x/tools/go/ast/inspector/typeof.go | 3 -
 .../x/tools/go/ast/inspector/walk.go | 2 +-
 vendor/golang.org/x/tools/go/packages/doc.go | 2 +
 .../golang.org/x/tools/go/packages/golist.go | 18 +-
 .../x/tools/go/packages/golist_overlay.go | 2 +-
 .../golang.org/x/tools/go/packages/visit.go | 85 +-
 .../x/tools/go/types/objectpath/objectpath.go | 7 +-
 .../x/tools/go/types/typeutil/map.go | 19 +-
 .../internal/gcimporter/iimport_go122.go | 53 -
 .../x/tools/internal/imports/fix.go | 9 +-
 .../x/tools/internal/imports/imports.go | 2 +-
 .../tools/internal/imports/source_modindex.go | 47 +-
 .../x/tools/internal/modindex/directories.go | 148 +-
 .../x/tools/internal/modindex/index.go | 233 +-
 .../x/tools/internal/modindex/lookup.go | 18 +-
 .../x/tools/internal/modindex/modindex.go | 205 +-
 .../x/tools/internal/modindex/symbols.go | 73 +-
 .../x/tools/internal/modindex/types.go | 25 -
 .../internal/packagesinternal/packages.go | 6 +
 .../x/tools/internal/stdlib/deps.go | 596 +-
 .../x/tools/internal/stdlib/manifest.go | 58 +-
 .../internal/typesinternal/classify_call.go | 8 +-
 .../x/tools/internal/typesinternal/types.go | 61 +-
 .../api/annotations/annotations.pb.go | 2 +-
 .../googleapis/api/annotations/client.pb.go | 2 +-
 .../api/annotations/field_behavior.pb.go | 2 +-
 .../api/annotations/field_info.pb.go | 2 +-
 .../googleapis/api/annotations/http.pb.go | 2 +-
 .../googleapis/api/annotations/resource.pb.go | 2 +-
 .../googleapis/api/annotations/routing.pb.go | 2 +-
 .../api/expr/v1alpha1/checked.pb.go | 2 +-
 .../googleapis/api/expr/v1alpha1/eval.pb.go | 2 +-
 .../api/expr/v1alpha1/explain.pb.go | 2 +-
 .../googleapis/api/expr/v1alpha1/syntax.pb.go | 2 +-
 .../googleapis/api/expr/v1alpha1/value.pb.go | 2 +-
 .../googleapis/api/httpbody/httpbody.pb.go | 2 +-
 .../googleapis/api/launch_stage.pb.go | 2 +-
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 129 +-
 vendor/google.golang.org/grpc/MAINTAINERS.md | 8 +-
 vendor/google.golang.org/grpc/README.md | 1 +
 .../grpc/balancer/balancer.go | 8 +-
 .../endpointsharding/endpointsharding.go | 57 +-
 .../pickfirst/pickfirstleaf/pickfirstleaf.go | 33 +-
 .../grpc/balancer/roundrobin/roundrobin.go | 7 -
 .../grpc_binarylog_v1/binarylog.pb.go | 191 +-
 vendor/google.golang.org/grpc/clientconn.go | 36 +-
 .../grpc/credentials/credentials.go | 59 +-
 .../grpc/credentials/insecure/insecure.go | 8 +-
 .../google.golang.org/grpc/credentials/tls.go | 46 +-
 vendor/google.golang.org/grpc/dialoptions.go | 25 +-
 .../grpc/health/grpc_health_v1/health.pb.go | 82 +-
 .../health/grpc_health_v1/health_grpc.pb.go | 6 +-
 .../balancer/gracefulswitch/gracefulswitch.go | 10 +-
 .../grpc/internal/credentials/credentials.go | 14 -
 .../grpc/internal/envconfig/envconfig.go | 24 +-
 .../grpc/internal/envconfig/xds.go | 5 +
 .../grpc/internal/grpcsync/event.go | 19 +-
 .../grpc/internal/internal.go | 36 +-
 .../delegatingresolver/delegatingresolver.go | 49 +-
 .../internal/resolver/dns/dns_resolver.go | 20 +-
 .../grpc/internal/status/status.go | 8 +
 .../grpc/internal/transport/controlbuf.go | 68 +-
 .../grpc/internal/transport/handler_server.go | 2 +
 .../grpc/internal/transport/http2_client.go | 44 +-
 .../grpc/internal/transport/http2_server.go | 57 +-
 .../grpc/internal/transport/http_util.go | 4 +-
 .../grpc/internal/transport/transport.go | 8 +
 .../grpc/mem/buffer_slice.go | 11 +
 .../google.golang.org/grpc/picker_wrapper.go | 36 +-
 .../grpc/resolver/resolver.go | 5 +
 vendor/google.golang.org/grpc/rpc_util.go | 31 +
 vendor/google.golang.org/grpc/server.go | 27 +
 .../google.golang.org/grpc/stats/handlers.go | 9 +
 vendor/google.golang.org/grpc/stats/stats.go | 20 +-
 vendor/google.golang.org/grpc/stream.go | 91 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 .../protobuf/encoding/protowire/wire.go | 26 +-
 .../editiondefaults/editions_defaults.binpb | Bin 146 -> 154 bytes
 .../internal/editionssupport/editions.go | 2 +-
 .../protobuf/internal/filedesc/editions.go | 15 +-
 .../protobuf/internal/filedesc/presence.go | 33 +
 .../protobuf/internal/genid/api_gen.go | 6 +
 .../protobuf/internal/genid/descriptor_gen.go | 90 +-
 .../internal/impl/codec_message_opaque.go | 3 +-
 .../protobuf/internal/impl/message_opaque.go | 45 +-
 .../protobuf/internal/impl/presence.go | 3 -
 .../protobuf/internal/version/version.go | 2 +-
 .../reflect/protoreflect/source_gen.go | 8 +
 .../types/descriptorpb/descriptor.pb.go | 643 +-
 .../helm/v3/internal/version/version.go | 2 +-
 vendor/helm.sh/helm/v3/pkg/action/action.go | 12 +-
 vendor/helm.sh/helm/v3/pkg/action/hooks.go | 78 +-
 vendor/helm.sh/helm/v3/pkg/action/install.go | 6 +-
 .../helm/v3/pkg/action/registry_login.go | 25 +-
 .../helm/v3/pkg/chartutil/dependencies.go | 5 +-
 .../helm.sh/helm/v3/pkg/chartutil/expand.go | 3 +
 .../helm/v3/pkg/chartutil/jsonschema.go | 93 +-
 .../helm.sh/helm/v3/pkg/downloader/manager.go | 14 +
 vendor/helm.sh/helm/v3/pkg/kube/client.go | 107 +-
 vendor/helm.sh/helm/v3/pkg/kube/fake/fake.go | 12 +
 .../helm.sh/helm/v3/pkg/kube/fake/printer.go | 15 +-
 vendor/helm.sh/helm/v3/pkg/kube/interface.go | 24 +-
 vendor/helm.sh/helm/v3/pkg/kube/wait.go | 12 +-
 .../helm/v3/pkg/lint/rules/chartfile.go | 3 +
 vendor/helm.sh/helm/v3/pkg/plugin/hooks.go | 3 +
 vendor/helm.sh/helm/v3/pkg/plugin/plugin.go | 181 +-
 vendor/helm.sh/helm/v3/pkg/registry/client.go | 577 +-
 .../helm.sh/helm/v3/pkg/registry/fallback.go | 60 +
 .../helm.sh/helm/v3/pkg/registry/reference.go | 6 +-
 .../helm.sh/helm/v3/pkg/registry/transport.go | 187 +
 vendor/helm.sh/helm/v3/pkg/registry/util.go | 23 +-
 vendor/helm.sh/helm/v3/pkg/release/hook.go | 16 +
 .../v3/pkg/releaseutil/manifest_sorter.go | 27 +-
 vendor/helm.sh/helm/v3/pkg/repo/index.go | 1 +
 .../helm/v3/pkg/repo/repotest/server.go | 5 +-
 vendor/k8s.io/api/admission/v1/doc.go | 2 +-
 vendor/k8s.io/api/admission/v1beta1/doc.go | 2 +-
 .../api/admissionregistration/v1/doc.go | 2 +-
 .../api/admissionregistration/v1alpha1/doc.go | 2 +-
 .../v1alpha1/generated.proto | 13 +-
 .../admissionregistration/v1alpha1/types.go | 23 +-
 .../v1alpha1/types_swagger_doc_generated.go | 8 +-
 .../api/admissionregistration/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/apidiscovery/v2/doc.go | 2 +-
 vendor/k8s.io/api/apidiscovery/v2beta1/doc.go | 2 +-
 .../api/apiserverinternal/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/apps/v1/doc.go | 2 +-
 vendor/k8s.io/api/apps/v1/generated.pb.go | 336 +-
 vendor/k8s.io/api/apps/v1/generated.proto | 41 +-
 vendor/k8s.io/api/apps/v1/types.go | 41 +-
 .../apps/v1/types_swagger_doc_generated.go | 24 +-
 .../api/apps/v1/zz_generated.deepcopy.go | 10 +
 vendor/k8s.io/api/apps/v1beta1/doc.go | 2 +-
 .../k8s.io/api/apps/v1beta1/generated.pb.go | 286 +-
 .../k8s.io/api/apps/v1beta1/generated.proto | 22 +-
 vendor/k8s.io/api/apps/v1beta1/types.go | 22 +-
 .../v1beta1/types_swagger_doc_generated.go | 15 +-
 .../api/apps/v1beta1/zz_generated.deepcopy.go | 5 +
 vendor/k8s.io/api/apps/v1beta2/doc.go | 2 +-
 .../k8s.io/api/apps/v1beta2/generated.pb.go | 352 +-
 .../k8s.io/api/apps/v1beta2/generated.proto | 41 +-
 vendor/k8s.io/api/apps/v1beta2/types.go | 41 +-
 .../v1beta2/types_swagger_doc_generated.go | 24 +-
 .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 +
 vendor/k8s.io/api/authentication/v1/doc.go | 2 +-
 .../k8s.io/api/authentication/v1alpha1/doc.go | 2 +-
 .../k8s.io/api/authentication/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/authorization/v1/doc.go | 2 +-
 .../k8s.io/api/authorization/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/autoscaling/v1/doc.go | 2 +-
 vendor/k8s.io/api/autoscaling/v2/doc.go | 2 +-
 .../k8s.io/api/autoscaling/v2/generated.pb.go | 272 +-
 .../k8s.io/api/autoscaling/v2/generated.proto | 30 +-
 vendor/k8s.io/api/autoscaling/v2/types.go | 30 +-
 .../v2/types_swagger_doc_generated.go | 5 +-
 .../autoscaling/v2/zz_generated.deepcopy.go | 5 +
 vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 2 +-
 vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 2 +-
 vendor/k8s.io/api/batch/v1/doc.go | 2 +-
 vendor/k8s.io/api/batch/v1/generated.proto | 10 -
 vendor/k8s.io/api/batch/v1/types.go | 15 -
 .../batch/v1/types_swagger_doc_generated.go | 10 +-
 vendor/k8s.io/api/batch/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/certificates/v1/doc.go | 2 +-
 .../k8s.io/api/certificates/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 +-
 .../api/certificates/v1beta1/generated.pb.go | 761 +-
 .../api/certificates/v1beta1/generated.proto | 73 +
 .../api/certificates/v1beta1/register.go | 2 +
 .../k8s.io/api/certificates/v1beta1/types.go | 85 +
 .../v1beta1/types_swagger_doc_generated.go | 30 +
 .../v1beta1/zz_generated.deepcopy.go | 76 +
 .../zz_generated.prerelease-lifecycle.go | 36 +
 vendor/k8s.io/api/coordination/v1/doc.go | 2 +-
 .../k8s.io/api/coordination/v1alpha2/doc.go | 2 +-
 .../api/coordination/v1alpha2/generated.proto | 2 -
 .../k8s.io/api/coordination/v1alpha2/types.go | 2 -
 .../v1alpha2/types_swagger_doc_generated.go | 2 +-
 vendor/k8s.io/api/coordination/v1beta1/doc.go | 2 +-
 .../api/coordination/v1beta1/generated.pb.go | 915 +-
 .../api/coordination/v1beta1/generated.proto | 69 +
 .../api/coordination/v1beta1/register.go | 2 +
 .../k8s.io/api/coordination/v1beta1/types.go | 73 +
 .../v1beta1/types_swagger_doc_generated.go | 34 +
 .../v1beta1/zz_generated.deepcopy.go | 84 +
 .../zz_generated.prerelease-lifecycle.go | 36 +
 vendor/k8s.io/api/core/v1/doc.go | 2 +-
 vendor/k8s.io/api/core/v1/generated.pb.go | 2767 +-
 vendor/k8s.io/api/core/v1/generated.proto | 77 +-
 vendor/k8s.io/api/core/v1/lifecycle.go | 24 +
 vendor/k8s.io/api/core/v1/types.go | 201 +-
 .../core/v1/types_swagger_doc_generated.go | 50 +-
 .../api/core/v1/zz_generated.deepcopy.go | 38 +-
 vendor/k8s.io/api/discovery/v1/doc.go | 2 +-
 .../k8s.io/api/discovery/v1/generated.pb.go | 336 +-
 .../k8s.io/api/discovery/v1/generated.proto | 78 +-
 vendor/k8s.io/api/discovery/v1/types.go | 78 +-
 .../v1/types_swagger_doc_generated.go | 28 +-
 .../api/discovery/v1/zz_generated.deepcopy.go | 21 +
 vendor/k8s.io/api/discovery/v1beta1/doc.go | 2 +-
 .../api/discovery/v1beta1/generated.pb.go | 329 +-
 .../api/discovery/v1beta1/generated.proto | 13 +
 vendor/k8s.io/api/discovery/v1beta1/types.go | 13 +
 .../v1beta1/types_swagger_doc_generated.go | 10 +
 .../v1beta1/zz_generated.deepcopy.go | 21 +
 vendor/k8s.io/api/events/v1/doc.go | 2 +-
 vendor/k8s.io/api/events/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/extensions/v1beta1/doc.go | 2 +-
 .../api/extensions/v1beta1/generated.pb.go | 418 +-
 .../api/extensions/v1beta1/generated.proto | 40 +-
 vendor/k8s.io/api/extensions/v1beta1/types.go | 40 +-
 .../v1beta1/types_swagger_doc_generated.go | 24 +-
 .../v1beta1/zz_generated.deepcopy.go | 10 +
 vendor/k8s.io/api/flowcontrol/v1/doc.go | 2 +-
 vendor/k8s.io/api/flowcontrol/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/flowcontrol/v1beta2/doc.go | 2 +-
 vendor/k8s.io/api/flowcontrol/v1beta3/doc.go | 2 +-
 vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/networking/v1/doc.go | 2 +-
 .../k8s.io/api/networking/v1/generated.pb.go | 3729 +++--
 .../k8s.io/api/networking/v1/generated.proto | 109 +
 vendor/k8s.io/api/networking/v1/register.go | 4 +
 vendor/k8s.io/api/networking/v1/types.go | 130 +
 .../v1/types_swagger_doc_generated.go | 80 +
 .../api/networking/v1/well_known_labels.go | 33 +
 .../networking/v1/zz_generated.deepcopy.go | 202 +
 .../v1/zz_generated.prerelease-lifecycle.go | 24 +
 vendor/k8s.io/api/networking/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/networking/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/node/v1/doc.go | 2 +-
 vendor/k8s.io/api/node/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/node/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/policy/v1/doc.go | 2 +-
 vendor/k8s.io/api/policy/v1/generated.proto | 3 -
 vendor/k8s.io/api/policy/v1/types.go | 3 -
 .../policy/v1/types_swagger_doc_generated.go | 2 +-
 vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +-
 .../k8s.io/api/policy/v1beta1/generated.proto | 3 -
 vendor/k8s.io/api/policy/v1beta1/types.go | 3 -
 .../v1beta1/types_swagger_doc_generated.go | 2 +-
 vendor/k8s.io/api/rbac/v1/doc.go | 2 +-
 vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/resource/v1alpha3/doc.go | 2 +-
 .../api/resource/v1alpha3/generated.pb.go | 6403 +++--
 .../api/resource/v1alpha3/generated.proto | 514 +-
 .../k8s.io/api/resource/v1alpha3/register.go | 2 +
 vendor/k8s.io/api/resource/v1alpha3/types.go | 609 +-
 .../v1alpha3/types_swagger_doc_generated.go | 165 +-
 .../v1alpha3/zz_generated.deepcopy.go | 327 +-
 .../zz_generated.prerelease-lifecycle.go | 36 +
 .../api/resource/v1beta1/devicetaint.go} | 30 +-
 vendor/k8s.io/api/resource/v1beta1/doc.go | 2 +-
 .../api/resource/v1beta1/generated.pb.go | 3890 ++-
 .../api/resource/v1beta1/generated.proto | 428 +-
 vendor/k8s.io/api/resource/v1beta1/types.go | 506 +-
 .../v1beta1/types_swagger_doc_generated.go | 124 +-
 .../resource/v1beta1/zz_generated.deepcopy.go | 202 +-
 .../api/resource/v1beta2/devicetaint.go | 35 +
 vendor/k8s.io/api/resource/v1beta2/doc.go | 24 +
 .../api/resource/v1beta2/generated.pb.go | 11047 ++++++++++++
 .../api/resource/v1beta2/generated.proto | 1278 ++
 .../k8s.io/api/resource/v1beta2/register.go | 60 +
 vendor/k8s.io/api/resource/v1beta2/types.go | 1552 ++
 .../v1beta2/types_swagger_doc_generated.go | 464 +
 .../resource/v1beta2/zz_generated.deepcopy.go | 1092 ++
 .../zz_generated.prerelease-lifecycle.go | 166 +
 vendor/k8s.io/api/scheduling/v1/doc.go | 2 +-
 vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 +-
 vendor/k8s.io/api/scheduling/v1beta1/doc.go | 2 +-
 vendor/k8s.io/api/storage/v1/doc.go | 2 +-
 vendor/k8s.io/api/storage/v1/generated.pb.go | 271 +-
 vendor/k8s.io/api/storage/v1/generated.proto | 22 +
 vendor/k8s.io/api/storage/v1/types.go | 22 +
 .../storage/v1/types_swagger_doc_generated.go | 26 +-
 .../api/storage/v1/zz_generated.deepcopy.go | 10 +
 vendor/k8s.io/api/storage/v1alpha1/doc.go | 2 +-
 .../api/storage/v1alpha1/generated.pb.go | 160 +-
 .../api/storage/v1alpha1/generated.proto | 8 +
 vendor/k8s.io/api/storage/v1alpha1/types.go | 8 +
 .../v1alpha1/types_swagger_doc_generated.go | 7 +-
 .../storage/v1alpha1/zz_generated.deepcopy.go | 5 +
 vendor/k8s.io/api/storage/v1beta1/doc.go | 2 +-
 .../api/storage/v1beta1/generated.pb.go | 280 +-
 .../api/storage/v1beta1/generated.proto | 22 +
 vendor/k8s.io/api/storage/v1beta1/types.go | 22 +
 .../v1beta1/types_swagger_doc_generated.go | 26 +-
 .../storage/v1beta1/zz_generated.deepcopy.go | 10 +
 .../api/storagemigration/v1alpha1/doc.go | 2 +-
 .../pkg/apis/apiextensions/doc.go | 2 +-
 .../pkg/apis/apiextensions/v1/doc.go | 2 +-
 .../pkg/apis/apiextensions/v1beta1/doc.go | 2 +-
 .../apiextensions/validation/validation.go | 3 +-
 .../pkg/apiserver/validation/formats.go | 133 +-
 .../pkg/apiserver/validation/validation.go | 4 +-
 .../pkg/features/kube_features.go | 1 +
 .../k8s.io/apimachinery/pkg/api/errors/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/api/meta/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 +
 .../pkg/api/operation/operation.go | 56 +
 .../apimachinery/pkg/api/validation/doc.go | 2 +-
 .../pkg/api/validation/generic.go | 2 +-
 .../k8s.io/apimachinery/pkg/apis/asn1/oid.go | 43 +
 .../pkg/apis/meta/internalversion/doc.go | 2 +-
 .../apis/meta/internalversion/scheme/doc.go | 2 +-
 .../pkg/apis/meta/internalversion/types.go | 2 -
 .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +-
 .../pkg/apis/meta/v1/micro_time_fuzz.go | 13 +-
 .../pkg/apis/meta/v1/time_fuzz.go | 13 +-
 .../pkg/apis/meta/v1/unstructured/helpers.go | 31 +-
 .../apis/meta/v1/unstructured/unstructured.go | 4 +-
 .../pkg/apis/meta/v1/validation/validation.go | 2 +-
 .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/conversion/doc.go | 2 +-
 .../pkg/conversion/queryparams/doc.go | 2 +-
 vendor/k8s.io/apimachinery/pkg/fields/doc.go | 2 +-
 vendor/k8s.io/apimachinery/pkg/labels/doc.go | 2 +-
 vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 2 +-
 .../apimachinery/pkg/runtime/interfaces.go | 1 +
 .../k8s.io/apimachinery/pkg/runtime/scheme.go | 39 +
 .../serializer/cbor/internal/modes/custom.go | 4 +-
 .../pkg/runtime/serializer/codec_factory.go | 23 +-
 .../runtime/serializer/json/collections.go | 230 +
 .../pkg/runtime/serializer/json/json.go | 16 +-
 .../serializer/protobuf/collections.go | 174 +
 .../pkg/runtime/serializer/protobuf/doc.go | 2 +-
 .../runtime/serializer/protobuf/protobuf.go | 87 +-
 .../apimachinery/pkg/runtime/types_proto.go | 127 +-
 vendor/k8s.io/apimachinery/pkg/types/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/util/diff/diff.go | 2 +-
 .../apimachinery/pkg/util/errors/doc.go | 2 +-
 .../apimachinery/pkg/util/framer/framer.go | 6 +-
 .../apimachinery/pkg/util/httpstream/doc.go | 2 +-
 .../pkg/util/httpstream/wsstream/doc.go | 2 +-
 .../pkg/util/intstr/instr_fuzz.go | 14 +-
 .../pkg/util/jsonmergepatch/patch.go | 160 +
 .../k8s.io/apimachinery/pkg/util/proxy/doc.go | 2 +-
 .../apimachinery/pkg/util/runtime/runtime.go | 46 +-
 .../k8s.io/apimachinery/pkg/util/sets/doc.go | 2 +-
 .../util/validation/field/error_matcher.go | 212 +
 .../pkg/util/validation/field/errors.go | 132 +-
 .../apimachinery/pkg/util/validation/ip.go | 278 +
 .../pkg/util/validation/validation.go | 40 -
 .../apimachinery/pkg/util/version/doc.go | 2 +-
 .../apimachinery/pkg/util/version/version.go | 18 +-
 .../apimachinery/pkg/util/wait/backoff.go | 50 +-
 .../k8s.io/apimachinery/pkg/util/wait/doc.go | 2 +-
 .../k8s.io/apimachinery/pkg/util/wait/loop.go | 4 +-
 .../k8s.io/apimachinery/pkg/util/wait/wait.go | 9 +-
 .../apimachinery/pkg/util/yaml/decoder.go | 170 +-
 .../pkg/util/yaml/stream_reader.go | 130 +
 vendor/k8s.io/apimachinery/pkg/version/doc.go | 4 +-
 .../k8s.io/apimachinery/pkg/version/types.go | 28 +-
 vendor/k8s.io/apimachinery/pkg/watch/doc.go | 2 +-
 .../apimachinery/pkg/watch/streamwatcher.go | 15 +-
 vendor/k8s.io/apimachinery/pkg/watch/watch.go | 35 +-
 .../apiserver/pkg/apis/apiserver/doc.go | 2 +-
 .../apiserver/pkg/apis/apiserver/v1/doc.go | 2 +-
 .../pkg/apis/apiserver/v1alpha1/doc.go | 2 +-
 .../pkg/apis/apiserver/v1alpha1/types.go | 4 +-
 .../pkg/apis/apiserver/v1beta1/doc.go | 2 +-
 .../pkg/apis/apiserver/v1beta1/types.go | 4 +-
 vendor/k8s.io/apiserver/pkg/apis/audit/doc.go | 2 +-
 .../k8s.io/apiserver/pkg/apis/audit/v1/doc.go | 2 +-
 .../pkg/authentication/request/x509/doc.go | 2 +-
 .../pkg/authentication/request/x509/x509.go | 39 +
 .../apiserver/pkg/authentication/user/doc.go | 2 +-
 .../apiserver/pkg/cel/environment/base.go | 17 +-
 .../k8s.io/apiserver/pkg/cel/library/cidr.go | 4 +-
 .../k8s.io/apiserver/pkg/cel/library/cost.go | 11 +-
 .../apiserver/pkg/cel/library/format.go | 12 +-
 vendor/k8s.io/apiserver/pkg/cel/library/ip.go | 4 +-
 .../apiserver/pkg/cel/library/semverlib.go | 115 +-
 .../apiserver/pkg/endpoints/request/doc.go | 2 +-
 .../pkg/endpoints/request/webhook_duration.go | 32 +
 .../apiserver/pkg/features/kube_features.go | 117 +-
 .../pkg/util/compatibility/registry.go | 53 +
 .../pkg/util/compatibility/version.go | 65 +
 .../cli-runtime/pkg/genericclioptions/doc.go | 2 +-
 vendor/k8s.io/cli-runtime/pkg/printers/doc.go | 2 +-
 .../cli-runtime/pkg/resource/builder.go | 2 +-
 vendor/k8s.io/cli-runtime/pkg/resource/doc.go | 2 +-
 .../k8s.io/cli-runtime/pkg/resource/result.go | 6 +-
 .../apps/v1/deploymentstatus.go | 9 +
 .../apps/v1/replicasetstatus.go | 9 +
 .../apps/v1beta1/deploymentstatus.go | 9 +
 .../apps/v1beta2/deploymentstatus.go | 9 +
 .../apps/v1beta2/replicasetstatus.go | 9 +
 .../autoscaling/v2/hpascalingrules.go | 10 +
 .../v1beta1/clustertrustbundle.go | 253 +
 .../v1beta1/clustertrustbundlespec.go | 48 +
 .../coordination/v1beta1/leasecandidate.go | 255 +
 .../v1beta1/leasecandidatespec.go | 89 +
 .../core/v1/containerstatus.go | 9 +
 .../applyconfigurations/core/v1/lifecycle.go | 17 +-
 .../core/v1/nodeswapstatus.go | 39 +
 .../core/v1/nodesysteminfo.go | 29 +-
 .../core/v1/podcondition.go | 9 +
 .../applyconfigurations/core/v1/podstatus.go | 9 +
 .../discovery/v1/endpointhints.go | 14 +
 .../discovery/v1/fornode.go | 39 +
 .../discovery/v1beta1/endpointhints.go | 14 +
 .../discovery/v1beta1/fornode.go | 39 +
 .../extensions/v1beta1/deploymentstatus.go | 9 +
 .../extensions/v1beta1/replicasetstatus.go | 9 +
 .../applyconfigurations/internal/internal.go | 1344 +-
 .../networking/v1/ipaddress.go | 253 +
 .../networking/v1/ipaddressspec.go | 39 +
 .../networking/v1/parentreference.go | 66 +
 .../networking/v1/servicecidr.go | 262 +
 .../networking/v1/servicecidrspec.go | 41 +
 .../networking/v1/servicecidrstatus.go | 48 +
 .../resource/v1alpha3/basicdevice.go | 60 +-
 .../resource/v1alpha3/counter.go | 43 +
 .../resource/v1alpha3/counterset.go | 54 +
 .../v1alpha3/devicecounterconsumption.go | 54 +
 .../resource/v1alpha3/devicerequest.go | 28 +
 .../v1alpha3/devicerequestallocationresult.go | 24 +-
 .../resource/v1alpha3/devicesubrequest.go | 98 +
 .../resource/v1alpha3/devicetaint.go | 71 +
 .../resource/v1alpha3/devicetaintrule.go | 253 +
 .../resource/v1alpha3/devicetaintrulespec.go | 48 +
 .../resource/v1alpha3/devicetaintselector.go | 80 +
 .../resource/v1alpha3/devicetoleration.go | 79 +
 .../resource/v1alpha3/resourceslicespec.go | 35 +-
 .../resource/v1beta1/basicdevice.go | 60 +-
 .../resource/v1beta1/counter.go | 43 +
 .../resource/v1beta1/counterset.go | 54 +
 .../v1beta1/devicecounterconsumption.go | 54 +
 .../resource/v1beta1/devicerequest.go | 28 +
 .../v1beta1/devicerequestallocationresult.go | 24 +-
 .../resource/v1beta1/devicesubrequest.go | 98 +
 .../resource/v1beta1/devicetaint.go | 71 +
 .../resource/v1beta1/devicetoleration.go | 79 +
 .../resource/v1beta1/resourceslicespec.go | 35 +-
 .../resource/v1beta2/allocateddevicestatus.go | 94 +
 .../resource/v1beta2/allocationresult.go | 52 +
 .../resource/v1beta2/celdeviceselector.go | 39 +
 .../resource/v1beta2/counter.go | 43 +
 .../resource/v1beta2/counterset.go | 54 +
 .../resource/v1beta2/device.go | 129 +
 .../v1beta2/deviceallocationconfiguration.go | 63 +
 .../v1beta2/deviceallocationresult.go | 58 +
 .../resource/v1beta2/deviceattribute.go | 66 +
 .../resource/v1beta2/devicecapacity.go | 43 +
 .../resource/v1beta2/deviceclaim.go | 72 +
 .../v1beta2/deviceclaimconfiguration.go | 50 +
 .../resource/v1beta2/deviceclass.go | 253 +
 .../v1beta2/deviceclassconfiguration.go | 39 +
 .../resource/v1beta2/deviceclassspec.go | 58 +
 .../resource/v1beta2/deviceconfiguration.go | 39 +
 .../resource/v1beta2/deviceconstraint.go | 54 +
 .../v1beta2/devicecounterconsumption.go | 54 +
 .../resource/v1beta2/devicerequest.go | 62 +
 .../v1beta2/devicerequestallocationresult.go | 89 +
 .../resource/v1beta2/deviceselector.go | 39 +
 .../resource/v1beta2/devicesubrequest.go | 98 +
 .../resource/v1beta2/devicetaint.go | 71 +
 .../resource/v1beta2/devicetoleration.go | 79 +
 .../resource/v1beta2/exactdevicerequest.go | 98 +
 .../resource/v1beta2/networkdevicedata.go | 59 +
 .../v1beta2/opaquedeviceconfiguration.go | 52 +
 .../resource/v1beta2/resourceclaim.go | 264 +
 .../v1beta2/resourceclaimconsumerreference.go | 70 +
 .../resource/v1beta2/resourceclaimspec.go | 39 +
 .../resource/v1beta2/resourceclaimstatus.go | 67 +
 .../resource/v1beta2/resourceclaimtemplate.go | 255 +
 .../v1beta2/resourceclaimtemplatespec.go | 194 +
 .../resource/v1beta2/resourcepool.go | 57 +
 .../resource/v1beta2/resourceslice.go | 253 +
 .../resource/v1beta2/resourceslicespec.go | 116 +
 .../storage/v1/csidriverspec.go | 25 +-
 .../storage/v1/volumeerror.go | 13 +-
 .../storage/v1alpha1/volumeerror.go | 13 +-
 .../storage/v1beta1/csidriverspec.go | 25 +-
 .../storage/v1beta1/volumeerror.go | 13 +-
 .../discovery/aggregated_discovery.go | 2 +-
 .../client-go/discovery/discovery_client.go | 3 +-
 vendor/k8s.io/client-go/discovery/doc.go | 2 +-
 .../client-go/features/known_features.go | 7 +
 vendor/k8s.io/client-go/gentype/fake.go | 1 +
 .../v1/mutatingwebhookconfiguration.go | 16 +-
 .../v1/validatingadmissionpolicy.go | 16 +-
 .../v1/validatingadmissionpolicybinding.go | 16 +-
 .../v1/validatingwebhookconfiguration.go | 16 +-
 .../v1alpha1/mutatingadmissionpolicy.go | 16 +-
 .../mutatingadmissionpolicybinding.go | 16 +-
 .../v1alpha1/validatingadmissionpolicy.go | 16 +-
 .../validatingadmissionpolicybinding.go | 16 +-
 .../v1beta1/mutatingwebhookconfiguration.go | 16 +-
 .../v1beta1/validatingadmissionpolicy.go | 16 +-
 .../validatingadmissionpolicybinding.go | 16 +-
 .../v1beta1/validatingwebhookconfiguration.go | 16 +-
 .../v1alpha1/storageversion.go | 16 +-
 .../informers/apps/v1/controllerrevision.go | 16 +-
 .../client-go/informers/apps/v1/daemonset.go | 16 +-
 .../client-go/informers/apps/v1/deployment.go | 16 +-
 .../client-go/informers/apps/v1/replicaset.go | 16 +-
 .../informers/apps/v1/statefulset.go | 16 +-
 .../apps/v1beta1/controllerrevision.go | 16 +-
 .../informers/apps/v1beta1/deployment.go | 16 +-
 .../informers/apps/v1beta1/statefulset.go | 16 +-
 .../apps/v1beta2/controllerrevision.go | 16 +-
 .../informers/apps/v1beta2/daemonset.go | 16 +-
 .../informers/apps/v1beta2/deployment.go | 16 +-
 .../informers/apps/v1beta2/replicaset.go | 16 +-
 .../informers/apps/v1beta2/statefulset.go | 16 +-
 .../autoscaling/v1/horizontalpodautoscaler.go | 16 +-
 .../autoscaling/v2/horizontalpodautoscaler.go | 16 +-
 .../v2beta1/horizontalpodautoscaler.go | 16 +-
 .../v2beta2/horizontalpodautoscaler.go | 16 +-
 .../client-go/informers/batch/v1/cronjob.go | 16 +-
 .../client-go/informers/batch/v1/job.go | 16 +-
 .../informers/batch/v1beta1/cronjob.go | 16 +-
 .../v1/certificatesigningrequest.go | 16 +-
 .../v1alpha1/clustertrustbundle.go | 16 +-
 .../v1beta1/certificatesigningrequest.go | 16 +-
 .../v1beta1/clustertrustbundle.go | 101 +
 .../certificates/v1beta1/interface.go | 7 +
 .../informers/coordination/v1/lease.go | 16 +-
 .../coordination/v1alpha2/leasecandidate.go | 16 +-
 .../coordination/v1beta1/interface.go | 7 +
 .../informers/coordination/v1beta1/lease.go | 16 +-
 .../coordination/v1beta1/leasecandidate.go | 102 +
 .../informers/core/v1/componentstatus.go | 16 +-
 .../client-go/informers/core/v1/configmap.go | 16 +-
 .../client-go/informers/core/v1/endpoints.go | 16 +-
 .../client-go/informers/core/v1/event.go | 16 +-
 .../client-go/informers/core/v1/limitrange.go | 16 +-
 .../client-go/informers/core/v1/namespace.go | 16 +-
 .../client-go/informers/core/v1/node.go | 16 +-
 .../informers/core/v1/persistentvolume.go | 16 +-
 .../core/v1/persistentvolumeclaim.go | 16 +-
 .../k8s.io/client-go/informers/core/v1/pod.go | 16 +-
 .../informers/core/v1/podtemplate.go | 16 +-
 .../core/v1/replicationcontroller.go | 16 +-
 .../informers/core/v1/resourcequota.go | 16 +-
 .../client-go/informers/core/v1/secret.go | 16 +-
 .../client-go/informers/core/v1/service.go | 16 +-
 .../informers/core/v1/serviceaccount.go | 16 +-
 .../informers/discovery/v1/endpointslice.go | 16 +-
 .../discovery/v1beta1/endpointslice.go | 16 +-
 vendor/k8s.io/client-go/informers/doc.go | 2 +-
 .../client-go/informers/events/v1/event.go | 16 +-
 .../informers/events/v1beta1/event.go | 16 +-
 .../informers/extensions/v1beta1/daemonset.go | 16 +-
 .../extensions/v1beta1/deployment.go | 16 +-
 .../informers/extensions/v1beta1/ingress.go | 16 +-
 .../extensions/v1beta1/networkpolicy.go | 16 +-
 .../extensions/v1beta1/replicaset.go | 16 +-
 .../informers/flowcontrol/v1/flowschema.go | 16 +-
 .../v1/prioritylevelconfiguration.go | 16 +-
 .../flowcontrol/v1beta1/flowschema.go | 16 +-
 .../v1beta1/prioritylevelconfiguration.go | 16 +-
 .../flowcontrol/v1beta2/flowschema.go | 16 +-
 .../v1beta2/prioritylevelconfiguration.go | 16 +-
 .../flowcontrol/v1beta3/flowschema.go | 16 +-
 .../v1beta3/prioritylevelconfiguration.go | 16 +-
 vendor/k8s.io/client-go/informers/generic.go | 21 +
 .../informers/networking/v1/ingress.go | 16 +-
 .../informers/networking/v1/ingressclass.go | 16 +-
 .../informers/networking/v1/interface.go | 14 +
 .../informers/networking/v1/ipaddress.go | 101 +
 .../informers/networking/v1/networkpolicy.go | 16 +-
 .../informers/networking/v1/servicecidr.go | 101 +
 .../networking/v1alpha1/ipaddress.go | 16 +-
 .../networking/v1alpha1/servicecidr.go | 16 +-
 .../informers/networking/v1beta1/ingress.go | 16 +-
 .../networking/v1beta1/ingressclass.go | 16 +-
 .../informers/networking/v1beta1/ipaddress.go | 16 +-
 .../networking/v1beta1/servicecidr.go | 16 +-
 .../informers/node/v1/runtimeclass.go | 16 +-
 .../informers/node/v1alpha1/runtimeclass.go | 16 +-
 .../informers/node/v1beta1/runtimeclass.go | 16 +-
 .../policy/v1/poddisruptionbudget.go | 16 +-
 .../policy/v1beta1/poddisruptionbudget.go | 16 +-
 .../informers/rbac/v1/clusterrole.go | 16 +-
 .../informers/rbac/v1/clusterrolebinding.go | 16 +-
 .../client-go/informers/rbac/v1/role.go | 16 +-
 .../informers/rbac/v1/rolebinding.go | 16 +-
 .../informers/rbac/v1alpha1/clusterrole.go | 16 +-
 .../rbac/v1alpha1/clusterrolebinding.go | 16 +-
 .../client-go/informers/rbac/v1alpha1/role.go | 16 +-
 .../informers/rbac/v1alpha1/rolebinding.go | 16 +-
 .../informers/rbac/v1beta1/clusterrole.go | 16 +-
 .../rbac/v1beta1/clusterrolebinding.go | 16 +-
 .../client-go/informers/rbac/v1beta1/role.go | 16 +-
 .../informers/rbac/v1beta1/rolebinding.go | 16 +-
 .../client-go/informers/resource/interface.go | 8 +
 .../resource/v1alpha3/deviceclass.go | 16 +-
 .../resource/v1alpha3/devicetaintrule.go | 101 +
 .../informers/resource/v1alpha3/interface.go | 7 +
 .../resource/v1alpha3/resourceclaim.go | 16 +-
 .../v1alpha3/resourceclaimtemplate.go | 16 +-
 .../resource/v1alpha3/resourceslice.go | 16 +-
 .../informers/resource/v1beta1/deviceclass.go | 16 +-
 .../resource/v1beta1/resourceclaim.go | 16 +-
 .../resource/v1beta1/resourceclaimtemplate.go | 16 +-
 .../resource/v1beta1/resourceslice.go | 16 +-
 .../informers/resource/v1beta2/deviceclass.go | 101 +
 .../informers/resource/v1beta2/interface.go | 66 +
 .../resource/v1beta2/resourceclaim.go | 102 +
 .../resource/v1beta2/resourceclaimtemplate.go | 102 +
 .../resource/v1beta2/resourceslice.go | 101 +
 .../informers/scheduling/v1/priorityclass.go | 16 +-
 .../scheduling/v1alpha1/priorityclass.go | 16 +-
 .../scheduling/v1beta1/priorityclass.go | 16 +-
 .../informers/storage/v1/csidriver.go | 16 +-
 .../client-go/informers/storage/v1/csinode.go | 16 +-
 .../storage/v1/csistoragecapacity.go | 16 +-
 .../informers/storage/v1/storageclass.go | 16 +-
 .../informers/storage/v1/volumeattachment.go | 16 +-
 .../storage/v1alpha1/csistoragecapacity.go | 16 +-
 .../storage/v1alpha1/volumeattachment.go | 16 +-
 .../storage/v1alpha1/volumeattributesclass.go | 16 +-
 .../informers/storage/v1beta1/csidriver.go | 16 +-
 .../informers/storage/v1beta1/csinode.go | 16 +-
 .../storage/v1beta1/csistoragecapacity.go | 16 +-
 .../informers/storage/v1beta1/storageclass.go | 16 +-
 .../storage/v1beta1/volumeattachment.go | 16 +-
 .../storage/v1beta1/volumeattributesclass.go | 16 +-
 .../v1alpha1/storageversionmigration.go | 16 +-
 .../k8s.io/client-go/kubernetes/clientset.go | 13 +
 vendor/k8s.io/client-go/kubernetes/doc.go | 2 +-
 vendor/k8s.io/client-go/kubernetes/import.go | 2 +-
 .../client-go/kubernetes/scheme/register.go | 2 +
 .../v1/admissionregistration_client.go | 12 +-
 .../v1alpha1/admissionregistration_client.go | 12 +-
 .../v1beta1/admissionregistration_client.go | 12 +-
 .../v1alpha1/apiserverinternal_client.go | 12 +-
 .../kubernetes/typed/apps/v1/apps_client.go | 12 +-
 .../typed/apps/v1beta1/apps_client.go | 12 +-
 .../typed/apps/v1beta2/apps_client.go | 12 +-
 .../v1/authentication_client.go | 12 +-
 .../v1alpha1/authentication_client.go | 12 +-
 .../v1beta1/authentication_client.go | 12 +-
 .../authorization/v1/authorization_client.go | 12 +-
 .../v1beta1/authorization_client.go | 12 +-
 .../autoscaling/v1/autoscaling_client.go | 12 +-
 .../autoscaling/v2/autoscaling_client.go | 12 +-
 .../autoscaling/v2beta1/autoscaling_client.go | 12 +-
 .../autoscaling/v2beta2/autoscaling_client.go | 12 +-
 .../kubernetes/typed/batch/v1/batch_client.go | 12 +-
 .../typed/batch/v1beta1/batch_client.go | 12 +-
 .../certificates/v1/certificates_client.go | 12 +-
 .../v1alpha1/certificates_client.go | 12 +-
 .../v1beta1/certificates_client.go | 17 +-
 .../v1beta1/clustertrustbundle.go | 73 +
 .../v1beta1/generated_expansion.go | 2 +
 .../coordination/v1/coordination_client.go | 12 +-
 .../v1alpha2/coordination_client.go | 12 +-
 .../v1beta1/coordination_client.go | 17 +-
 .../v1beta1/generated_expansion.go | 2 +
 .../coordination/v1beta1/leasecandidate.go | 71 +
 .../kubernetes/typed/core/v1/core_client.go | 12 +-
 .../typed/core/v1/event_expansion.go | 65 +-
 .../typed/discovery/v1/discovery_client.go | 12 +-
 .../discovery/v1beta1/discovery_client.go | 12 +-
 .../typed/events/v1/events_client.go | 12 +-
 .../typed/events/v1beta1/event_expansion.go | 6 +-
 .../typed/events/v1beta1/events_client.go | 12 +-
 .../extensions/v1beta1/extensions_client.go | 12 +-
 .../flowcontrol/v1/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta1/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta2/flowcontrol_client.go | 12 +-
 .../flowcontrol/v1beta3/flowcontrol_client.go | 12 +-
 .../networking/v1/generated_expansion.go | 4 +
 .../typed/networking/v1/ipaddress.go | 71 +
 .../typed/networking/v1/networking_client.go | 22 +-
 .../typed/networking/v1/servicecidr.go | 75 +
 .../networking/v1alpha1/networking_client.go | 12 +-
 .../networking/v1beta1/networking_client.go | 12 +-
 .../kubernetes/typed/node/v1/node_client.go | 12 +-
 .../typed/node/v1alpha1/node_client.go | 12 +-
 .../typed/node/v1beta1/node_client.go | 12 +-
 .../typed/policy/v1/policy_client.go | 12 +-
 .../typed/policy/v1beta1/policy_client.go | 12 +-
 .../kubernetes/typed/rbac/v1/rbac_client.go | 12 +-
 .../typed/rbac/v1alpha1/rbac_client.go | 12 +-
 .../typed/rbac/v1beta1/rbac_client.go | 12 +-
 .../resource/v1alpha3/devicetaintrule.go | 71 +
 .../resource/v1alpha3/generated_expansion.go | 2 +
 .../resource/v1alpha3/resource_client.go | 17 +-
 .../typed/resource/v1beta1/resource_client.go | 12 +-
 .../typed/resource/v1beta2/deviceclass.go | 71 +
 .../kubernetes/typed/resource/v1beta2}/doc.go | 8 +-
 .../resource/v1beta2/generated_expansion.go} | 17 +-
 .../typed/resource/v1beta2/resource_client.go | 116 +
 .../typed/resource/v1beta2/resourceclaim.go | 75 +
 .../resource/v1beta2/resourceclaimtemplate.go | 71 +
 .../typed/resource/v1beta2/resourceslice.go | 71 +
 .../typed/scheduling/v1/scheduling_client.go | 12 +-
 .../scheduling/v1alpha1/scheduling_client.go | 12 +-
 .../scheduling/v1beta1/scheduling_client.go | 12 +-
 .../typed/storage/v1/storage_client.go | 12 +-
 .../typed/storage/v1alpha1/storage_client.go | 12 +-
 .../typed/storage/v1beta1/storage_client.go | 12 +-
 .../v1alpha1/storagemigration_client.go | 12 +-
 .../v1beta1/clustertrustbundle.go | 48 +
 .../v1beta1/expansion_generated.go | 4 +
 .../v1beta1/expansion_generated.go | 8 +
 .../coordination/v1beta1/leasecandidate.go | 70 +
 vendor/k8s.io/client-go/listers/doc.go | 2 +-
 .../networking/v1/expansion_generated.go | 8 +
 .../listers/networking/v1/ipaddress.go | 48 +
 .../listers/networking/v1/servicecidr.go | 48 +
 .../resource/v1alpha3/devicetaintrule.go | 48 +
 .../resource/v1alpha3/expansion_generated.go | 4 +
 .../listers/resource/v1beta2/deviceclass.go | 48 +
 .../resource/v1beta2/expansion_generated.go | 43 +
 .../listers/resource/v1beta2/resourceclaim.go | 70 +
 .../resource/v1beta2/resourceclaimtemplate.go | 70 +
 .../listers/resource/v1beta2/resourceslice.go | 48 +
 .../pkg/apis/clientauthentication/doc.go | 2 +-
 .../pkg/apis/clientauthentication/v1/doc.go | 2 +-
 .../apis/clientauthentication/v1beta1/doc.go | 2 +-
 vendor/k8s.io/client-go/pkg/version/doc.go | 2 +-
 vendor/k8s.io/client-go/rest/.mockery.yaml | 10 +
 vendor/k8s.io/client-go/rest/client.go | 6 +-
 vendor/k8s.io/client-go/rest/config.go | 85 +-
 vendor/k8s.io/client-go/rest/plugin.go | 7 +-
 vendor/k8s.io/client-go/rest/request.go | 138 +-
 vendor/k8s.io/client-go/rest/urlbackoff.go | 101 +-
 vendor/k8s.io/client-go/rest/warnings.go | 57 +-
 vendor/k8s.io/client-go/rest/with_retry.go | 12 +-
 vendor/k8s.io/client-go/scale/doc.go | 2 +-
 .../client-go/scale/scheme/appsint/doc.go | 2 +-
 .../client-go/scale/scheme/appsv1beta1/doc.go | 2 +-
 .../client-go/scale/scheme/appsv1beta2/doc.go | 2 +-
 .../scale/scheme/autoscalingv1/doc.go | 2 +-
 vendor/k8s.io/client-go/scale/scheme/doc.go | 2 +-
 .../scale/scheme/extensionsint/doc.go | 2 +-
 .../scale/scheme/extensionsv1beta1/doc.go | 2 +-
 .../client-go/tools/cache/controller.go | 84 +-
 .../client-go/tools/cache/delta_fifo.go | 125 +-
 vendor/k8s.io/client-go/tools/cache/doc.go | 2 +-
 vendor/k8s.io/client-go/tools/cache/fifo.go | 97 +-
 .../k8s.io/client-go/tools/cache/listers.go | 7 +-
 .../k8s.io/client-go/tools/cache/listwatch.go | 174 +-
 .../client-go/tools/cache/mutation_cache.go | 8 +-
 .../tools/cache/mutation_detector.go | 1 +
 .../k8s.io/client-go/tools/cache/reflector.go | 238 +-
 .../client-go/tools/cache/shared_informer.go | 194 +-
 .../client-go/tools/cache/the_real_fifo.go | 407 +
 .../client-go/tools/clientcmd/api/doc.go | 2 +-
 .../client-go/tools/clientcmd/api/v1/doc.go | 2 +-
 .../k8s.io/client-go/tools/clientcmd/doc.go | 2 +-
 vendor/k8s.io/client-go/tools/events/doc.go | 2 +-
 .../client-go/tools/events/event_recorder.go | 2 +-
 .../tools/leaderelection/leasecandidate.go | 18 +-
 vendor/k8s.io/client-go/tools/record/doc.go | 2 +-
 vendor/k8s.io/client-go/tools/record/event.go | 2 +-
 .../client-go/tools/record/util/util.go | 17 +
 .../client-go/tools/remotecommand/doc.go | 2 +-
 .../tools/remotecommand/errorstream.go | 2 +-
 .../tools/remotecommand/websocket.go | 19 +-
 .../client-go/tools/watch/informerwatcher.go | 18 +-
 .../client-go/tools/watch/retrywatcher.go | 103 +-
 vendor/k8s.io/client-go/tools/watch/until.go | 6 +-
 vendor/k8s.io/client-go/transport/cache.go | 8 +-
 .../client-go/transport/cert_rotation.go | 17 +-
 .../client-go/transport/round_trippers.go | 192 +-
 .../client-go/transport/token_source.go | 5 +-
 .../k8s.io/client-go/transport/transport.go | 2 +-
 vendor/k8s.io/client-go/util/cert/cert.go | 48 +-
 .../data_consistency_detector.go | 2 +-
 .../client-go/util/flowcontrol/backoff.go | 5 +-
 vendor/k8s.io/client-go/util/jsonpath/doc.go | 2 +-
 .../util/workqueue/delaying_queue.go | 19 +-
 vendor/k8s.io/client-go/util/workqueue/doc.go | 2 +-
 .../client-go/util/workqueue/parallelizer.go | 2 +-
 .../component-base/cli/flag/tracker_flag.go | 82 +
 .../component-base/compatibility/OWNERS | 13 +
 .../registry.go | 96 +-
 .../component-base/compatibility/version.go | 239 +
 .../featuregate/feature_gate.go | 41 +-
 .../component-base/tracing/api/v1/doc.go | 2 +-
 vendor/k8s.io/component-base/version/base.go | 2 +-
 .../k8s.io/component-base/version/version.go | 159 +-
 .../kube-openapi/pkg/handler3/handler.go | 2 +-
 .../internal/third_party/govalidator}/LICENSE | 10 +-
 .../third_party/govalidator/patterns.go | 26 +
 .../third_party/govalidator/validator.go | 181 +
 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 146 +-
 .../pkg/validation/strfmt/default.go | 4 +-
 .../pkg/validation/strfmt/format.go | 24 +
 .../strfmt/kubernetes-extensions.go | 143 +
 vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go | 7 +-
 vendor/k8s.io/kubectl/pkg/util/openapi/doc.go | 2 +-
 vendor/k8s.io/kubectl/pkg/util/slice/slice.go | 15 +
 vendor/k8s.io/utils/buffer/ring_growing.go | 116 +-
 .../k8s.io/utils/clock/testing/fake_clock.go | 362 -
 .../clock/testing/simple_interval_clock.go | 44 -
 vendor/k8s.io/utils/net/multi_listen.go | 6 +-
 vendor/modules.txt | 762 +-
 vendor/oras.land/oras-go/LICENSE | 201 -
 vendor/oras.land/oras-go/pkg/auth/client.go | 45 -
 .../oras.land/oras-go/pkg/auth/client_opts.go | 123 -
 .../oras-go/pkg/auth/docker/client.go | 123 -
 .../oras-go/pkg/auth/docker/login.go | 103 -
 .../oras-go/pkg/auth/docker/login_tls.go | 220 -
 .../oras-go/pkg/auth/docker/logout.go | 42 -
 .../oras-go/pkg/auth/docker/resolver.go | 86 -
 .../oras.land/oras-go/pkg/content/consts.go | 57 -
 .../oras-go/pkg/content/decompress.go | 151 -
 .../oras.land/oras-go/pkg/content/errors.go | 33 -
 vendor/oras.land/oras-go/pkg/content/file.go | 534 -
 .../oras.land/oras-go/pkg/content/gunzip.go | 72 -
 .../oras.land/oras-go/pkg/content/iowriter.go | 112 -
 .../oras.land/oras-go/pkg/content/manifest.go | 95 -
 .../oras.land/oras-go/pkg/content/memory.go | 284 -
 .../oras-go/pkg/content/multireader.go | 56 -
 .../oras-go/pkg/content/multiwriter.go | 42 -
 vendor/oras.land/oras-go/pkg/content/oci.go | 335 -
 vendor/oras.land/oras-go/pkg/content/opts.go | 112 -
 .../oras-go/pkg/content/passthrough.go | 286 -
 .../oras.land/oras-go/pkg/content/readerat.go | 68 -
 .../oras.land/oras-go/pkg/content/registry.go | 84 -
 vendor/oras.land/oras-go/pkg/content/untar.go | 157 -
 vendor/oras.land/oras-go/pkg/content/utils.go | 223 -
 .../oras.land/oras-go/pkg/context/logger.go | 50 -
 vendor/oras.land/oras-go/pkg/oras/copy.go | 213 -
 vendor/oras.land/oras-go/pkg/oras/errors.go | 42 -
 vendor/oras.land/oras-go/pkg/oras/opts.go | 254 -
 vendor/oras.land/oras-go/pkg/oras/provider.go | 79 -
 vendor/oras.land/oras-go/pkg/oras/store.go | 213 -
 .../oras-go/pkg/registry/reference.go | 177 -
 .../remote/internal/errutil/errors.go | 83 -
 .../registry/remote/internal/syncutil/once.go | 69 -
 .../oras-go/pkg/registry/remote/repository.go | 171 -
 .../oras-go/pkg/registry/remote/url.go | 42 -
 .../oras-go/pkg/registry/repository.go | 57 -
 vendor/oras.land/oras-go/v2/.gitignore | 41 +
 vendor/oras.land/oras-go/v2/CODEOWNERS | 2 +
 .../oras.land/oras-go/v2/CODE_OF_CONDUCT.md | 3 +
 .../oras.land/oras-go/v2/MIGRATION_GUIDE.md | 61 +
 vendor/oras.land/oras-go/v2/Makefile | 38 +
 vendor/oras.land/oras-go/v2/OWNERS.md | 11 +
 vendor/oras.land/oras-go/v2/README.md | 66 +
 vendor/oras.land/oras-go/v2/SECURITY.md | 3 +
 vendor/oras.land/oras-go/v2/content.go | 411 +
 .../oras-go/v2/content/memory/memory.go | 96 +
 vendor/oras.land/oras-go/v2/copy.go | 533 +
 vendor/oras.land/oras-go/v2/copyerror.go | 78 +
 vendor/oras.land/oras-go/v2/extendedcopy.go | 404 +
 .../oras-go/v2/internal/cas/memory.go | 88 +
 .../oras-go/v2/internal/cas/proxy.go | 125 +
 .../oras-go/v2/internal/copyutil/stack.go | 55 +
 .../oras-go/v2/internal/httputil/seek.go | 116 +
 .../internal/interfaces/registry.go} | 14 +-
 .../oras-go/v2/internal/platform/platform.go | 145 +
 .../oras-go/v2/internal/registryutil/proxy.go | 102 +
 vendor/oras.land/oras-go/v2/pack.go | 448 +
 .../{pkg => v2}/registry/remote/auth/cache.go | 78 +-
 .../registry/remote/auth/challenge.go | 3 +-
 .../registry/remote/auth/client.go | 125 +-
 .../registry/remote/auth/credential.go | 5 +-
 .../{pkg => v2}/registry/remote/auth/scope.go | 114 +-
 .../registry/remote/credentials/file_store.go | 97 +
 .../credentials/internal/config/config.go | 332 +
 .../credentials/internal/executer/executer.go | 80 +
 .../credentials/internal/ioutil/ioutil.go | 49 +
 .../remote/credentials/memory_store.go | 81 +
 .../remote/credentials/native_store.go | 139 +
 .../credentials/native_store_darwin.go} | 13 +-
 .../credentials/native_store_generic.go} | 17 +-
 .../remote/credentials/native_store_linux.go | 29 +
 .../credentials/native_store_windows.go} | 13 +-
 .../registry/remote/credentials/registry.go | 102 +
 .../v2/registry/remote/credentials/store.go | 262 +
 .../remote/credentials/trace/trace.go | 94 +
 .../v2/registry/remote/errcode/errors.go | 128 +
 .../remote/internal/errutil/errutil.go | 54 +
 .../oras-go/v2/registry/remote/manifest.go | 59 +
 .../oras-go/v2/registry/remote/referrers.go | 225 +
 .../oras-go/v2/registry/remote/registry.go | 190 +
 .../oras-go/v2/registry/remote/repository.go | 1681 ++
 .../v2/registry/remote/retry/client.go | 114 +
 .../v2/registry/remote/retry/policy.go | 154 +
 .../oras-go/v2/registry/remote/url.go | 119 +
 .../{pkg => v2}/registry/remote/utils.go | 40 +-
 .../oras-go/v2/registry/remote/warning.go | 100 +
 vendor/oras.land/oras-go/v2/target.go | 43 +
 .../proto/client/client.pb.go | 1 -
 .../controller-runtime/pkg/cache/cache.go | 8 +-
 .../pkg/cache/internal/cache_reader.go | 8 +-
 .../pkg/cache/internal/informers.go | 58 +-
 .../pkg/cache/multi_namespace_cache.go | 46 +-
 .../controller-runtime/pkg/client/client.go | 9 +-
 .../pkg/client/config/config.go | 16 +-
 .../pkg/client/fake/client.go | 33 +-
 .../pkg/config/controller.go | 13 +-
 .../pkg/controller/controller.go | 86 +-
 .../controllerutil/controllerutil.go | 61 +-
 .../pkg/controller/priorityqueue/metrics.go | 54 +-
 .../controller/priorityqueue/priorityqueue.go | 12 +-
 .../controller-runtime/pkg/event/event.go | 3 +
 .../pkg/handler/enqueue_mapped.go | 5 +-
 .../pkg/handler/eventhandler.go | 18 +-
 .../pkg/internal/controller/controller.go | 118 +-
 .../internal/controller/metrics/metrics.go | 9 +-
 .../pkg/internal/metrics/workqueue.go | 61 +-
 .../pkg/internal/source/event_handler.go | 18 +-
 .../pkg/internal/source/kind.go | 14 +-
 .../pkg/log/warning_handler.go | 27 +-
 .../controller-runtime/pkg/log/zap/flags.go | 1 +
 .../pkg/log/zap/kube_helpers.go | 1 +
 .../controller-runtime/pkg/log/zap/zap.go | 4 +-
 .../controller-runtime/pkg/manager/manager.go | 4 +
 .../pkg/reconcile/reconcile.go | 12 +-
 .../controller-runtime/pkg/source/source.go | 8 +-
 .../pkg/webhook/admission/multi.go | 6 +
 .../pkg/webhook/internal/metrics/metrics.go | 8 +-
 .../controller-tools/pkg/crd/desc_visitor.go | 2 +-
 .../controller-tools/pkg/crd/flatten.go | 5 +-
 .../controller-tools/pkg/crd/gen.go | 1 -
 .../controller-tools/pkg/crd/known_types.go | 1 -
 .../controller-tools/pkg/crd/markers/crd.go | 1 -
 .../pkg/crd/markers/topology.go | 22 +-
 .../pkg/crd/markers/validation.go | 38 +-
 .../crd/markers/zz_generated.markerhelp.go | 20 +
 .../controller-tools/pkg/crd/parser.go | 1 -
 .../controller-tools/pkg/crd/schema.go | 7 +-
 .../controller-tools/pkg/crd/spec.go | 2 -
 .../controller-tools/pkg/genall/genall.go | 9 +-
 .../controller-tools/pkg/genall/options.go | 9 +-
 .../controller-tools/pkg/genall/output.go | 6 +
 .../controller-tools/pkg/loader/errors.go | 4 +-
 .../controller-tools/pkg/loader/loader.go | 10 +-
 .../controller-tools/pkg/markers/parse.go | 4 +-
 .../controller-tools/pkg/markers/zip.go | 2 +-
 .../v4/pkg/cli/alpha/internal/generate.go | 231 +-
 .../sigs.k8s.io/kubebuilder/v4/pkg/cli/cli.go | 20 +-
 .../kubebuilder/v4/pkg/cli/cmd_helpers.go | 8 +
 .../kubebuilder/v4/pkg/cli/options.go | 40 +-
 .../kubebuilder/v4/pkg/config/interface.go | 6 +
 .../v4/pkg/config/store/yaml/store.go | 2 +-
 .../kubebuilder/v4/pkg/config/v3/config.go | 28 +-
 .../kubebuilder/v4/pkg/config/version.go | 21 +-
 .../kubebuilder/v4/pkg/machinery/scaffold.go | 57 +-
 .../kubebuilder/v4/pkg/model/resource/gvk.go | 2 +-
 .../v4/pkg/model/resource/resource.go | 2 +-
 .../v4/pkg/model/resource/utils.go | 4 +-
 .../kubebuilder/v4/pkg/plugin/helpers.go | 10 +-
 .../kubebuilder/v4/pkg/plugin/util/exec.go | 8 +-
 .../kubebuilder/v4/pkg/plugin/util/util.go | 101 +-
 .../kubebuilder/v4/pkg/plugin/version.go | 10 +-
 .../v4/pkg/plugins/common/kustomize/v2/api.go | 8 +-
 .../pkg/plugins/common/kustomize/v2/create.go | 3 +-
 .../pkg/plugins/common/kustomize/v2/init.go | 19 +-
 .../common/kustomize/v2/scaffolds/api.go | 14 +-
 .../common/kustomize/v2/scaffolds/init.go | 14 +-
 .../common/kustomize/v2/scaffolds/webhook.go | 10 +-
 .../plugins/common/kustomize/v2/webhook.go | 8 +-
 .../v4/pkg/plugins/external/helpers.go | 50 +-
 .../golang/deploy-image/v1alpha1/api.go | 29 +-
 .../deploy-image/v1alpha1/scaffolds/api.go | 34 +-
 .../scaffolds/internal/templates/api/types.go | 2 +-
 .../templates/controllers/controller-test.go | 4 +-
 .../templates/controllers/controller.go | 4 +-
 .../v4/pkg/plugins/golang/go_version.go | 12 +-
 .../v4/pkg/plugins/golang/options.go | 1 -
 .../v4/pkg/plugins/golang/repository.go | 13 +-
 .../v4/pkg/plugins/golang/v4/api.go | 16 +-
 .../v4/pkg/plugins/golang/v4/edit.go | 6 +-
 .../v4/pkg/plugins/golang/v4/init.go | 32 +-
 .../v4/pkg/plugins/golang/v4/scaffolds/api.go | 10 +-
 .../pkg/plugins/golang/v4/scaffolds/edit.go | 12 +-
 .../pkg/plugins/golang/v4/scaffolds/init.go | 19 +-
 .../scaffolds/internal/templates/api/hub.go | 2 +-
 .../scaffolds/internal/templates/api/types.go | 2 +-
 .../scaffolds/internal/templates/cmd/main.go | 28 +-
 .../templates/controllers/controller.go | 2 +-
 .../controllers/controller_suitetest.go | 2 +-
 .../controllers/controller_test_template.go | 2 +-
 .../internal/templates/devcontainer.go | 2 +-
 .../internal/templates/dockerfile.go | 2 +-
 .../internal/templates/github/lint.go | 2 +-
 .../internal/templates/github/test-e2e.go | 4 +-
 .../scaffolds/internal/templates/golangci.go | 59 +-
 .../v4/scaffolds/internal/templates/gomod.go | 4 +-
 .../internal/templates/hack/boilerplate.go | 4 +-
 .../scaffolds/internal/templates/makefile.go | 23 +-
 .../v4/scaffolds/internal/templates/readme.go | 2 +-
 .../internal/templates/test/utils/utils.go | 41 +-
 .../internal/templates/webhooks/webhook.go | 11 +-
 .../templates/webhooks/webhook_suitetest.go | 2 +-
 .../webhooks/webhook_test_template.go | 3 +-
 .../plugins/golang/v4/scaffolds/webhook.go | 32 +-
 .../v4/pkg/plugins/golang/v4/webhook.go | 21 +-
 .../optional/grafana/v1alpha/commons.go | 7 +-
 .../plugins/optional/grafana/v1alpha/edit.go | 9 +-
 .../plugins/optional/grafana/v1alpha/init.go | 9 +-
 .../grafana/v1alpha/scaffolds/edit.go | 16 +-
 .../grafana/v1alpha/scaffolds/init.go | 11 +-
 .../plugins/optional/helm/v1alpha/commons.go | 5 +-
 .../pkg/plugins/optional/helm/v1alpha/edit.go | 2 +-
 .../pkg/plugins/optional/helm/v1alpha/init.go | 2 +-
 .../optional/helm/v1alpha/scaffolds/init.go | 48 +-
 .../metrics/metrics_service.go | 1 +
 .../chart-templates/prometheus/monitor.go | 1 +
 .../kubebuilder/v4/test/e2e/utils/kubectl.go | 15 +-
 .../v4/test/e2e/utils/test_context.go | 21 +-
 .../kubebuilder/v4/test/e2e/utils/webhooks.go | 16 +-
 .../api/filters/replacement/replacement.go | 6 +-
 .../internal/accumulator/resaccumulator.go | 2 +-
 .../builtins/HelmChartInflationGenerator.go | 3 +-
 .../internal/builtins/NamespaceTransformer.go | 2 +
 .../api/internal/loader/fileloader.go | 2 +-
 .../kustomize/api/internal/loader/loader.go | 2 +-
 .../builtinconfig/transformerconfig.go | 2 +
 .../internal/plugins/execplugin/execplugin.go | 1 +
 .../api/internal/plugins/loader/loader.go | 21 +-
 .../kustomize/kyaml/filesys/filesystem.go | 1 +
 vendor/sigs.k8s.io/randfill/CONTRIBUTING.md | 43 +
 .../analysis => sigs.k8s.io/randfill}/LICENSE | 6 +-
 vendor/sigs.k8s.io/randfill/NOTICE | 24 +
 vendor/sigs.k8s.io/randfill/OWNERS | 8 +
 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES | 14 +
 .../gofuzz => sigs.k8s.io/randfill}/README.md | 45 +-
 vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS | 16 +
 .../randfill}/bytesource/bytesource.go | 0
 .../sigs.k8s.io/randfill/code-of-conduct.md | 3 +
 vendor/sigs.k8s.io/randfill/randfill.go | 682 +
 .../structured-merge-diff/v4/merge/update.go | 50 +-
 .../structured-merge-diff/v4/typed/typed.go | 47 +-
 .../v4/typed/validate.go | 4 +-
 .../v4/value/jsontagutil.go | 63 +-
 .../v4/value/reflectcache.go | 14 +-
 .../structured-merge-diff/v4/value/scalar.go | 2 +-
 vendor/sigs.k8s.io/yaml/.travis.yml | 12 -
 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 -
 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 200 +-
 .../yaml/goyaml.v2/yaml_aliases.go | 85 +
 vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS | 24 -
 vendor/sigs.k8s.io/yaml/goyaml.v3/README.md | 210 +-
 vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go | 39 -
 .../yaml/goyaml.v3/yaml_aliases.go | 130 +
 vendor/sigs.k8s.io/yaml/yaml.go | 11 +-
 2769 files changed, 137384 insertions(+), 122008 deletions(-)
 create mode 100644 vendor/dario.cat/mergo/FUNDING.json
 create mode 100644 vendor/github.com/Microsoft/hcsshim/.clang-format
 create mode 100644 vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
 rename vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/{cim_mount.go => cimfs.go} (70%)
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go
 delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
 delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go
 create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/mutex.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
 delete mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go
 delete mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go
 delete mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go
 delete mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go
 rename vendor/github.com/cenkalti/backoff/{v4 => v5}/.gitignore (100%)
 create mode 100644 vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
 rename vendor/github.com/cenkalti/backoff/{v4 => v5}/LICENSE (100%)
 rename vendor/github.com/cenkalti/backoff/{v4 => v5}/README.md (64%)
 rename vendor/github.com/cenkalti/backoff/{v4 => v5}/backoff.go (87%)
 create mode 100644 vendor/github.com/cenkalti/backoff/v5/error.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v5/exponential.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v5/retry.go
 rename vendor/github.com/cenkalti/backoff/{v4 => v5}/ticker.go (80%)
 rename vendor/github.com/cenkalti/backoff/{v4 => v5}/timer.go (96%)
 delete mode 100644 vendor/github.com/containerd/containerd/log/context_deprecated.go
 delete mode 100644 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go
 delete mode 100644 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
 create mode 100644 vendor/github.com/docker/cli/cli/config/memorystore/store.go
 delete mode 100644 vendor/github.com/docker/distribution/.dockerignore
 delete mode 100644 vendor/github.com/docker/distribution/.gitignore
 delete mode 100644 vendor/github.com/docker/distribution/.golangci.yml
 delete mode 100644 vendor/github.com/docker/distribution/.mailmap
 delete mode 100644 vendor/github.com/docker/distribution/BUILDING.md
 delete mode 100644 vendor/github.com/docker/distribution/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/docker/distribution/Dockerfile
 delete mode 100644 vendor/github.com/docker/distribution/MAINTAINERS
 delete mode 100644 vendor/github.com/docker/distribution/Makefile
 delete mode 100644 vendor/github.com/docker/distribution/README.md
 delete mode 100644 vendor/github.com/docker/distribution/ROADMAP.md
 delete mode 100644 vendor/github.com/docker/distribution/blobs.go
 delete mode 100644 vendor/github.com/docker/distribution/doc.go
 delete mode 100644 vendor/github.com/docker/distribution/docker-bake.hcl
 delete mode 100644 vendor/github.com/docker/distribution/errors.go
 delete mode 100644 vendor/github.com/docker/distribution/manifests.go
 delete mode 100644 vendor/github.com/docker/distribution/metrics/prometheus.go
 delete mode 100644 vendor/github.com/docker/distribution/registry.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/api_version.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/session.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/blob_writer.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/errors.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/repository.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/transport/transport.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cache.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
 delete mode 100644 vendor/github.com/docker/distribution/tags.go
 delete mode 100644 vendor/github.com/docker/distribution/vendor.conf
 delete mode 100644 vendor/github.com/docker/docker/api/types/filters/errors.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authconfig.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/registry/search.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/defs.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/doc.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go
 delete mode 100644 vendor/github.com/docker/docker/errdefs/is.go
 delete mode 100644 vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
 delete mode 100644 vendor/github.com/docker/docker/registry/auth.go
 delete mode 100644 vendor/github.com/docker/docker/registry/config.go
 delete mode 100644 vendor/github.com/docker/docker/registry/config_unix.go
 delete mode 100644 vendor/github.com/docker/docker/registry/config_windows.go
 delete mode 100644 vendor/github.com/docker/docker/registry/errors.go
 delete mode 100644 vendor/github.com/docker/docker/registry/registry.go
 delete mode 100644 vendor/github.com/docker/docker/registry/search.go
 delete mode 100644 vendor/github.com/docker/docker/registry/search_endpoint_v1.go
 delete mode 100644 vendor/github.com/docker/docker/registry/search_session.go
 delete mode 100644 vendor/github.com/docker/docker/registry/service.go
 delete mode 100644 vendor/github.com/docker/docker/registry/service_v2.go
 delete mode 100644 vendor/github.com/docker/docker/registry/types.go
 delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
 create mode 100644 vendor/github.com/docker/go-events/SECURITY.md
 create mode 100644 vendor/github.com/docker/go-events/vendor.mod
 create mode 100644 vendor/github.com/docker/go-events/vendor.sum
 delete mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go
 create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go
 create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go
 create mode 100644 vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/.codecov.yml
 delete mode 100644 vendor/github.com/go-openapi/analysis/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/analysis/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/analysis/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/analysis/README.md
 delete mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/debug.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/doc.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/fixer.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/flatten.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_name.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_options.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/debug/debug.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/mixin.go
 delete mode 100644 vendor/github.com/go-openapi/analysis/schema.go
 delete mode 100644 vendor/github.com/go-openapi/errors/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/errors/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/errors/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/errors/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/errors/README.md
 delete mode 100644 vendor/github.com/go-openapi/errors/api.go
 delete mode 100644 vendor/github.com/go-openapi/errors/auth.go
 delete mode 100644 vendor/github.com/go-openapi/errors/doc.go
 delete mode 100644 vendor/github.com/go-openapi/errors/headers.go
 delete mode 100644 vendor/github.com/go-openapi/errors/middleware.go
 delete mode 100644 vendor/github.com/go-openapi/errors/parsing.go
 delete mode 100644 vendor/github.com/go-openapi/errors/schema.go
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/errors.go
 delete mode 100644 vendor/github.com/go-openapi/loads/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/loads/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/loads/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/loads/.travis.yml
 delete mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/loads/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/loads/README.md
 delete mode 100644 vendor/github.com/go-openapi/loads/doc.go
 delete mode 100644 vendor/github.com/go-openapi/loads/loaders.go
 delete mode 100644 vendor/github.com/go-openapi/loads/options.go
 delete mode 100644 vendor/github.com/go-openapi/loads/spec.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/runtime/.gitattributes
 delete mode 100644 vendor/github.com/go-openapi/runtime/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/runtime/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/runtime/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/runtime/README.md
 delete mode 100644 vendor/github.com/go-openapi/runtime/bytestream.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_auth_info.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_operation.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_request.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/client_response.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/constants.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/csv.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/csv_options.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/discard.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/file.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/headers.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/interfaces.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/json.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/request.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/statuses.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/text.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/values.go
 delete mode 100644 vendor/github.com/go-openapi/runtime/xml.go
 delete mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
 delete mode 100644 vendor/github.com/go-openapi/spec/.gitignore
 delete mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
 delete mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/go-openapi/spec/LICENSE
 delete mode 100644 vendor/github.com/go-openapi/spec/README.md
 delete mode 100644 vendor/github.com/go-openapi/spec/cache.go
 delete mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
 delete mode 100644 vendor/github.com/go-openapi/spec/debug.go
 delete mode 100644 vendor/github.com/go-openapi/spec/embed.go
 delete mode 100644 vendor/github.com/go-openapi/spec/errors.go
 delete mode 100644 vendor/github.com/go-openapi/spec/expander.go
 delete mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
 delete mode 100644 vendor/github.com/go-openapi/spec/header.go
 delete mode 100644 vendor/github.com/go-openapi/spec/info.go
 delete mode 100644 vendor/github.com/go-openapi/spec/items.go
 delete mode 100644 vendor/github.com/go-openapi/spec/license.go
 delete mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
 delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
 delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go
 delete mode 100644 vendor/github.com/go-openapi/spec/operation.go
 delete mode 100644 vendor/github.com/go-openapi/spec/parameter.go
 delete mode 100644 vendor/github.com/go-openapi/spec/path_item.go
 delete mode 100644 vendor/github.com/go-openapi/spec/paths.go
 delete mode 100644 vendor/github.com/go-openapi/spec/properties.go
 delete mode 100644 vendor/github.com/go-openapi/spec/ref.go
 delete mode 100644 vendor/github.com/go-openapi/spec/resolver.go
 delete mode 100644 vendor/github.com/go-openapi/spec/response.go
delete mode 100644 vendor/github.com/go-openapi/spec/responses.go delete mode 100644 vendor/github.com/go-openapi/spec/schema.go delete mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go delete mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json delete mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json delete mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go delete mode 100644 vendor/github.com/go-openapi/spec/spec.go delete mode 100644 vendor/github.com/go-openapi/spec/swagger.go delete mode 100644 vendor/github.com/go-openapi/spec/tag.go delete mode 100644 vendor/github.com/go-openapi/spec/url_go19.go delete mode 100644 vendor/github.com/go-openapi/spec/validations.go delete mode 100644 vendor/github.com/go-openapi/spec/xml_object.go delete mode 100644 vendor/github.com/go-openapi/strfmt/.editorconfig delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitattributes delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitignore delete mode 100644 vendor/github.com/go-openapi/strfmt/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/strfmt/LICENSE delete mode 100644 vendor/github.com/go-openapi/strfmt/README.md delete mode 100644 vendor/github.com/go-openapi/strfmt/bson.go delete mode 100644 vendor/github.com/go-openapi/strfmt/date.go delete mode 100644 vendor/github.com/go-openapi/strfmt/default.go delete mode 100644 vendor/github.com/go-openapi/strfmt/doc.go delete mode 100644 vendor/github.com/go-openapi/strfmt/duration.go delete mode 100644 vendor/github.com/go-openapi/strfmt/format.go delete mode 100644 vendor/github.com/go-openapi/strfmt/time.go delete mode 100644 vendor/github.com/go-openapi/strfmt/ulid.go delete mode 100644 vendor/github.com/go-openapi/validate/.editorconfig delete mode 100644 vendor/github.com/go-openapi/validate/.gitattributes delete mode 100644 vendor/github.com/go-openapi/validate/.gitignore delete mode 100644 vendor/github.com/go-openapi/validate/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/validate/BENCHMARK.md delete mode 100644 vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/validate/LICENSE delete mode 100644 vendor/github.com/go-openapi/validate/README.md delete mode 100644 vendor/github.com/go-openapi/validate/context.go delete mode 100644 vendor/github.com/go-openapi/validate/debug.go delete mode 100644 vendor/github.com/go-openapi/validate/default_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/doc.go delete mode 100644 vendor/github.com/go-openapi/validate/example_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/formats.go delete mode 100644 vendor/github.com/go-openapi/validate/helpers.go delete mode 100644 vendor/github.com/go-openapi/validate/object_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/options.go delete mode 100644 vendor/github.com/go-openapi/validate/pools.go delete mode 100644 vendor/github.com/go-openapi/validate/pools_debug.go delete mode 100644 vendor/github.com/go-openapi/validate/result.go delete mode 100644 vendor/github.com/go-openapi/validate/rexp.go delete mode 100644 vendor/github.com/go-openapi/validate/schema.go delete mode 100644 vendor/github.com/go-openapi/validate/schema_messages.go delete mode 100644 vendor/github.com/go-openapi/validate/schema_option.go delete mode 100644 
vendor/github.com/go-openapi/validate/schema_props.go delete mode 100644 vendor/github.com/go-openapi/validate/slice_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/spec.go delete mode 100644 vendor/github.com/go-openapi/validate/spec_messages.go delete mode 100644 vendor/github.com/go-openapi/validate/type.go delete mode 100644 vendor/github.com/go-openapi/validate/update-fixtures.sh delete mode 100644 vendor/github.com/go-openapi/validate/validator.go delete mode 100644 vendor/github.com/go-openapi/validate/values.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/errors.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/google/cel-go/cel/prompt.go create mode 100644 vendor/github.com/google/cel-go/cel/templates/authoring.tmpl create mode 100644 vendor/github.com/google/cel-go/common/env/BUILD.bazel create mode 100644 vendor/github.com/google/cel-go/common/env/env.go create mode 100644 vendor/github.com/google/cel-go/common/types/format.go create mode 100644 vendor/github.com/google/cel-go/ext/extension_option_factory.go create mode 100644 vendor/github.com/google/cel-go/ext/formatting_v2.go create mode 100644 vendor/github.com/google/cel-go/ext/regex.go create mode 100644 vendor/github.com/google/gnostic-models/openapiv3/annotations.pb.go create mode 100644 vendor/github.com/google/gnostic-models/openapiv3/annotations.proto delete mode 100644 vendor/github.com/google/gofuzz/.travis.yml delete mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/gofuzz/LICENSE delete mode 100644 vendor/github.com/google/gofuzz/fuzz.go delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go delete mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go delete mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go create mode 100644 vendor/github.com/letsencrypt/boulder/core/proto/core.proto delete mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/blocked.go delete mode 100644 vendor/github.com/letsencrypt/boulder/goodkey/weak.go delete mode 100644 vendor/github.com/letsencrypt/boulder/strictyaml/yaml.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go delete mode 100644 vendor/github.com/oklog/ulid/.gitignore delete mode 100644 vendor/github.com/oklog/ulid/.travis.yml delete mode 100644 vendor/github.com/oklog/ulid/AUTHORS.md delete mode 100644 vendor/github.com/oklog/ulid/CHANGELOG.md delete mode 100644 vendor/github.com/oklog/ulid/CONTRIBUTING.md delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml delete mode 100644 
vendor/github.com/oklog/ulid/LICENSE delete mode 100644 vendor/github.com/oklog/ulid/README.md delete mode 100644 vendor/github.com/oklog/ulid/ulid.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go rename vendor/{go.uber.org/automaxprocs/internal/cgroups => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/cgroup.go (99%) rename vendor/{go.uber.org/automaxprocs/internal/cgroups => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/cgroups.go (99%) rename vendor/{go.uber.org/automaxprocs/internal/cgroups => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/cgroups2.go (99%) rename vendor/{go.uber.org/automaxprocs/internal/runtime => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/cpu_quota_linux.go (91%) rename vendor/{go.uber.org/automaxprocs/internal/runtime => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/cpu_quota_unsupported.go (98%) rename vendor/{go.uber.org/automaxprocs/internal/cgroups => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/errors.go (98%) rename vendor/{go.uber.org/automaxprocs/internal/cgroups => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/mountpoint.go (99%) rename vendor/{go.uber.org/automaxprocs/internal/runtime => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/runtime.go (98%) rename vendor/{go.uber.org/automaxprocs/internal/cgroups => github.com/onsi/ginkgo/v2/ginkgo/automaxprocs}/subsys.go (99%) create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/around_node.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/around_node.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev_snmp6.go create mode 100644 vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md create mode 100644 vendor/github.com/redis/go-redis/v9/auth/auth.go create mode 100644 vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go create mode 100644 vendor/github.com/redis/go-redis/v9/docker-compose.yml delete mode 100644 vendor/github.com/redis/go-redis/v9/gears_commands.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/util/convert.go create mode 100644 vendor/github.com/redis/go-redis/v9/vectorset_commands.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml rename vendor/github.com/{xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt => santhosh-tekuri/jsonschema/v6/LICENSE} (89%) create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work create mode 
100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/index.go delete mode 100644 vendor/github.com/sigstore/rekor/CONTRIBUTORS.md delete mode 100644 vendor/github.com/sigstore/rekor/LICENSE delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go delete mode 100644 
vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/error.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go delete mode 100644 vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go create mode 100644 vendor/github.com/sourcegraph/conc/Makefile delete mode 100644 
vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go delete mode 100644 vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go delete mode 100644 vendor/github.com/sourcegraph/conc/iter/iter.go delete mode 100644 vendor/github.com/sourcegraph/conc/iter/map.go create mode 100644 vendor/github.com/sourcegraph/conc/pool/context_pool.go create mode 100644 vendor/github.com/sourcegraph/conc/pool/error_pool.go create mode 100644 vendor/github.com/sourcegraph/conc/pool/pool.go create mode 100644 vendor/github.com/sourcegraph/conc/pool/result_context_pool.go create mode 100644 vendor/github.com/sourcegraph/conc/pool/result_error_pool.go create mode 100644 vendor/github.com/sourcegraph/conc/pool/result_pool.go create mode 100644 vendor/github.com/spf13/cast/.editorconfig create mode 100644 vendor/github.com/spf13/cast/.golangci.yaml create mode 100644 vendor/github.com/spf13/cast/alias.go create mode 100644 vendor/github.com/spf13/cast/basic.go delete mode 100644 vendor/github.com/spf13/cast/caste.go create mode 100644 vendor/github.com/spf13/cast/indirect.go create mode 100644 vendor/github.com/spf13/cast/internal/time.go create mode 100644 vendor/github.com/spf13/cast/internal/timeformattype_string.go create mode 100644 vendor/github.com/spf13/cast/map.go create mode 100644 vendor/github.com/spf13/cast/number.go create mode 100644 vendor/github.com/spf13/cast/slice.go create mode 100644 vendor/github.com/spf13/cast/time.go delete mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go create mode 100644 vendor/github.com/spf13/cast/zz_generated.go create mode 100644 vendor/github.com/spf13/cobra/SECURITY.md create mode 100644 vendor/github.com/spf13/pflag/bool_func.go create mode 100644 vendor/github.com/spf13/pflag/errors.go create mode 100644 vendor/github.com/spf13/pflag/func.go create mode 100644 vendor/github.com/spf13/pflag/text.go create mode 100644 vendor/github.com/spf13/pflag/time.go rename vendor/github.com/spf13/viper/{UPDATES.md => UPGRADE.md} (79%) delete mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md delete mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go delete mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt delete mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md delete mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/.gitignore delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/.travis.yml delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/draft.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/glide.yaml delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go delete 
mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_386.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_amd64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_arm64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_loong64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mips64x.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_mipsx.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_ppc64le.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_riscv64.go delete mode 100644 vendor/go.etcd.io/bbolt/bolt_s390x.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_386.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/LICENSE delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go delete mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/marshal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go delete mode 100644 
vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go create mode 100644 vendor/go.opentelemetry.io/otel/.clomonitor.yml rename vendor/go.opentelemetry.io/otel/{internal/attribute => attribute/internal}/attribute.go (97%) create mode 100644 vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/dependencies.Dockerfile delete mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh delete mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go delete mode 100644 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.10.0/trace.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.10.0 => v1.24.0}/doc.go (65%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.24.0}/exception.go (74%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.24.0}/schema.go (71%) create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.12.0 => v1.34.0}/doc.go (65%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.10.0 => v1.34.0}/exception.go (74%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.10.0 => v1.34.0}/schema.go (71%) create mode 100644 vendor/go.opentelemetry.io/otel/trace/auto.go create mode 100644 
vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go delete mode 100644 vendor/go.opentelemetry.io/otel/verify_readmes.sh rename vendor/{github.com/containers => go.podman.io}/common/LICENSE (100%) rename vendor/{github.com/containers => go.podman.io}/common/pkg/auth/auth.go (96%) rename vendor/{github.com/containers => go.podman.io}/common/pkg/auth/cli.go (96%) rename vendor/{github.com/containers => go.podman.io}/common/pkg/capabilities/capabilities.go (99%) rename vendor/{github.com/containers => go.podman.io}/common/pkg/completion/completion.go (94%) rename vendor/{github.com/containers => go.podman.io}/common/pkg/password/password_supported.go (100%) rename vendor/{github.com/containers => go.podman.io}/common/pkg/password/password_windows.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/LICENSE (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/blob.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/compression.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/copy.go (96%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/digesting_reader.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/encryption.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/manifest.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/multiple.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/progress_bars.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/progress_channel.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/sign.go (90%) rename vendor/{github.com/containers => go.podman.io}/image/v5/copy/single.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/directory/explicitfilepath/path.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/body_reader.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/cache.go (89%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/distribution_error.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_client.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_image.go (96%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_image_dest.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_image_src.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/docker_transport.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/errors.go (100%) rename 
vendor/{github.com/containers => go.podman.io}/image/v5/docker/paths_common.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/paths_freebsd.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/policyconfiguration/naming.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/README.md (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/helpers.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/normalize.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/reference.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/regexp-additions.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/reference/regexp.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/registries_d.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/docker/wwwauthenticate.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/image/docker_schema2.go (91%) rename vendor/{github.com/containers => go.podman.io}/image/v5/image/sourced.go (95%) rename vendor/{github.com/containers => go.podman.io}/image/v5/image/unparsed.go (90%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/blobinfocache/blobinfocache.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/blobinfocache/types.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/docker_list.go (91%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/docker_schema1.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/docker_schema2.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/manifest.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/memory.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/oci.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/oci_index.go (91%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/sourced.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/image/unparsed.go (94%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/impl/compat.go (96%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/impl/helpers.go (82%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/impl/properties.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/original_oci_config.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/put_blob_partial.go (95%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/signatures.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/stubs/stubs.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagedestination/wrapper.go (96%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/compat.go (94%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/layer_infos.go (96%) rename 
vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/properties.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/impl/signatures.go (93%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/stubs/get_blob_at.go (95%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/stubs/stubs.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/imagesource/wrapper.go (90%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/iolimits/iolimits.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/common.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/docker_schema2.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/docker_schema2_list.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/errors.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/list.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/manifest.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/manifest/oci_index.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/multierr/multierr.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/pkg/platform/platform_matcher.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/private/private.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/putblobdigest/put_blob_digest.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/rootless/rootless.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/set/set.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/signature/signature.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/signature/sigstore.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/signature/simple.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/signer/signer.go (94%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/streamdigest/stream_digest.go (89%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/tmpdir/tmpdir.go (91%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/unparsedimage/wrapper.go (86%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/uploadreader/upload_reader.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/internal/useragent/useragent.go (83%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/common.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/docker_schema1.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/docker_schema2.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/docker_schema2_list.go (95%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/list.go (95%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/manifest.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/manifest/oci.go (98%) rename vendor/{github.com/containers => 
go.podman.io}/image/v5/manifest/oci_index.go (94%) rename vendor/{github.com/containers => go.podman.io}/image/v5/oci/internal/oci_util.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/oci/layout/oci_delete.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/oci/layout/oci_dest.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/oci/layout/oci_src.go (88%) rename vendor/{github.com/containers => go.podman.io}/image/v5/oci/layout/oci_transport.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/oci/layout/reader.go (96%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/blobinfocache/default.go (94%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/blobinfocache/memory/memory.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/blobinfocache/none/none.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/blobinfocache/sqlite/sqlite.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/compression/compression.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/compression/internal/types.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/compression/types/types.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/compression/zstd.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/docker/config/config.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/strslice/README.md (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/strslice/strslice.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/paths_common.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/paths_freebsd.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/shortnames.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/sysregistriesv2/system_registries_v2.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/pkg/tlsclientconfig/tlsclientconfig.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/docker.go (85%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/fulcio_cert.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/internal/errors.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/internal/json.go (98%) create mode 100644 vendor/go.podman.io/image/v5/signature/internal/rekor_api_types.go rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/internal/rekor_set.go (88%) create mode 100644 vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.c create mode 100644 vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoia.h create mode 100644 vendor/go.podman.io/image/v5/signature/internal/sequoia/gosequoiafuncs.h create mode 100644 vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.go create mode 100644 vendor/go.podman.io/image/v5/signature/internal/sequoia/sequoia.h rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/internal/sigstore_payload.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/mechanism.go 
(83%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/mechanism_gpgme.go (72%) create mode 100644 vendor/go.podman.io/image/v5/signature/mechanism_gpgme_only.go rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/mechanism_openpgp.go (90%) create mode 100644 vendor/go.podman.io/image/v5/signature/mechanism_sequoia.go rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/pki_cert.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_config.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_config_sigstore.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_eval.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_eval_baselayer.go (92%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_eval_signedby.go (85%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_eval_sigstore.go (98%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_eval_simple.go (92%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_paths_common.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_paths_freebsd.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_reference_match.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/policy_types.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/signer/signer.go (85%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/sigstore/copied.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/sigstore/generate.go (100%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/sigstore/internal/signer.go (94%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/sigstore/signer.go (89%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/simple.go (89%) rename vendor/{github.com/containers => go.podman.io}/image/v5/signature/simplesigning/signer.go (92%) rename vendor/{github.com/containers => go.podman.io}/image/v5/transports/stub.go (97%) rename vendor/{github.com/containers => go.podman.io}/image/v5/transports/transports.go (93%) rename vendor/{github.com/containers => go.podman.io}/image/v5/types/types.go (99%) rename vendor/{github.com/containers => go.podman.io}/image/v5/version/version.go (96%) rename vendor/{github.com/containers => go.podman.io}/storage/AUTHORS (100%) rename vendor/{github.com/containers => go.podman.io}/storage/LICENSE (100%) rename vendor/{github.com/containers => go.podman.io}/storage/NOTICE (100%) create mode 100644 vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock.go create mode 100644 vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_unix.go create mode 100644 vendor/go.podman.io/storage/internal/rawfilelock/rawfilelock_windows.go rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/README.md (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive.go (85%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_110.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_19.go (100%) rename vendor/{github.com/containers => 
go.podman.io}/storage/pkg/archive/archive_bsd.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_linux.go (98%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_other.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_unix.go (96%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_windows.go (94%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/archive_zstd.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/changes.go (97%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/changes_linux.go (99%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/changes_other.go (96%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/changes_unix.go (94%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/changes_windows.go (93%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/copy.go (99%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/copy_unix.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/copy_windows.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/diff.go (97%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/fflags_bsd.go (98%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/fflags_unsupported.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/filter.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/time_linux.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/time_unsupported.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/whiteouts.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/archive/wrap.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/chunked/compressor/compressor.go (85%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/chunked/compressor/rollsum.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/chunked/internal/minimal/compression.go (95%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/chunked/toc/toc.go (95%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/exists_freebsd.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/exists_unix.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/exists_windows.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_darwin.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_solaris.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_unix.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/fileutils_windows.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/reflink_linux.go (100%) rename vendor/{github.com/containers => go.podman.io}/storage/pkg/fileutils/reflink_unsupported.go (100%) rename vendor/{github.com/containers => 
go.podman.io}/storage/pkg/homedir/homedir.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/homedir/homedir_unix.go (99%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/homedir/homedir_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools.go (99%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_supported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_unix.go (98%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/idtools_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/parser.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/usergroupadd_linux.go (99%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/usergroupadd_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/idtools/utils_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/buffer.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/bytespipe.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/fswriters.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/fswriters_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/fswriters_other.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/readers.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/temp_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/temp_windows.go (87%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/writeflusher.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/ioutils/writers.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lastwrite.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lockfile.go (92%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lockfile_unix.go (70%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/lockfile/lockfile_windows.go (71%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/longpath/longpath.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/flags_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mount.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mounter_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mounter_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mounter_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mountinfo.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/mountinfo_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/sharedsubtree_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/unmount_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/mount/unmount_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/pools/pools.go (98%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/promise/promise.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/README.md (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/command_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/reexec/reexec.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/regexp/regexp.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/regexp/regexp_dontprecompile.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/regexp/regexp_precompile.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chmod.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chtimes.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chtimes_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/chtimes_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/errors.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/exitcode.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/extattr_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/extattr_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/init.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/init_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lchflags_bsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lchown.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lcow_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lcow_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lstat_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/lstat_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_solaris.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/meminfo_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/mknod.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/mknod_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/mknod_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/path.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/path_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/path_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/process_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/rm.go (98%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/rm_common.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/rm_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_common.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_darwin.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_netbsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_openbsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_solaris.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/stat_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/syscall_unix.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/syscall_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/umask.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/umask_windows.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/utimes_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/utimes_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/utimes_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_darwin.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_freebsd.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_linux.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/system/xattrs_unsupported.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/getenv_linux_cgo.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/getenv_linux_nocgo.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare.c (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_cgo.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_darwin.go (97%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_freebsd.c (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_freebsd.go (98%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_gccgo.go (100%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_linux.go (99%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_unsupported.go (97%)
 rename vendor/{github.com/containers => go.podman.io}/storage/pkg/unshare/unshare_unsupported_cgo.go (100%)
 delete mode 100644 vendor/go.uber.org/automaxprocs/.codecov.yml
 delete mode 100644 vendor/go.uber.org/automaxprocs/.gitignore
 delete mode 100644 vendor/go.uber.org/automaxprocs/CHANGELOG.md
 delete mode 100644 vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/go.uber.org/automaxprocs/CONTRIBUTING.md
 delete mode 100644 vendor/go.uber.org/automaxprocs/LICENSE
 delete mode 100644 vendor/go.uber.org/automaxprocs/Makefile
 delete mode 100644 vendor/go.uber.org/automaxprocs/README.md
 delete mode 100644 vendor/go.uber.org/automaxprocs/automaxprocs.go
 delete mode 100644 vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
 delete mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
 delete mode 100644 vendor/go.uber.org/automaxprocs/maxprocs/version.go
 create mode 100644 vendor/go.yaml.in/yaml/v2/.travis.yml
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/LICENSE.libyaml (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/NOTICE (100%)
 create mode 100644 vendor/go.yaml.in/yaml/v2/README.md
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/apic.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/decode.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/emitterc.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/encode.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/parserc.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/readerc.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/resolve.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/scannerc.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/sorter.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/writerc.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yaml.go (99%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yamlh.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v2 => go.yaml.in/yaml/v2}/yamlprivateh.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/LICENSE (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/NOTICE (100%)
 create mode 100644 vendor/go.yaml.in/yaml/v3/README.md
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/apic.go (99%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/decode.go (97%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/emitterc.go (98%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/encode.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/parserc.go (93%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/readerc.go (99%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/resolve.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/scannerc.go (99%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/sorter.go (100%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/writerc.go (99%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/yaml.go (91%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/yamlh.go (99%)
 rename vendor/{sigs.k8s.io/yaml/goyaml.v3 => go.yaml.in/yaml/v3}/yamlprivateh.go (97%)
 delete mode 100644 vendor/golang.org/x/net/http2/config_go124.go
 create mode 100644 vendor/golang.org/x/net/http2/config_go125.go
 create mode 100644 vendor/golang.org/x/net/http2/config_go126.go
 delete mode 100644 vendor/golang.org/x/net/http2/config_pre_go124.go
 delete mode 100644 vendor/golang.org/x/net/http2/timer.go
 rename vendor/golang.org/x/net/http2/{writesched_priority.go => writesched_priority_rfc7540.go} (78%)
 create mode 100644 vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
 delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
 rename vendor/golang.org/x/tools/{internal/astutil => go/ast}/edge/edge.go (100%)
 create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/cursor.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
 delete mode 100644 vendor/golang.org/x/tools/internal/modindex/types.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/presence.go
 create mode 100644 vendor/helm.sh/helm/v3/pkg/registry/fallback.go
 create mode 100644 vendor/helm.sh/helm/v3/pkg/registry/transport.go
 create mode 100644 vendor/k8s.io/api/networking/v1/well_known_labels.go
 rename vendor/{sigs.k8s.io/yaml/yaml_go110.go => k8s.io/api/resource/v1beta1/devicetaint.go} (50%)
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/devicetaint.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/doc.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/generated.pb.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/generated.proto
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/register.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/types.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/api/resource/v1beta2/zz_generated.prerelease-lifecycle.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/operation/operation.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/asn1/oid.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/ip.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/compatibility/registry.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/compatibility/version.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/counter.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/counterset.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicecounterconsumption.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicesubrequest.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetoleration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go
 create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go
 create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/clustertrustbundle.go
 create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1beta1/leasecandidate.go
 create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/ipaddress.go
 create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/servicecidr.go
 create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha3/devicetaintrule.go
 create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/deviceclass.go
 create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/interface.go
 create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaim.go
 create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaimtemplate.go
 create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta2/resourceslice.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/clustertrustbundle.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/leasecandidate.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ipaddress.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/servicecidr.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/deviceclass.go
 rename vendor/{github.com/google/gofuzz => k8s.io/client-go/kubernetes/typed/resource/v1beta2}/doc.go (76%)
 rename vendor/{github.com/sigstore/rekor/COPYRIGHT.txt => k8s.io/client-go/kubernetes/typed/resource/v1beta2/generated_expansion.go} (65%)
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resource_client.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceclaim.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceclaimtemplate.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta2/resourceslice.go
 create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/clustertrustbundle.go
 create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1beta1/leasecandidate.go
 create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/ipaddress.go
 create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/servicecidr.go
 create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/devicetaintrule.go
 create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/deviceclass.go
 create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/expansion_generated.go
 create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/resourceclaim.go
 create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/resourceclaimtemplate.go
 create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta2/resourceslice.go
 create mode 100644 vendor/k8s.io/client-go/rest/.mockery.yaml
 create mode 100644 vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/tracker_flag.go
 create mode 100644 vendor/k8s.io/component-base/compatibility/OWNERS
 rename vendor/k8s.io/component-base/{featuregate => compatibility}/registry.go (78%)
 create mode 100644 vendor/k8s.io/component-base/compatibility/version.go
 rename vendor/{github.com/mitchellh/mapstructure => k8s.io/kube-openapi/pkg/internal/third_party/govalidator}/LICENSE (88%)
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/patterns.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/govalidator/validator.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/kubernetes-extensions.go
 delete mode 100644 vendor/k8s.io/utils/clock/testing/fake_clock.go
 delete mode 100644 vendor/k8s.io/utils/clock/testing/simple_interval_clock.go
 delete mode 100644 vendor/oras.land/oras-go/LICENSE
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/client.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/client_opts.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/client.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/login.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/logout.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/auth/docker/resolver.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/consts.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/decompress.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/errors.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/file.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/gunzip.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/iowriter.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/manifest.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/memory.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/multireader.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/multiwriter.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/oci.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/opts.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/passthrough.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/readerat.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/registry.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/untar.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/content/utils.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/context/logger.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/oras/copy.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/oras/errors.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/oras/opts.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/oras/provider.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/oras/store.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/registry/reference.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/repository.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/registry/remote/url.go
 delete mode 100644 vendor/oras.land/oras-go/pkg/registry/repository.go
 create mode 100644 vendor/oras.land/oras-go/v2/.gitignore
 create mode 100644 vendor/oras.land/oras-go/v2/CODEOWNERS
 create mode 100644 vendor/oras.land/oras-go/v2/CODE_OF_CONDUCT.md
 create mode 100644 vendor/oras.land/oras-go/v2/MIGRATION_GUIDE.md
 create mode 100644 vendor/oras.land/oras-go/v2/Makefile
 create mode 100644 vendor/oras.land/oras-go/v2/OWNERS.md
 create mode 100644 vendor/oras.land/oras-go/v2/README.md
 create mode 100644 vendor/oras.land/oras-go/v2/SECURITY.md
 create mode 100644 vendor/oras.land/oras-go/v2/content.go
 create mode 100644 vendor/oras.land/oras-go/v2/content/memory/memory.go
 create mode 100644 vendor/oras.land/oras-go/v2/copy.go
 create mode 100644 vendor/oras.land/oras-go/v2/copyerror.go
 create mode 100644 vendor/oras.land/oras-go/v2/extendedcopy.go
 create mode 100644 vendor/oras.land/oras-go/v2/internal/cas/memory.go
 create mode 100644 vendor/oras.land/oras-go/v2/internal/cas/proxy.go
 create mode 100644 vendor/oras.land/oras-go/v2/internal/copyutil/stack.go
 create mode 100644 vendor/oras.land/oras-go/v2/internal/httputil/seek.go
 rename vendor/oras.land/oras-go/{pkg/content/interface.go => v2/internal/interfaces/registry.go} (67%)
 create mode 100644 vendor/oras.land/oras-go/v2/internal/platform/platform.go
 create mode 100644 vendor/oras.land/oras-go/v2/internal/registryutil/proxy.go
 create mode 100644 vendor/oras.land/oras-go/v2/pack.go
 rename vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/cache.go (67%)
 rename vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/challenge.go (98%)
 rename vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/client.go (70%)
 rename vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/credential.go (89%)
 rename vendor/oras.land/oras-go/{pkg => v2}/registry/remote/auth/scope.go (58%)
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go
 rename vendor/oras.land/oras-go/{pkg/artifact/consts.go => v2/registry/remote/credentials/native_store_darwin.go} (66%)
 rename vendor/oras.land/oras-go/{pkg/target/target.go => v2/registry/remote/credentials/native_store_generic.go} (64%)
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go
 rename vendor/oras.land/oras-go/{pkg/context/context.go => v2/registry/remote/credentials/native_store_windows.go} (67%)
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/internal/errutil/errutil.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/manifest.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/referrers.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/registry.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/repository.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/retry/client.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/retry/policy.go
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/url.go
 rename vendor/oras.land/oras-go/{pkg => v2}/registry/remote/utils.go (65%)
 create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/warning.go
 create mode 100644 vendor/oras.land/oras-go/v2/target.go
 create mode 100644 vendor/sigs.k8s.io/randfill/CONTRIBUTING.md
 rename vendor/{github.com/go-openapi/analysis => sigs.k8s.io/randfill}/LICENSE (99%)
 create mode 100644 vendor/sigs.k8s.io/randfill/NOTICE
 create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS
 create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES
 rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/README.md (53%)
 create mode 100644 vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS
 rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/bytesource/bytesource.go (100%)
 create mode 100644 vendor/sigs.k8s.io/randfill/code-of-conduct.md
 create mode 100644 vendor/sigs.k8s.io/randfill/randfill.go
 delete mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml
 delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
 create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go
 delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v3/OWNERS
 delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v3/patch.go
 create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v3/yaml_aliases.go

diff --git a/go.mod b/go.mod
index 5ae1add8e3..3fd4421e99 100644
--- a/go.mod
+++ b/go.mod
@@ -45,6 +45,9 @@ require (
 	sigs.k8s.io/yaml v1.6.0
 )
 
+// dario.cat/mergo domain is having issues, redirect to the actual repository
+replace dario.cat/mergo => github.com/darccio/mergo v1.0.2
+
 require (
 	cel.dev/expr v0.24.0 // indirect
 	dario.cat/mergo v1.0.2 // indirect
diff --git a/go.sum b/go.sum
index 774b9ee748..ce1a9859e3 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,6 @@
 cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
 cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
-dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
@@ -101,6 +99,8 @@ github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h
 github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
 github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
+github.com/darccio/mergo v1.0.2 h1:LJqV0GD/o8kMWAWN4XCVj1g5A+b4Nxr845y7iRNK3iY=
+github.com/darccio/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
index 8f651f9cc6..a7aae0900c 100644
--- a/vendor/cel.dev/expr/eval.pb.go
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -1,15 +1,15 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
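// The replace directive added in the go.mod hunk above follows the standard Go
// module mechanism: builds fetch the replacement module, while source files keep
// importing the original path. A minimal sketch of the pattern in a hypothetical
// consumer module (the module name example.com/consumer is illustrative, not
// part of this patch):
//
//	module example.com/consumer
//
//	go 1.23
//
//	require dario.cat/mergo v1.0.2
//
//	// Resolve dario.cat/mergo from the GitHub mirror; import statements
//	// referencing "dario.cat/mergo" stay unchanged.
//	replace dario.cat/mergo => github.com/darccio/mergo v1.0.2
//
// After `go mod tidy`, go.sum records hashes for github.com/darccio/mergo in
// place of dario.cat/mergo, which matches the go.sum hunk above.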
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.3 +// protoc v5.27.1 // source: cel/expr/eval.proto package expr import ( - status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -22,21 +22,18 @@ const ( ) type EvalState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState) Reset() { *x = EvalState{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState) String() string { @@ -47,7 +44,7 @@ func (*EvalState) ProtoMessage() {} func (x *EvalState) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,25 +74,22 @@ func (x *EvalState) GetResults() []*EvalState_Result { } type ExprValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Kind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Kind: // // *ExprValue_Value // *ExprValue_Error // *ExprValue_Unknown - Kind isExprValue_Kind `protobuf_oneof:"kind"` + Kind isExprValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExprValue) Reset() { *x = ExprValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExprValue) String() string { @@ -106,7 +100,7 @@ func (*ExprValue) ProtoMessage() {} func (x *ExprValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -121,30 +115,36 @@ func (*ExprValue) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} } -func (m *ExprValue) GetKind() isExprValue_Kind { - if m != nil { - return m.Kind +func (x *ExprValue) GetKind() isExprValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *ExprValue) GetValue() *Value { - if x, ok := x.GetKind().(*ExprValue_Value); ok { - return x.Value + if x != nil { + if x, ok := x.Kind.(*ExprValue_Value); ok { + return x.Value + } } return 
nil } func (x *ExprValue) GetError() *ErrorSet { - if x, ok := x.GetKind().(*ExprValue_Error); ok { - return x.Error + if x != nil { + if x, ok := x.Kind.(*ExprValue_Error); ok { + return x.Error + } } return nil } func (x *ExprValue) GetUnknown() *UnknownSet { - if x, ok := x.GetKind().(*ExprValue_Unknown); ok { - return x.Unknown + if x != nil { + if x, ok := x.Kind.(*ExprValue_Unknown); ok { + return x.Unknown + } } return nil } @@ -172,20 +172,17 @@ func (*ExprValue_Error) isExprValue_Kind() {} func (*ExprValue_Unknown) isExprValue_Kind() {} type ErrorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` unknownFields protoimpl.UnknownFields - - Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ErrorSet) Reset() { *x = ErrorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorSet) String() string { @@ -196,7 +193,7 @@ func (*ErrorSet) ProtoMessage() {} func (x *ErrorSet) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_eval_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -211,28 +208,85 @@ func (*ErrorSet) Descriptor() ([]byte, []int) { return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} } -func (x *ErrorSet) GetErrors() []*status.Status { +func (x *ErrorSet) GetErrors() []*Status { if x != nil { return x.Errors } return nil } -type UnknownSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Status struct { + state protoimpl.MessageState `protogen:"open.v1"` + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +func (x *Status) Reset() { + *x = Status{} + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UnknownSet) Reset() { - *x = UnknownSet{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[3] +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UnknownSet) String() string { @@ -242,8 +296,8 @@ func (x *UnknownSet) String() string { func (*UnknownSet) ProtoMessage() {} func (x *UnknownSet) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -255,7 +309,7 @@ func (x *UnknownSet) ProtoReflect() protoreflect.Message { // Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. func (*UnknownSet) Descriptor() ([]byte, []int) { - return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} + return file_cel_expr_eval_proto_rawDescGZIP(), []int{4} } func (x *UnknownSet) GetExprs() []int64 { @@ -266,21 +320,18 @@ func (x *UnknownSet) GetExprs() []int64 { } type EvalState_Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvalState_Result) Reset() { *x = EvalState_Result{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_eval_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_eval_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EvalState_Result) String() string { @@ -290,8 +341,8 @@ func (x *EvalState_Result) String() string { func (*EvalState_Result) ProtoMessage() {} func (x *EvalState_Result) ProtoReflect() protoreflect.Message { - mi := &file_cel_expr_eval_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cel_expr_eval_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,39 +376,45 @@ var File_cel_expr_eval_proto protoreflect.FileDescriptor var file_cel_expr_eval_proto_rawDesc = []byte{ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, - 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, - 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, - 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, - 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, - 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, + 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, + 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 
0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, + 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28, + 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, + 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, + 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -372,28 +429,30 @@ func file_cel_expr_eval_proto_rawDescGZIP() []byte { return file_cel_expr_eval_proto_rawDescData } -var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_cel_expr_eval_proto_goTypes = []interface{}{ +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6) 
+var file_cel_expr_eval_proto_goTypes = []any{ (*EvalState)(nil), // 0: cel.expr.EvalState (*ExprValue)(nil), // 1: cel.expr.ExprValue (*ErrorSet)(nil), // 2: cel.expr.ErrorSet - (*UnknownSet)(nil), // 3: cel.expr.UnknownSet - (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result - (*Value)(nil), // 5: cel.expr.Value - (*status.Status)(nil), // 6: google.rpc.Status + (*Status)(nil), // 3: cel.expr.Status + (*UnknownSet)(nil), // 4: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result + (*Value)(nil), // 6: cel.expr.Value + (*anypb.Any)(nil), // 7: google.protobuf.Any } var file_cel_expr_eval_proto_depIdxs = []int32{ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue - 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result - 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet - 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet - 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status + 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_cel_expr_eval_proto_init() } @@ -402,69 +461,7 @@ func file_cel_expr_eval_proto_init() { return } file_cel_expr_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExprValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UnknownSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EvalState_Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{ (*ExprValue_Value)(nil), 
(*ExprValue_Error)(nil), (*ExprValue_Unknown)(nil), @@ -475,7 +472,7 @@ func file_cel_expr_eval_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_cel_expr_eval_proto_rawDesc, NumEnums: 0, - NumMessages: 5, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/dario.cat/mergo/FUNDING.json b/vendor/dario.cat/mergo/FUNDING.json new file mode 100644 index 0000000000..0585e1fe13 --- /dev/null +++ b/vendor/dario.cat/mergo/FUNDING.json @@ -0,0 +1,7 @@ +{ + "drips": { + "ethereum": { + "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930" + } + } +} diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md index 0b3c488893..0e4a59afd9 100644 --- a/vendor/dario.cat/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend * [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) * [go-micro/go-micro](https://github.com/go-micro/go-micro) * [grafana/loki](https://github.com/grafana/loki) -* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) * [masterminds/sprig](github.com/Masterminds/sprig) * [moby/moby](https://github.com/moby/moby) * [slackhq/nebula](https://github.com/slackhq/nebula) @@ -191,10 +190,6 @@ func main() { } ``` -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - ### Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md index a5de61f77b..3788fcc1c2 100644 --- a/vendor/dario.cat/mergo/SECURITY.md +++ b/vendor/dario.cat/mergo/SECURITY.md @@ -4,8 +4,8 @@ | Version | Supported | | ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | +| 1.x.x | :white_check_mark: | +| < 1.0 | :x: | ## Security contact information diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go index adfeedf5e8..361c9ac692 100644 --- a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go @@ -48,6 +48,7 @@ type ConsumeFuzzer struct { NumberOfCalls int position uint32 fuzzUnexportedFields bool + forceUTF8Strings bool curDepth int Funcs map[reflect.Type]reflect.Value } @@ -104,6 +105,14 @@ func (f *ConsumeFuzzer) DisallowUnexportedFields() { f.fuzzUnexportedFields = false } +func (f *ConsumeFuzzer) AllowNonUTF8Strings() { + f.forceUTF8Strings = false +} + +func (f *ConsumeFuzzer) DisallowNonUTF8Strings() { + f.forceUTF8Strings = true +} + func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error { e := reflect.ValueOf(targetStruct).Elem() return f.fuzzStruct(e, false) @@ -224,6 +233,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error if e.CanSet() { e.Set(uu) } + case reflect.Uint: + newInt, err := f.GetUint() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } case reflect.Uint16: newInt, err := f.GetUint16() if err != nil { @@ -309,6 +326,14 @@ func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error if e.CanSet() { e.SetUint(uint64(b)) } + case reflect.Bool: + 
b, err := f.GetBool() + if err != nil { + return err + } + if e.CanSet() { + e.SetBool(b) + } } return nil } @@ -410,6 +435,23 @@ func (f *ConsumeFuzzer) GetUint64() (uint64, error) { return binary.BigEndian.Uint64(u64), nil } +func (f *ConsumeFuzzer) GetUint() (uint, error) { + var zero uint + size := int(unsafe.Sizeof(zero)) + if size == 8 { + u64, err := f.GetUint64() + if err != nil { + return 0, err + } + return uint(u64), nil + } + u32, err := f.GetUint32() + if err != nil { + return 0, err + } + return uint(u32), nil +} + func (f *ConsumeFuzzer) GetBytes() ([]byte, error) { var length uint32 var err error @@ -461,7 +503,11 @@ func (f *ConsumeFuzzer) GetString() (string, error) { return "nil", errors.New("numbers overflow") } f.position = byteBegin + length - return string(f.data[byteBegin:f.position]), nil + s := string(f.data[byteBegin:f.position]) + if f.forceUTF8Strings { + s = strings.ToValidUTF8(s, "") + } + return s, nil } func (f *ConsumeFuzzer) GetBool() (bool, error) { diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index f95a504fe7..fabe5e43dc 100644 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## 3.4.0 (2025-06-27) + +### Added + +- #268: Added property to Constraints to include prereleases for Check and Validate + +### Changed + +- #263: Updated Go testing for 1.24, 1.23, and 1.22 +- #269: Updated the error message handling for message case and wrapping errors +- #266: Restore the ability to have leading 0's when parsing with NewVersion. + Opt-out of this by setting CoerceNewVersion to false. + +### Fixed + +- #257: Fixed the CodeQL link (thanks @dmitris) +- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out + of this by setting DetailedNewVersionErrors to false for faster performance. +- #267: Handle pre-releases for an "and" group if one constraint includes them + +## 3.3.1 (2024-11-19) + +### Fixed + +- #253: Fix for allowing some version that were invalid + ## 3.3.0 (2024-08-27) ### Added @@ -137,7 +163,7 @@ functions. These are described in the added and changed sections below. - #78: Fix unchecked error in example code (thanks @ravron) - #70: Fix the handling of pre-releases and the 0.0.0 release edge case - #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num +- #107: Fix handling prerelease when sorting alphanum and num - #109: Fixed where Validate sometimes returns wrong message on error ## 1.4.2 (2018-04-10) diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index ed56936084..2f56c676a5 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -50,6 +50,18 @@ other versions, convert the version back into a string, and get the original string. Getting the original string is useful if the semantic version was coerced into a valid form. +There are package level variables that affect how `NewVersion` handles parsing. + +- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant + versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch + part. This enables the use of CalVer in versions even when not compliant with SemVer. + When set to `false` less coercion work is done. 
+- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when
+  `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
+  it can provide some more insight into why a version is invalid. Setting
+  `DetailedNewVersionErrors` to `false` is faster but provides less detailed error
+  messages if a version fails to parse.
+
 ## Sorting Semantic Versions
 
 A set of versions can be sorted using the `sort` package from the standard
 library.
@@ -160,6 +172,10 @@ means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
 sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
 the spec specifies.
 
+The `Constraints` instance returned from `semver.NewConstraint()` has a property
+`IncludePrerelease` that, when set to true, causes prerelease versions to be included
+in the results when `Check()` and `Validate()` are called.
+
 ### Hyphen Range Comparisons
 
 There are multiple methods to handle ranges and the first is hyphens ranges.
@@ -250,7 +266,7 @@ or [create a pull request](https://github.com/Masterminds/semver/pulls).
 Security is an important consideration for this project. The project currently uses
 the following tools to help discover security issues:
 
-* [CodeQL](https://github.com/Masterminds/semver)
+* [CodeQL](https://codeql.github.com)
 * [gosec](https://github.com/securego/gosec)
 * Daily Fuzz testing
 
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
index 8461c7ed90..8b7a10f836 100644
--- a/vendor/github.com/Masterminds/semver/v3/constraints.go
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -12,6 +12,13 @@ import (
 // checked against.
 type Constraints struct {
 	constraints [][]*constraint
+	containsPre []bool
+
+	// IncludePrerelease specifies if pre-releases should be included in
+	// the results. Note, if a constraint range has a prerelease then
+	// prereleases will be included for that AND group even if this is
+	// set to false.
+	IncludePrerelease bool
 }
 
 // NewConstraint returns a Constraints instance that a Version instance can
@@ -22,11 +29,10 @@ func NewConstraint(c string) (*Constraints, error) {
 	c = rewriteRange(c)
 
 	ors := strings.Split(c, "||")
-	or := make([][]*constraint, len(ors))
+	lenors := len(ors)
+	or := make([][]*constraint, lenors)
+	hasPre := make([]bool, lenors)
 	for k, v := range ors {
-
-		// TODO: Find a way to validate and fetch all the constraints in a simpler form
-
 		// Validate the segment
 		if !validConstraintRegex.MatchString(v) {
 			return nil, fmt.Errorf("improper constraint: %s", v)
@@ -43,12 +49,22 @@ func NewConstraint(c string) (*Constraints, error) {
 				return nil, err
 			}
 
+			// If one of the constraints has a prerelease, record this.
+			// This information is used when checking all in an "and"
+			// group to ensure they all check for prereleases.
+			if pc.con.pre != "" {
+				hasPre[k] = true
+			}
+
 			result[i] = pc
 		}
 		or[k] = result
 	}
 
-	o := &Constraints{constraints: or}
+	o := &Constraints{
+		constraints: or,
+		containsPre: hasPre,
+	}
+
 	return o, nil
 }
 
@@ -57,10 +73,10 @@ func (cs Constraints) Check(v *Version) bool {
 	// TODO(mattfarina): For v4 of this library consolidate the Check and Validate
 	// functions as the underlying functions make that possible now.
// loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { + for i, o := range cs.constraints { joy := true for _, c := range o { - if check, _ := c.check(v); !check { + if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check { joy = false break } @@ -83,12 +99,12 @@ func (cs Constraints) Validate(v *Version) (bool, []error) { // Capture the prerelease message only once. When it happens the first time // this var is marked var prerelesase bool - for _, o := range cs.constraints { + for i, o := range cs.constraints { joy := true for _, c := range o { // Before running the check handle the case there the version is // a prerelease and the check is not searching for prereleases. - if c.con.pre == "" && v.pre != "" { + if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" { if !prerelesase { em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) e = append(e, em) @@ -98,7 +114,7 @@ func (cs Constraints) Validate(v *Version) (bool, []error) { } else { - if _, err := c.check(v); err != nil { + if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil { e = append(e, err) joy = false } @@ -227,8 +243,8 @@ type constraint struct { } // Check if a version meets the constraint -func (c *constraint) check(v *Version) (bool, error) { - return constraintOps[c.origfunc](v, c) +func (c *constraint) check(v *Version, includePre bool) (bool, error) { + return constraintOps[c.origfunc](v, c, includePre) } // String prints an individual constraint into a string @@ -236,7 +252,7 @@ func (c *constraint) string() string { return c.origfunc + c.orig } -type cfunc func(v *Version, c *constraint) (bool, error) +type cfunc func(v *Version, c *constraint, includePre bool) (bool, error) func parseConstraint(c string) (*constraint, error) { if len(c) > 0 { @@ -272,7 +288,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. - return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs.con = con @@ -290,7 +306,7 @@ func parseConstraint(c string) (*constraint, error) { // The constraintRegex should catch any regex parsing errors. So, // we should never get here. - return nil, errors.New("constraint Parser Error") + return nil, errors.New("constraint parser error") } cs := &constraint{ @@ -305,16 +321,14 @@ func parseConstraint(c string) (*constraint, error) { } // Constraint functions -func constraintNotEqual(v *Version, c *constraint) (bool, error) { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } +func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+ if v.Prerelease() != "" && !includePre { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + if c.dirty { if c.con.Major() != v.Major() { return true, nil } @@ -345,12 +359,11 @@ func constraintNotEqual(v *Version, c *constraint) (bool, error) { return true, nil } -func constraintGreaterThan(v *Version, c *constraint) (bool, error) { +func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -391,11 +404,10 @@ func constraintGreaterThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) } -func constraintLessThan(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -406,12 +418,11 @@ func constraintLessThan(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) } -func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { +func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. + if v.Prerelease() != "" && !includePre { return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) } @@ -422,11 +433,10 @@ func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { return false, fmt.Errorf("%s is less than %s", v, c.orig) } -func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { +func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) { + // The existence of prereleases is checked at the group level and passed in. + // Exit early if the version has a prerelease but those are to be ignored. 
+	if v.Prerelease() != "" && !includePre {
 		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 	}
 
@@ -455,11 +465,10 @@ func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
 // ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
 // ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
 // ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
-func constraintTilde(v *Version, c *constraint) (bool, error) {
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
 		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 	}
 
@@ -487,16 +496,15 @@ func constraintTilde(v *Version, c *constraint) (bool, error) {
 
 // When there is a .x (dirty) status it automatically opts in to ~. Otherwise
 // it's a straight =
-func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
 		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 	}
 
 	if c.dirty {
-		return constraintTilde(v, c)
+		return constraintTilde(v, c, includePre)
 	}
 
 	eq := v.Equal(c.con)
@@ -516,11 +524,10 @@ func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
 // ^0.0.3 --> >=0.0.3 <0.0.4
 // ^0.0 --> >=0.0.0 <0.1.0
 // ^0 --> >=0.0.0 <1.0.0
-func constraintCaret(v *Version, c *constraint) (bool, error) {
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
+func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
+	// The existence of prereleases is checked at the group level and passed in.
+	// Exit early if the version has a prerelease but those are to be ignored.
+	if v.Prerelease() != "" && !includePre {
 		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
 	}
 
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index ff499fb664..7a3ba73887 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -14,32 +14,52 @@ import (
 // The compiled version of the regex created at init() is cached here so it
 // only needs to be created once.
 var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets if leading 0's are allowed in the version part. Leading 0's are
+// not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If set to false,
+// ErrInvalidSemVer is returned for an invalid version. This does not apply to
+// StrictNewVersion. Setting this variable to false returns errors more quickly.
+var DetailedNewVersionErrors = true
 
 var (
 	// ErrInvalidSemVer is returned a version is found to be invalid when
 	// being parsed.
-	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+	ErrInvalidSemVer = errors.New("invalid semantic version")
 
 	// ErrEmptyString is returned when an empty string is passed in for parsing.
-	ErrEmptyString = errors.New("Version string empty")
+	ErrEmptyString = errors.New("version string empty")
 
 	// ErrInvalidCharacters is returned when invalid characters are found as
 	// part of a version
-	ErrInvalidCharacters = errors.New("Invalid characters in version")
+	ErrInvalidCharacters = errors.New("invalid characters in version")
 
 	// ErrSegmentStartsZero is returned when a version segment starts with 0.
 	// This is invalid in SemVer.
-	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+	ErrSegmentStartsZero = errors.New("version segment starts with 0")
 
 	// ErrInvalidMetadata is returned when the metadata is an invalid format
-	ErrInvalidMetadata = errors.New("Invalid Metadata string")
+	ErrInvalidMetadata = errors.New("invalid metadata string")
 
 	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
-	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+	ErrInvalidPrerelease = errors.New("invalid prerelease string")
 )
 
 // semVerRegex is the regular expression used to parse a semantic version.
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+	`(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+	`(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
+
+// looseSemVerRegex is a regular expression that lets invalid semver expressions through
+// with enough detail that certain errors can be checked for.
+const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
 	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
 	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
@@ -53,6 +73,7 @@ type Version struct {
 
 func init() {
 	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+	looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$")
 }
 
 const (
@@ -140,7 +161,80 @@ func StrictNewVersion(v string) (*Version, error) {
 // attempts to convert it to SemVer. If you want to validate it was a strict
 // semantic version at parse time see StrictNewVersion().
 func NewVersion(v string) (*Version, error) {
+	if CoerceNewVersion {
+		return coerceNewVersion(v)
+	}
 	m := versionRegex.FindStringSubmatch(v)
+	if m == nil {
+
+		// Disabling detailed errors is first so that it is in the fast path.
+		if !DetailedNewVersionErrors {
+			return nil, ErrInvalidSemVer
+		}
+
+		// Check for specific errors with the semver string and return a more detailed
+		// error.
+ m = looseVersionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + err := validateVersion(m) + if err != nil { + return nil, err + } + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[5], + pre: m[4], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(m[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(m[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing version segment: %w", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +func coerceNewVersion(v string) (*Version, error) { + m := looseVersionRegex.FindStringSubmatch(v) if m == nil { return nil, ErrInvalidSemVer } @@ -154,13 +248,13 @@ func NewVersion(v string) (*Version, error) { var err error sv.major, err = strconv.ParseUint(m[1], 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } if m[2] != "" { sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.minor = 0 @@ -169,7 +263,7 @@ func NewVersion(v string) (*Version, error) { if m[3] != "" { sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) + return nil, fmt.Errorf("error parsing version segment: %w", err) } } else { sv.patch = 0 @@ -612,7 +706,9 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { - if containsOnly(p, num) { + if p == "" { + return ErrInvalidPrerelease + } else if containsOnly(p, num) { if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -631,9 +727,62 @@ func validatePrerelease(p string) error { func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { - if !containsOnly(p, allowed) { + if p == "" { return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} + +// validateVersion checks for common validation issues but may not catch all errors +func validateVersion(m []string) error { + var err error + var v string + if m[1] != "" { + if len(m[1]) > 1 && m[1][0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) } } + + if m[2] != "" { + v = strings.TrimPrefix(m[2], ".") + if len(v) > 1 && v[0] == '0' { + return ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[3] != "" { + v = strings.TrimPrefix(m[3], ".") + if len(v) > 1 && v[0] == '0' { + return 
ErrSegmentStartsZero + } + _, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return fmt.Errorf("error parsing version segment: %w", err) + } + } + + if m[5] != "" { + if err = validatePrerelease(m[5]); err != nil { + return err + } + } + + if m[8] != "" { + if err = validateMetadata(m[8]); err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/.clang-format b/vendor/github.com/Microsoft/hcsshim/.clang-format new file mode 100644 index 0000000000..fd843ce399 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/.clang-format @@ -0,0 +1,12 @@ +Language: Cpp +BasedOnStyle: Microsoft +BreakBeforeBraces: Attach +PointerAlignment: Left +AllowShortFunctionsOnASingleLine: All +# match Go style +IndentCaseLabels: false +# don't break comments over line limit (needed for CodeQL exceptions) +ReflowComments: false +InsertNewlineAtEOF: true +KeepEmptyLines: + AtEndOfFile: true diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml index 7d38a2fb9e..113e6f07ac 100644 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -5,9 +5,6 @@ run: - admin - functional - integration - skip-dirs: - # paths are relative to module root - - cri-containerd/test-images linters: enable: @@ -34,13 +31,15 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true stylecheck: # https://staticcheck.io/docs/checks checks: ["all"] issues: + exclude-dirs: + # paths are relative to module root + - cri-containerd/test-images exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -70,22 +69,22 @@ issues: - path: layer.go linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: hcsshim.go linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: cmd\\ncproxy\\nodenetsvc\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: cmd\\ncproxy_mock\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcs\\schema2\\ linters: @@ -95,67 +94,67 @@ issues: - path: internal\\wclayer\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: hcn\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcs\\schema1\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hns\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: ext4\\internal\\compactext4\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: ext4\\internal\\format\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\guestrequest\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\guest\\prot\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\windevice\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\winapi\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\vmcompute\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\regstate\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" - path: internal\\hcserror\\ linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" # v0 APIs are deprecated, but still retained for backwards compatability - path: cmd\\ncproxy\\ @@ -171,4 +170,4 @@ issues: - path: internal\\vhdx\\info linters: - stylecheck - Text: "ST1003:" + text: "ST1003:" 
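
The Masterminds/semver bump vendored above adds two package-level parsing knobs (`CoerceNewVersion`, `DetailedNewVersionErrors`) and a `Constraints.IncludePrerelease` field. A minimal sketch of how a consumer might exercise them (illustrative only, not part of this patch; the version strings are invented):

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// CoerceNewVersion defaults to true: a CalVer-ish "2025.01" is coerced.
	v, err := semver.NewVersion("2025.01")
	fmt.Println(v, err) // 2025.1.0 <nil>

	// With coercion off, parsing is strict; DetailedNewVersionErrors picks
	// between a specific reason and the generic ErrInvalidSemVer.
	semver.CoerceNewVersion = false
	semver.DetailedNewVersionErrors = true
	_, err = semver.NewVersion("2025.01")
	fmt.Println(err) // version segment starts with 0

	// IncludePrerelease makes a plain range consider prerelease versions.
	c, _ := semver.NewConstraint(">=1.2.0")
	pre := semver.MustParse("1.3.0-beta.1")
	fmt.Println(c.Check(pre)) // false: prereleases are excluded by default
	c.IncludePrerelease = true
	fmt.Println(c.Check(pre)) // true
}
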
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index de64358948..9a9f5b4014 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -1,13 +1,20 @@ -BASE:=base.tar.gz -DEV_BUILD:=0 +include Makefile.bootfiles GO:=go GO_FLAGS:=-ldflags "-s -w" # strip Go binaries CGO_ENABLED:=0 GOMODVENDOR:= +KMOD:=0 CFLAGS:=-O2 -Wall -LDFLAGS:=-static -s # strip C binaries +LDFLAGS:=-static -s #strip C binaries +LDLIBS:= +PREPROCESSORFLAGS:= +ifeq "$(KMOD)" "1" +LDFLAGS:= -s +LDLIBS:= -lkmod +PREPROCESSORFLAGS:=-DMODULES=1 +endif GO_FLAGS_EXTRA:= ifeq "$(GOMODVENDOR)" "1" @@ -23,108 +30,14 @@ SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) # additional directories to search for rule prerequisites and targets VPATH=$(SRCROOT) -DELTA_TARGET=out/delta.tar.gz - -ifeq "$(DEV_BUILD)" "1" -DELTA_TARGET=out/delta-dev.tar.gz -endif - -ifeq "$(SNP_BUILD)" "1" -DELTA_TARGET=out/delta-snp.tar.gz -endif - # The link aliases for gcstools GCS_TOOLS=\ generichook \ install-drivers -# Common path prefix. -PATH_PREFIX:= -# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL) -VMGS_TOOL:= -IGVM_TOOL:= -KERNEL_PATH:= - -.PHONY: all always rootfs test snp simple - -.DEFAULT_GOAL := all - -all: out/initrd.img out/rootfs.tar.gz - -clean: - find -name '*.o' -print0 | xargs -0 -r rm - rm -rf bin deps rootfs out - test: cd $(SRCROOT) && $(GO) test -v ./internal/guest/... -rootfs: out/rootfs.vhd - -snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs - -simple: out/simple.vmgs snp - -%.vmgs: %.bin - rm -f $@ - # du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes - $(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc` - $(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8 - -# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk. -out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0 - -ROOTFS_DEVICE:=/dev/sda -VERITY_DEVICE:=/dev/sdb -# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.) 
-out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0 - -# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. -out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh - rm -f $@ - python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0 - -# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. -%.vhd: % bin/cmd/tar2ext4 - ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ - -# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. -%.vhd: %.ext4 bin/cmd/tar2ext4 - ./bin/cmd/tar2ext4 -only-vhd -i $< -o $@ - -%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt - veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info - # Retrieve info required by dm-verity at boot time - # Get the blocksize of rootfs - cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest - cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt - cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize - cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize - cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks - echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors - -out/rootfs.hash.salt: - hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ - -out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4 - gzip -f -d ./out/rootfs.tar.gz - ./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@ - -out/rootfs.tar.gz: out/initrd.img - rm -rf rootfs-conv - mkdir rootfs-conv - gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) - tar -zcf $@ -C rootfs-conv . - rm -rf rootfs-conv - -out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh - $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed - gzip -c out/initrd.img.uncompressed > $@ - rm out/initrd.img.uncompressed - # This target includes utilities which may be useful for testing purposes. 
out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
 	rm -rf rootfs-dev
@@ -168,10 +81,7 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
 	tar -zcf $@ -C rootfs .
 	rm -rf rootfs
 
-out/containerd-shim-runhcs-v1.exe:
-	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
-
-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
 	@mkdir -p $(dir $@)
 	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
@@ -181,8 +91,8 @@ bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
 
 bin/init: init/init.o vsockexec/vsock.o
 	@mkdir -p bin
-	$(CC) $(LDFLAGS) -o $@ $^
+	$(CC) $(LDFLAGS) -o $@ $^ $(LDLIBS)
 
 %.o: %.c
 	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
\ No newline at end of file
+	$(CC) $(PREPROCESSORFLAGS) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles b/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
new file mode 100644
index 0000000000..e6f06d4916
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles
@@ -0,0 +1,197 @@
+BASE:=base.tar.gz
+DEV_BUILD:=0
+
+DELTA_TARGET=out/delta.tar.gz
+
+ifeq "$(DEV_BUILD)" "1"
+DELTA_TARGET=out/delta-dev.tar.gz
+endif
+
+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
+SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
+
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipes e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=
+TAR2EXT4_TOOL:=bin/cmd/tar2ext4
+
+ROOTFS_DEVICE:=/dev/sda
+HASH_DEVICE:=/dev/sdb
+
+.PHONY: all always rootfs test snp simple
+
+.DEFAULT_GOAL := all
+
+all: out/initrd.img out/rootfs.tar.gz
+
+clean:
+	find -name '*.o' -print0 | xargs -0 -r rm
+	rm -rf bin rootfs out
+
+rootfs: out/rootfs.vhd
+
+snp: out/kernel.vmgs out/rootfs-verity.vhd out/v2056.vmgs out/v2056combined.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
+		-o $@ \
+		-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
+		-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" \
+		-rdinit out/initrd.img \
+		-vtl 0
+
+# The boot performance is optimized by supplying rootfs as a SCSI attachment. In this case the kernel boots with
+# dm-verity to ensure the integrity. Similar to layer VHDs the verity Merkle tree is appended to ext4 filesystem.
+# It transpires that the /dev/sd* order is not deterministic wrt the scsi device order. Thus build a single userland
+# fs + merkle tree device and boot that.
+#
+# From https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-init.html
+#
+# dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+#
+# where:
+# <name>        ::= The device name.
+# <uuid>        ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
+# <minor>       ::= The device minor number | ""
+# <flags>       ::= "ro" | "rw"
+# <table>       ::= <start_sector> <num_sectors> <target_type> <target_args>
+# <target_type> ::= "verity" | "linear" | ... (see list below)
+#
+# From https://docs.kernel.org/admin-guide/device-mapper/verity.html
+#
+# <version> <dev> <hash_dev>
+# <data_block_size> <hash_block_size>
+# <num_data_blocks> <hash_start_block>
+# <algorithm> <digest> <salt>
+# [<#opt_params> <opt_params>]
+#
+# typical igvm tool line once all the macros are expanded
+# python3 /home/user/igvmfile.py -o out/v2056.bin -kernel /home/user/bzImage -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+#
+# so a kernel command line of:
+# 8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh
+#
+# and a dm-mod.create of:
+# dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+#
+# which breaks down to:
+#
+# name = "dmverity"
+# uuid = ""
+# minor = ""
+# flags = "ro"
+# table = 0 196744 verity "args"
+# start_sector = 0
+# num_sectors = 196744
+# target_type = verity
+# target_args = 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
+# args:
+# version 1
+# dev /dev/sda
+# hash_dev /dev/sdb
+# data_block_size 4096
+# hash_block_size 4096
+# num_data_blocks 24593
+# hash_start_block 0
+# algorithm sha256
+# digest 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66
+# salt b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba
+# opt_params
+# count = 1
+# ignore_corruption
+#
+# combined typical (note the bigger count of sectors for the whole device)
+# dmverity,,,ro,0 199672 verity 1 /dev/sda /dev/sda 4096 4096 24959 24959 sha256 4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374 adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a
+#
+# A few notes:
+# - num_sectors is the size of the final (aka target) verity device, i.e. the size of our rootfs excluding the Merkle
+#   tree.
+# - We don't add verity superblock, so the <hash_start_block> will be exactly at the end of ext4 filesystem and equal
+#   to its size. In the case when verity superblock is present an extra block should be added to the offset value,
+#   i.e. 24959 becomes 24960.
+
+
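To make the dm-verity arithmetic above concrete, here is a minimal Go sketch (an illustration for this review, not part of the vendored Makefile; the helper name dmModCreate is invented) that derives the combined-device dm-mod.create table the same way the recipes below do:

package main

import "fmt"

// dmModCreate mirrors the Makefile arithmetic: the verity target spans only the
// ext4 data blocks, converted to 512-byte sectors, and with no verity superblock
// the hash area starts immediately after the data blocks on the same device.
func dmModCreate(dataBlocks, dataBlockSize, hashBlockSize uint64, rootDigest, salt string) string {
	dataSectors := dataBlocks * dataBlockSize / 512
	return fmt.Sprintf(
		"dmverity,,,ro,0 %d verity 1 /dev/sda /dev/sda %d %d %d %d sha256 %s %s",
		dataSectors, dataBlockSize, hashBlockSize, dataBlocks, dataBlocks, rootDigest, salt)
}

func main() {
	// Values from the combined worked example in the comment above:
	// 24959 blocks * 4096 bytes / 512 = 199672 sectors.
	fmt.Println(dmModCreate(24959, 4096, 4096,
		"4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374",
		"adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a"))
}

+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+# Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)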
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(HASH_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \ + -vtl 0 + +out/v2056combined.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh + rm -f $@ + echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \ + -vtl 0 + +# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line. +out/kernel.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup.sh + rm -f $@ + echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" + python3 $(PATH_PREFIX)/$(IGVM_TOOL) \ + -o $@ \ + -kernel $(PATH_PREFIX)/$(KERNEL_PATH) \ + -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" \ + -vtl 0 + +# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash. 
+%.vhd: % $(TAR2EXT4_TOOL) + $(TAR2EXT4_TOOL) -only-vhd -i $< -o $@ + +# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4. +%.vhd: %.ext4 $(TAR2EXT4_TOOL) + $(TAR2EXT4_TOOL) -only-vhd -i $< -o $@ + +%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt + veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info + # Retrieve info required by dm-verity at boot time + # Get the blocksize of rootfs + cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest + cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt + cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize + cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize + cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks + echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors + +out/rootfs.hash.salt: + hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@ + +out/rootfs.ext4: out/rootfs.tar.gz $(TAR2EXT4_TOOL) + gzip -f -d ./out/rootfs.tar.gz + $(TAR2EXT4_TOOL) -i ./out/rootfs.tar -o $@ + +out/rootfs-verity.ext4: out/rootfs.ext4 out/rootfs.hash + cp out/rootfs.ext4 $@ + cat out/rootfs.hash >> $@ + +out/rootfs.tar.gz: out/initrd.img + rm -rf rootfs-conv + mkdir rootfs-conv + gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd) + tar -zcf $@ -C rootfs-conv . + rm -rf rootfs-conv + +out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed + gzip -c out/initrd.img.uncompressed > $@ + rm out/initrd.img.uncompressed diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index 3204380484..ae66682637 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -44,7 +44,7 @@ delta.tar.gz initrd.img rootfs.tar.gz ### Containerd Shim -For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md). +For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md). Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers. diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 8ef611d6a0..fef2bf546c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -63,10 +63,10 @@ func (process *Process) SystemID() string { } func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) { - switch err { //nolint:errorlint - case nil: + if err == nil { return true, nil - case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: + } + if errors.Is(err, ErrVmcomputeOperationInvalidState) || errors.Is(err, ErrComputeSystemDoesNotExist) || errors.Is(err, ErrElementNotFound) { if !process.stopped() { // The process should be gone, but we have not received the notification. 
// After a second, force unblock the process wait to work around a possible
@@ -82,9 +82,8 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo
 			}()
 		}
 		return false, nil
-	default:
-		return false, err
 	}
+	return false, err
 }
 
 // Signal signals the process with `options`.
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
index ca75277a3f..93857da69f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/chipset.go
@@ -24,4 +24,6 @@ type Chipset struct {
 
 	// LinuxKernelDirect - Added in v2.2 Builds >=181117
 	LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"`
+
+	FirmwareFile *FirmwareFile `json:"FirmwareFile,omitempty"`
 }
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go
similarity index 70%
rename from vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go
rename to vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go
index 81865e7ea4..52fb62a829 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cimfs.go
@@ -9,14 +9,6 @@
 
 package hcsschema
 
-const (
-	CimMountFlagNone         uint32 = 0x0
-	CimMountFlagChildOnly    uint32 = 0x1
-	CimMountFlagEnableDax    uint32 = 0x2
-	CimMountFlagCacheFiles   uint32 = 0x4
-	CimMountFlagCacheRegions uint32 = 0x8
-)
-
 type CimMount struct {
 	ImagePath      string `json:"ImagePath,omitempty"`
 	FileSystemName string `json:"FileSystemName,omitempty"`
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go
new file mode 100644
index 0000000000..c27a132006
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/firmware.go
@@ -0,0 +1,8 @@
+package hcsschema
+
+type FirmwareFile struct {
+	// Parameters is an experimental/pre-release field. The field itself or its
+	// behavior can change in future iterations of the schema. Avoid taking a hard
+	// dependency on this field.
+	Parameters []byte `json:"Parameters,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
deleted file mode 100644
index 71224c75b9..0000000000
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_2.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * HCS API
- *
- * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
- *
- * API version: 2.1
- * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
- */
-
-package hcsschema
-
-type Memory2 struct {
-	SizeInMB uint64 `json:"SizeInMB,omitempty"`
-
-	AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
-
-	EnableHotHint bool `json:"EnableHotHint,omitempty"`
-
-	EnableColdHint bool `json:"EnableColdHint,omitempty"`
-
-	EnableEpf bool `json:"EnableEpf,omitempty"`
-
-	// EnableDeferredCommit is private in the schema. If regenerated need to add back.
- EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"` - - // EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed - // to the VM, allowing it to trim non-zeroed pages from the working set (if supported by - // the guest operating system). - EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"` - - // LowMmioGapInMB is the low MMIO region allocated below 4GB. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"` - - // HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and - // size). - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"` - - // HighMmioGapInMB is the high MMIO region. - // - // TODO: This is pre-release support in schema 2.3. Need to add build number - // docs when a public build with this is out. - HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go new file mode 100644 index 0000000000..41837416ca --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/memory_backing_type.go @@ -0,0 +1,21 @@ +// Autogenerated code; DO NOT EDIT. + +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swaggerapi/swaggercodegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swaggerapi/swaggercodegen.git) + */ + +package hcsschema + +type MemoryBackingType string + +// List of MemoryBackingType +const ( + MemoryBackingType_PHYSICAL MemoryBackingType = "Physical" + MemoryBackingType_VIRTUAL MemoryBackingType = "Virtual" + MemoryBackingType_HYBRID MemoryBackingType = "Hybrid" +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go new file mode 100644 index 0000000000..70a1395198 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa.go @@ -0,0 +1,19 @@ +// Autogenerated code; DO NOT EDIT. + +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type Numa struct { + VirtualNodeCount uint8 `json:"VirtualNodeCount,omitempty"` + PreferredPhysicalNodes []int64 `json:"PreferredPhysicalNodes,omitempty"` + Settings []NumaSetting `json:"Settings,omitempty"` + MaxSizePerNode uint64 `json:"MaxSizePerNode,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go new file mode 100644 index 0000000000..5984bdecd7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node.go @@ -0,0 +1,17 @@ +// Autogenerated code; DO NOT EDIT. 
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNode struct {
+	VirtualNodeIndex  uint32 `json:"VirtualNodeIndex,omitempty"`
+	PhysicalNodeIndex uint32 `json:"PhysicalNodeIndex,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
new file mode 100644
index 0000000000..88567f0f6d
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_memory.go
@@ -0,0 +1,19 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeMemory struct {
+	// Total physical memory on this physical NUMA node that is consumable by the VMs.
+	TotalConsumableMemoryInPages uint64 `json:"TotalConsumableMemoryInPages,omitempty"`
+	// Currently available physical memory on this physical NUMA node for the VMs.
+	AvailableMemoryInPages uint64 `json:"AvailableMemoryInPages,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
new file mode 100644
index 0000000000..4b6795bb90
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_node_processor.go
@@ -0,0 +1,17 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaNodeProcessor struct {
+	TotalAssignedProcessors  uint32 `json:"TotalAssignedProcessors,omitempty"`
+	TotalAvailableProcessors uint32 `json:"TotalAvailableProcessors,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
new file mode 100644
index 0000000000..bc3fba37a5
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_processors.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type NumaProcessors struct {
+	CountPerNode  Range  `json:"count_per_node,omitempty"`
+	NodePerSocket uint32 `json:"node_per_socket,omitempty"`
+}
+
+type Range struct {
+	Max uint32 `json:"max,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
new file mode 100644
index 0000000000..3f27b2ca01
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/numa_setting.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+ +/* + * Schema Open API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type NumaSetting struct { + VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"` + PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"` + VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"` + CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"` + CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"` + MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go deleted file mode 100644 index c64f335ec7..0000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/processor_2.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * HCS API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 2.5 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ - -package hcsschema - -type Processor2 struct { - Count int32 `json:"Count,omitempty"` - - Limit int32 `json:"Limit,omitempty"` - - Weight int32 `json:"Weight,omitempty"` - - ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"` - - // An optional object that configures the CPU Group to which a Virtual Machine is going to bind to. - CpuGroup *CpuGroup `json:"CpuGroup,omitempty"` -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go index 0c7efe8d40..d4cb95bdde 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go @@ -26,6 +26,8 @@ type Properties struct { RuntimeId string `json:"RuntimeId,omitempty"` + SystemGUID string `json:"SystemGUID,omitempty"` + RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"` State string `json:"State,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go index 98f2c96edb..934f777fcf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/property_type.go @@ -23,4 +23,5 @@ const ( PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus" PTProcessorTopology PropertyType = "ProcessorTopology" PTCPUGroup PropertyType = "CpuGroup" + PTSystemGUID PropertyType = "SystemGUID" ) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go index 8348699403..9cca85171e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/topology.go @@ -1,16 +1,18 @@ +// Autogenerated code; DO NOT EDIT. 
+ /* - * HCS API + * Schema Open API * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema type Topology struct { - Memory *Memory2 `json:"Memory,omitempty"` - - Processor *Processor2 `json:"Processor,omitempty"` + Memory *VirtualMachineMemory `json:"Memory,omitempty"` + Processor *VirtualMachineProcessor `json:"Processor,omitempty"` + Numa *Numa `json:"Numa,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go index 1e0fab2890..3f750466f8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -1,36 +1,29 @@ +// Autogenerated code; DO NOT EDIT. + /* - * HCS API + * Schema Open API * * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * - * API version: 2.1 + * API version: 2.4 * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) */ package hcsschema +// Configuration of a virtual machine, used during its creation to set up and/or use resources. type VirtualMachine struct { - - // StopOnReset is private in the schema. If regenerated need to put back. - StopOnReset bool `json:"StopOnReset,omitempty"` - - Chipset *Chipset `json:"Chipset,omitempty"` - - ComputeTopology *Topology `json:"ComputeTopology,omitempty"` - - Devices *Devices `json:"Devices,omitempty"` - - GuestState *GuestState `json:"GuestState,omitempty"` - - RestoreState *RestoreState `json:"RestoreState,omitempty"` - + Version *Version `json:"Version,omitempty"` + // When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state. + StopOnReset bool `json:"StopOnReset,omitempty"` + Chipset *Chipset `json:"Chipset,omitempty"` + ComputeTopology *Topology `json:"ComputeTopology,omitempty"` + Devices *Devices `json:"Devices,omitempty"` + GuestState *GuestState `json:"GuestState,omitempty"` + RestoreState *RestoreState `json:"RestoreState,omitempty"` RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` - - StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` - + StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` + DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` - - SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` - - DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` + SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go new file mode 100644 index 0000000000..17573c92a5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_memory.go @@ -0,0 +1,33 @@ +// Autogenerated code; DO NOT EDIT. 
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type VirtualMachineMemory struct {
+	SizeInMB uint64             `json:"SizeInMB,omitempty"`
+	Backing  *MemoryBackingType `json:"Backing,omitempty"`
+	// If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory.
+	AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
+	// If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set (if supported by the guest operating system).
+	EnableHotHint bool `json:"EnableHotHint,omitempty"`
+	// If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system).
+	EnableColdHint bool `json:"EnableColdHint,omitempty"`
+	// If enabled, then the memory cold discard hint feature is exposed to the VM, allowing it to trim non-zeroed pages from the working set (if supported by the guest operating system).
+	EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
+	// If enabled, then commit is not charged for each backing page until first access.
+	EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
+	// Low MMIO region allocated below 4GB
+	LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
+	// High MMIO region allocated above 4GB (base and size)
+	HighMMIOBaseInMB uint64           `json:"HighMmioBaseInMB,omitempty"`
+	HighMMIOGapInMB  uint64           `json:"HighMmioGapInMB,omitempty"`
+	SlitType         *VirtualSlitType `json:"SlitType,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go
new file mode 100644
index 0000000000..619cd83400
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine_processor.go
@@ -0,0 +1,21 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+type VirtualMachineProcessor struct {
+	Count                  uint32          `json:"Count,omitempty"`
+	Limit                  uint64          `json:"Limit,omitempty"`
+	Weight                 uint64          `json:"Weight,omitempty"`
+	Reservation            uint64          `json:"Reservation,omitempty"`
+	CpuGroup               *CpuGroup       `json:"CpuGroup,omitempty"`
+	NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"`
+}
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go
index f5e05903c5..a4a62da163 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_pci_device.go
@@ -9,8 +9,9 @@
 
 package hcsschema
 
-// TODO: This is pre-release support in schema 2.3. Need to add build number
+// TODO: PropagateNumaAffinity is a pre-release/experimental field in schema 2.11. Need to add build number
 // docs when a public build with this is out.
 type VirtualPciDevice struct {
 	Functions []VirtualPciFunction `json:",omitempty"`
+	PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"`
 }
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go
new file mode 100644
index 0000000000..dfad623134
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_slit_type.go
@@ -0,0 +1,23 @@
+// Autogenerated code; DO NOT EDIT.
+
+/*
+ * Schema Open API
+ *
+ * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
+ *
+ * API version: 2.4
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+
+package hcsschema
+
+// VirtualSlitType : Indicates if a virtual SLIT should be enabled for a VM and the type of virtual SLIT to be enabled.
+type VirtualSlitType string
+
+// List of VirtualSlitType
+const (
+	VirtualSlitType_NONE                       VirtualSlitType = "None"
+	VirtualSlitType_FIRMWARE                   VirtualSlitType = "Firmware"
+	VirtualSlitType_MEASURED                   VirtualSlitType = "Measured"
+	VirtualSlitType_FIRMWARE_FALLBACK_MEASURED VirtualSlitType = "FirmwareFallbackMeasured"
+)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
index 8ed7e566d6..ee85c43b3e 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/windows_crash_reporting.go
@@ -13,4 +13,6 @@ type WindowsCrashReporting struct {
 	DumpFileName string `json:"DumpFileName,omitempty"`
 
 	MaxDumpSize int64 `json:"MaxDumpSize,omitempty"`
+
+	DumpType string `json:"DumpType,omitempty"`
 }
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
index 81d60ed434..b1597466f6 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go
@@ -238,9 +238,10 @@ func (computeSystem *System) Shutdown(ctx context.Context) error {
 
 	resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
 	events := processHcsResult(ctx, resultJSON)
-	switch err { //nolint:errorlint
-	case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
-	default:
+	if err != nil &&
+		!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
+		!errors.Is(err, ErrComputeSystemDoesNotExist) &&
+		!errors.Is(err, ErrVmcomputeOperationPending) {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
 	return nil
@@ -259,9 +260,10 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
 
 	resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
 	events := processHcsResult(ctx, resultJSON)
-	switch err { //nolint:errorlint
-	case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
-	default:
+	if err != nil &&
+		!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
+		!errors.Is(err, ErrComputeSystemDoesNotExist) &&
+		!errors.Is(err, ErrVmcomputeOperationPending) {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
 	return nil
@@ -279,14 +281,13 @@ func (computeSystem *System) waitBackground() {
 	span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
 
 	err := waitForNotification(ctx,
computeSystem.callbackNumber, hcsNotificationSystemExited, nil) - switch err { //nolint:errorlint - case nil: + if err == nil { log.G(ctx).Debug("system exited") - case ErrVmcomputeUnexpectedExit: + } else if errors.Is(err, ErrVmcomputeUnexpectedExit) { log.G(ctx).Debug("unexpected system exit") computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil) err = nil - default: + } else { err = makeSystemError(computeSystem, operation, err, nil) } computeSystem.closedWaitOnce.Do(func() { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go index 82ca5baefd..4b1e51cb73 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsaccelnet.go @@ -47,7 +47,7 @@ func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMac func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { operation := "Get" title := "hcsshim::nnvManagementMacList::" + operation - logrus.Debugf(title) + logrus.Debug(title) return HNSNnvManagementMacRequest("GET", "", "") } @@ -55,6 +55,6 @@ func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) { operation := "Delete" title := "hcsshim::nnvManagementMacList::" + operation - logrus.Debugf(title) + logrus.Debug(title) return HNSNnvManagementMacRequest("DELETE", "", "") } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index b505731c36..3afa240aa6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -22,9 +22,8 @@ import ( // of the job and a mutex for synchronized handle access. type JobObject struct { handle windows.Handle - // All accesses to this MUST be done atomically except in `Open` as the object - // is being created in the function. 1 signifies that this job is currently a silo. - silo uint32 + // silo signifies that this job is currently a silo. + silo atomic.Bool mq *queue.MessageQueue handleLock sync.RWMutex } @@ -204,9 +203,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { handle: jobHandle, } - if isJobSilo(jobHandle) { - job.silo = 1 - } + job.silo.Store(isJobSilo(jobHandle)) // If the IOCP we'll be using to receive messages for all jobs hasn't been // created, create it and start polling. @@ -479,7 +476,7 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error return ErrAlreadyClosed } - if !job.isSilo() { + if !job.silo.Load() { return ErrNotSilo } @@ -546,7 +543,7 @@ func (job *JobObject) PromoteToSilo() error { return ErrAlreadyClosed } - if job.isSilo() { + if job.silo.Load() { return nil } @@ -569,15 +566,10 @@ func (job *JobObject) PromoteToSilo() error { return fmt.Errorf("failed to promote job to silo: %w", err) } - atomic.StoreUint32(&job.silo, 1) + job.silo.Store(true) return nil } -// isSilo returns if the job object is a silo. -func (job *JobObject) isSilo() bool { - return atomic.LoadUint32(&job.silo) == 1 -} - // QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the // private working set for every process running in the job. 
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index e3b1a1edc9..fedf8add6c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -150,6 +150,7 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr) } + // CodeQL [SM03681] checked against max value above (there is no math.MaxUintPtr ...) info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) return job.setExtendedInformation(info) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go index d17d909d93..4399cec6f8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go @@ -4,7 +4,6 @@ import ( "context" "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) type entryContextKeyType int @@ -20,13 +19,13 @@ var ( // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`. L = logrus.NewEntry(logrus.StandardLogger()) - // G is an alias for GetEntry + // G is an alias for GetEntry. G = GetEntry - // S is an alias for SetEntry + // S is an alias for SetEntry. S = SetEntry - // U is an alias for UpdateContext + // U is an alias for UpdateContext. U = UpdateContext ) @@ -83,7 +82,7 @@ func UpdateContext(ctx context.Context) context.Context { // WithContext returns a context that contains the provided log entry. // The entry can be extracted with `GetEntry` (`G`) // -// The entry in the context is a copy of `entry` (generated by `entry.WithContext`) +// The entry in the context is a copy of `entry` (generated by `entry.WithContext`). func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) { // regardless of the order, entry.Context != GetEntry(ctx) // here, the returned entry will reference the supplied context @@ -93,25 +92,6 @@ func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *lo return ctx, entry } -// Copy extracts the tracing Span and logging entry from the src Context, if they -// exist, and adds them to the dst Context. -// -// This is useful to share tracing and logging between contexts, but not the -// cancellation. For example, if the src Context has been cancelled but cleanup -// operations triggered by the cancellation require a non-cancelled context to -// execute. -func Copy(dst context.Context, src context.Context) context.Context { - if s := trace.FromContext(src); s != nil { - dst = trace.NewContext(dst, s) - } - - if e := fromContext(src); e != nil { - dst, _ = WithContext(dst, e) - } - - return dst -} - func fromContext(ctx context.Context) *logrus.Entry { e, _ := ctx.Value(_entryContextKey).(*logrus.Entry) return e diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 1ceb26bada..f26316fabf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -103,9 +103,7 @@ func encode(v interface{}) (_ []byte, err error) { if jErr := enc.Encode(v); jErr != nil { if err != nil { - // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...) 
- //nolint:errorlint // non-wrapping format verb for fmt.Errorf - return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) + return nil, fmt.Errorf("protojson encoding: %w; json encoding: %w", err, jErr) } return nil, fmt.Errorf("json encoding: %w", jErr) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index 5a960e0d35..5346f9b7cf 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -22,23 +22,14 @@ var ( // case sensitive keywords, so "env" is not a substring on "Environment" _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")} - _scrub int32 + _scrub atomic.Bool ) // SetScrubbing enables scrubbing -func SetScrubbing(enable bool) { - v := int32(0) // cant convert from bool to int32 directly - if enable { - v = 1 - } - atomic.StoreInt32(&_scrub, v) -} +func SetScrubbing(enable bool) { _scrub.Store(enable) } // IsScrubbingEnabled checks if scrubbing is enabled -func IsScrubbingEnabled() bool { - v := atomic.LoadInt32(&_scrub) - return v != 0 -} +func IsScrubbingEnabled() bool { return _scrub.Load() } // ScrubProcessParameters scrubs HCS Create Process requests with config parameters of // type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index 67ca897cfc..965086a580 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error }() select { case <-ctx.Done(): - if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint + if ctx.Err() == gcontext.DeadlineExceeded { log.G(ctx).WithField(logfields.Timeout, trueTimeout). Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index fc12eeba4d..627060cee4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -34,6 +34,7 @@ const ( UtilityVMPath = `UtilityVM` UtilityVMFilesPath = `UtilityVM\Files` RegFilesPath = `Files\Windows\System32\config` + BootDirRelativePath = `\EFI\Microsoft\Boot` BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD` BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi` ContainerBaseVhd = `blank-base.vhdx` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go index 21664577b7..6c026d9822 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -32,10 +32,16 @@ type CimFsFileMetadata struct { EACount uint32 } +type CimFsImagePath struct { + ImageDir *uint16 + ImageName *uint16 +} + //sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage? //sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? 
//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? +//sys CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage2? //sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage? //sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? @@ -45,3 +51,8 @@ type CimFsFileMetadata struct { //sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath? //sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink? //sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream? +//sys CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimAddFsToMergedImage? +//sys CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) = cimfs.CimAddFsToMergedImage2? +//sys CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) = cimfs.CimMergeMountImage? +//sys CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimTombstoneFile? +//sys CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateMergeLink? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index ecdded312e..2abdc2e072 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -53,6 +53,8 @@ var ( procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCimAddFsToMergedImage = modcimfs.NewProc("CimAddFsToMergedImage") + procCimAddFsToMergedImage2 = modcimfs.NewProc("CimAddFsToMergedImage2") procCimCloseImage = modcimfs.NewProc("CimCloseImage") procCimCloseStream = modcimfs.NewProc("CimCloseStream") procCimCommitImage = modcimfs.NewProc("CimCommitImage") @@ -60,9 +62,13 @@ var ( procCimCreateFile = modcimfs.NewProc("CimCreateFile") procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink") procCimCreateImage = modcimfs.NewProc("CimCreateImage") + procCimCreateImage2 = modcimfs.NewProc("CimCreateImage2") + procCimCreateMergeLink = modcimfs.NewProc("CimCreateMergeLink") procCimDeletePath = modcimfs.NewProc("CimDeletePath") procCimDismountImage = modcimfs.NewProc("CimDismountImage") + procCimMergeMountImage = modcimfs.NewProc("CimMergeMountImage") procCimMountImage = modcimfs.NewProc("CimMountImage") + procCimTombstoneFile = modcimfs.NewProc("CimTombstoneFile") procCimWriteStream = modcimfs.NewProc("CimWriteStream") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") @@ -181,6 +187,54 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } +func CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimAddFsToMergedImage(cimFSHandle, _p0) +} + +func _CimAddFsToMergedImage(cimFSHandle FsHandle, path *uint16) 
(hr error) { + hr = procCimAddFsToMergedImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimAddFsToMergedImage2(cimFSHandle, _p0, flags) +} + +func _CimAddFsToMergedImage2(cimFSHandle FsHandle, path *uint16, flags uint32) (hr error) { + hr = procCimAddFsToMergedImage2.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage2.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(flags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimCloseImage(cimFSHandle FsHandle) (err error) { err = procCimCloseImage.Find() if err != nil { @@ -321,6 +375,59 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci return } +func CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { + return + } + return _CimCreateImage2(_p0, flags, oldFSName, newFSName, cimFSHandle) +} + +func _CimCreateImage2(imagePath *uint16, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + hr = procCimCreateImage2.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimCreateImage2.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(flags), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(newPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(oldPath) + if hr != nil { + return + } + return _CimCreateMergeLink(cimFSHandle, _p0, _p1) +} + +func _CimCreateMergeLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) { + hr = procCimCreateMergeLink.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimCreateMergeLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(path) @@ -360,6 +467,21 @@ func CimDismountImage(volumeID *g) (hr error) { return } +func CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) { + hr = procCimMergeMountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimMergeMountImage.Addr(), uintptr(numCimPaths), uintptr(unsafe.Pointer(backingImagePaths)), uintptr(flags), uintptr(unsafe.Pointer(volumeID))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimMountImage(imagePath string, fsName string, flags 
uint32, volumeID *g) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(imagePath) @@ -389,6 +511,30 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g return } +func CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimTombstoneFile(cimFSHandle, _p0) +} + +func _CimTombstoneFile(cimFSHandle FsHandle, path *uint16) (hr error) { + hr = procCimTombstoneFile.Find() + if hr != nil { + return + } + r0, _, _ := syscall.SyscallN(procCimTombstoneFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) { hr = procCimWriteStream.Find() if hr != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go index f8d411ad7e..a7860895c7 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -3,7 +3,8 @@ package osversion // List of stable ABI compliant ltsc releases // Note: List must be sorted in ascending order var compatLTSCReleases = []uint16{ - V21H2Server, + LTSC2022, + LTSC2025, } // CheckHostAndContainerCompat checks if given host and container @@ -20,16 +21,25 @@ func CheckHostAndContainerCompat(host, ctr OSVersion) bool { } // If host is < WS 2022, exact version match is required - if host.Build < V21H2Server { + if host.Build < LTSC2022 { return host.Build == ctr.Build } - var supportedLtscRelease uint16 + // Find the latest LTSC version that is earlier than the host version. + // This is the earliest version of container that the host can run. + // + // If the host version is an LTSC, then it supports compatibility with + // everything from the previous LTSC up to itself, so we want supportedLTSCRelease + // to be the previous entry. + // + // If no match is found, then we know that the host is LTSC2022 exactly, + // since we already checked that it's not less than LTSC2022. + var supportedLTSCRelease uint16 = LTSC2022 for i := len(compatLTSCReleases) - 1; i >= 0; i-- { - if host.Build >= compatLTSCReleases[i] { - supportedLtscRelease = compatLTSCReleases[i] + if host.Build > compatLTSCReleases[i] { + supportedLTSCRelease = compatLTSCReleases[i] break } } - return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build + return supportedLTSCRelease <= ctr.Build && ctr.Build <= host.Build } diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index 446369591a..5392a4cea1 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -81,4 +81,11 @@ const ( // V22H2Win11 corresponds to Windows 11 (2022 Update). V22H2Win11 = 22621 + + // V23H2 is the 23H2 release in the Windows Server annual channel. 
+ V23H2 = 25398 + + // Windows Server 2025 build 26100 + V25H1Server = 26100 + LTSC2025 = V25H1Server ) diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go index 4ebfbbc2f7..17247f0c56 100644 --- a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go +++ b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go @@ -61,8 +61,7 @@ func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLay func writeLayerFromTar(ctx context.Context, r io.Reader, w wclayer.LayerWriter, root string) (int64, error) { t := tar.NewReader(r) - // CodeQL [SM03409] False positive, `internal/safefile` package ensures tar extractions are always - // bound to the layer root directory. + // CodeQL [SM03409] `internal\wclayer` uses `internal/safefile` to bind tar extraction to the layer's root directory hdr, err := t.Next() totalSize := int64(0) buf := bufio.NewWriter(nil) @@ -80,16 +79,14 @@ func writeLayerFromTar(ctx context.Context, r io.Reader, w wclayer.LayerWriter, if err != nil { return 0, err } - // CodeQL [SM03409] False positive, `internal/safefile` package ensures tar extractions are always - // bound to the layer root directory. + // CodeQL [SM03409] `internal\wclayer` uses `internal/safefile` to bind tar extraction to the layer's root directory hdr, err = t.Next() } else if hdr.Typeflag == tar.TypeLink { err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) if err != nil { return 0, err } - // CodeQL [SM03409] False positive, `internal/safefile` package ensures tar extractions are always - // bound to the layer root directory. + // CodeQL [SM03409] `internal\wclayer` uses `internal/safefile` to bind tar extraction to the layer's root directory hdr, err = t.Next() } else { var ( diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go index 3bb4fd7c4e..48bd362bf5 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go +++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -17,9 +17,9 @@ ANTLR4 that it is compatible with (I.E. uses the /v4 path). However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not -list the release tag such as @4.12.0 - this was confusing, to say the least. +list the release tag such as @4.13.1 - this was confusing, to say the least. -As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` (the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. @@ -49,7 +49,7 @@ Here is a general/recommended template for an ANTLR based recognizer in Go: . 
├── parser │ ├── mygrammar.g4 - │ ├── antlr-4.12.1-complete.jar + │ ├── antlr-4.13.1-complete.jar │ ├── generate.go │ └── generate.sh ├── parsing - generated code goes here @@ -71,7 +71,7 @@ And the generate.sh file will look similar to this: #!/bin/sh - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go index cdeefed247..e749ebd0cf 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/atn.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -4,8 +4,6 @@ package antlr -import "sync" - // ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or // which is invalid for a particular struct such as [*antlr.BaseRuleContext] var ATNInvalidAltNumber int @@ -56,9 +54,9 @@ type ATN struct { // states []ATNState - mu sync.Mutex - stateMu sync.RWMutex - edgeMu sync.RWMutex + mu Mutex + stateMu RWMutex + edgeMu RWMutex } // NewATN returns a new ATN struct representing the given grammarType and is used diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go index a83f25d349..267308bb3d 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -73,9 +73,6 @@ func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *AT // NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' // are just wrappers around this one. func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed - } b := &ATNConfig{} b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) b.cType = parserConfig diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go index b737fe85fb..ab4e96be52 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -148,7 +148,7 @@ func (is *InputStream) GetTextFromInterval(i Interval) string { } func (*InputStream) GetSourceName() string { - return "" + return "Obtained from string" } // String returns the entire input stream as a string diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go index ceccd96d25..6d668f7983 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go +++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -8,7 +8,6 @@ import ( "container/list" "runtime/debug" "sort" - "sync" ) // Collectable is an interface that a struct should implement if it is to be @@ -587,12 +586,12 @@ type VisitRecord struct { type VisitList struct { cache *list.List - lock sync.RWMutex + lock RWMutex } var visitListPool = VisitList{ cache: list.New(), - lock: sync.RWMutex{}, + lock: RWMutex{}, } // NewVisitRecord returns a new VisitRecord instance from the pool if available. 
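
The jcollect.go change above keeps the runtime's existing free-list design for visit records, now guarded by the runtime's own RWMutex wrapper: records are recycled through a shared list instead of being allocated on every lookahead computation. A minimal sketch of that pool pattern using plain sync primitives; the record and recordPool names here are hypothetical illustrations, not the runtime's types:

    package main

    import (
        "container/list"
        "fmt"
        "sync"
    )

    type record struct{ id int }

    // recordPool recycles records through a free list so steady-state
    // operation allocates nothing, at the cost of one lock per get/put.
    type recordPool struct {
        mu    sync.Mutex
        cache *list.List
    }

    func (p *recordPool) get() *record {
        p.mu.Lock()
        defer p.mu.Unlock()
        if e := p.cache.Front(); e != nil {
            p.cache.Remove(e)
            return e.Value.(*record)
        }
        return &record{} // pool empty: fall back to a fresh allocation
    }

    func (p *recordPool) put(r *record) {
        p.mu.Lock()
        defer p.mu.Unlock()
        *r = record{} // reset state before the record is handed out again
        p.cache.PushBack(r)
    }

    func main() {
        p := &recordPool{cache: list.New()}
        r := p.get()
        r.id = 42
        p.put(r)
        fmt.Println(p.get().id) // prints 0: records are zeroed on return
    }
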
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
index 3c7896a918..e5594b2168 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/lexer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
@@ -207,7 +207,7 @@ func (b *BaseLexer) NextToken() Token {
 	for {
 		b.thetype = TokenInvalidType
 
-		ttype := b.safeMatch()
+		ttype := b.safeMatch() // Defaults to LexerSkip
 
 		if b.input.LA(1) == TokenEOF {
 			b.hitEOF = true
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
index 4955ac876f..dfdff000bc 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
@@ -40,6 +40,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
 	for alt := 0; alt < count; alt++ {
 
 		look[alt] = NewIntervalSet()
+		// TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim!
 		lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
 		la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex.go
new file mode 100644
index 0000000000..2b0cda4745
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/mutex.go
@@ -0,0 +1,41 @@
+//go:build !antlr.nomutex
+// +build !antlr.nomutex
+
+package antlr
+
+import "sync"
+
+// Mutex is a simple mutex implementation which just delegates to sync.Mutex. It
+// provides the mutex implementation for the antlr package, which users can turn
+// off with the build tag -tags antlr.nomutex
+type Mutex struct {
+	mu sync.Mutex
+}
+
+func (m *Mutex) Lock() {
+	m.mu.Lock()
+}
+
+func (m *Mutex) Unlock() {
+	m.mu.Unlock()
+}
+
+type RWMutex struct {
+	mu sync.RWMutex
+}
+
+func (m *RWMutex) Lock() {
+	m.mu.Lock()
+}
+
+func (m *RWMutex) Unlock() {
+	m.mu.Unlock()
+}
+
+func (m *RWMutex) RLock() {
+	m.mu.RLock()
+}
+
+func (m *RWMutex) RUnlock() {
+	m.mu.RUnlock()
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
new file mode 100644
index 0000000000..35ce4353ee
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
@@ -0,0 +1,32 @@
+//go:build antlr.nomutex
+// +build antlr.nomutex
+
+package antlr
+
+type Mutex struct{}
+
+func (m *Mutex) Lock() {
+	// No-op
+}
+
+func (m *Mutex) Unlock() {
+	// No-op
+}
+
+type RWMutex struct{}
+
+func (m *RWMutex) Lock() {
+	// No-op
+}
+
+func (m *RWMutex) Unlock() {
+	// No-op
+}
+
+func (m *RWMutex) RLock() {
+	// No-op
+}
+
+func (m *RWMutex) RUnlock() {
+	// No-op
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
index ae2869692a..724fa17a19 100644
--- a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -10,8 +10,6 @@ import (
 	"strings"
 )
 
-var ()
-
 // ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
 // a standard JStore so that we can use Lazy instantiation of the JStore, mostly
 // to avoid polluting the stats module with a ton of JStore instances with nothing in them.
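
The mutex.go/mutex_nomutex.go pair above selects a lock implementation at compile time: the default build delegates to sync.Mutex and sync.RWMutex, while building with -tags antlr.nomutex turns every lock operation into a no-op for single-goroutine parsers. A minimal sketch of consuming code, assuming the vendored module path shown above; guardedCache is a hypothetical type, not part of the runtime:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    // guardedCache uses the runtime's RWMutex wrapper. Built normally it is
    // goroutine-safe; built with `go build -tags antlr.nomutex` the Lock and
    // Unlock calls below compile to no-ops and the locking overhead disappears.
    type guardedCache struct {
        mu   antlr.RWMutex
        data map[string]int
    }

    func (c *guardedCache) get(k string) (int, bool) {
        c.mu.RLock()
        defer c.mu.RUnlock()
        v, ok := c.data[k]
        return v, ok
    }

    func (c *guardedCache) put(k string, v int) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.data[k] = v
    }

    func main() {
        c := &guardedCache{data: map[string]int{}}
        c.put("tokens", 7)
        v, _ := c.get("tokens")
        fmt.Println(v)
    }

The trade-off is the usual one for build-tag specialization: the choice is global to the binary, so the no-op variant is only safe when every parser in the process runs on a single goroutine.
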
@@ -883,7 +881,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre // the ERROR state was reached, outerContext as the initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// Teh func returns the value to return from [AdaptivePredict], or +// The func returns the value to return from [AdaptivePredict], or // [ATNInvalidAltNumber] if a suitable alternative was not // identified and [AdaptivePredict] should report an error instead. func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go index c1b80cc1f0..a1d5186b8f 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -6,7 +6,6 @@ package antlr import ( "fmt" - "golang.org/x/exp/slices" "strconv" ) @@ -101,7 +100,7 @@ func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) hash = murmurUpdate(hash, returnState) } hash = murmurFinish(hash, len(parents)<<1) - + nec := &PredictionContext{} nec.cachedHash = hash nec.pcType = PredictionContextArray @@ -115,6 +114,9 @@ func (p *PredictionContext) Hash() int { } func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + if p == other { + return true + } switch p.pcType { case PredictionContextEmpty: otherP := other.(*PredictionContext) @@ -138,13 +140,11 @@ func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool if p.cachedHash != other.Hash() { return false // can't be same if hash is different } - + // Must compare the actual array elements and not just the array address // - return slices.Equal(p.returnStates, other.returnStates) && - slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { - return x.Equals(y) - }) + return intSlicesEqual(p.returnStates, other.returnStates) && + pcSliceEqual(p.parents, other.parents) } func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { @@ -152,23 +152,23 @@ func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext return false } otherP := other.(*PredictionContext) - if otherP == nil { + if otherP == nil || otherP.pcType != PredictionContextSingleton { return false } - + if p.cachedHash != otherP.Hash() { return false // Can't be same if hash is different } - + if p.returnState != otherP.getReturnState(0) { return false } - + // Both parents must be nil if one is if p.parentCtx == nil { return otherP.parentCtx == nil } - + return p.parentCtx.Equals(otherP.parentCtx) } @@ -225,27 +225,27 @@ func (p *PredictionContext) String() string { return "$" case PredictionContextSingleton: var up string - + if p.parentCtx == nil { up = "" } else { up = p.parentCtx.String() } - + if len(up) == 0 { if p.returnState == BasePredictionContextEmptyReturnState { return "$" } - + return strconv.Itoa(p.returnState) } - + return strconv.Itoa(p.returnState) + " " + up case PredictionContextArray: if p.isEmpty() { return "[]" } - + s := "[" for i := 0; i < len(p.returnStates); i++ { if i > 0 { @@ -263,7 +263,7 @@ func (p *PredictionContext) String() string { } } return s + "]" - + default: return "unknown" } @@ -309,18 +309,18 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *Predict parent := 
predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) state := a.states[outerContext.GetInvokingState()] transition := state.GetTransitions()[0] - + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) } func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { - + // Share same graph if both same // if a == b || a.Equals(b) { return a } - + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { return mergeSingletons(a, b, rootIsWildcard, mergeCache) } @@ -334,7 +334,7 @@ func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *Pr return b } } - + // Convert either Singleton or Empty to arrays, so that we can merge them // ara := convertToArray(a) @@ -395,7 +395,7 @@ func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *J return previous } } - + rootMerge := mergeRoot(a, b, rootIsWildcard) if rootMerge != nil { if mergeCache != nil { @@ -564,7 +564,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa i := 0 // walks a j := 0 // walks b k := 0 // walks target M array - + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) // walk and merge to yield mergedParents, mergedReturnStates @@ -626,9 +626,9 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa mergedParents = mergedParents[0:k] mergedReturnStates = mergedReturnStates[0:k] } - + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - + // if we created same array as a or b, return that instead // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation if M.Equals(a) { @@ -650,7 +650,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa return b } combineCommonParents(&mergedParents) - + if mergeCache != nil { mergeCache.Put(a, b, M) } @@ -666,7 +666,7 @@ func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMa //goland:noinspection GoUnusedFunction func combineCommonParents(parents *[]*PredictionContext) { uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") - + for p := 0; p < len(*parents); p++ { parent := (*parents)[p] _, _ = uniqueParents.Put(parent) @@ -685,7 +685,7 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr if present { return existing } - + existing, present = contextCache.Get(context) if present { visited.Put(context, existing) @@ -722,6 +722,6 @@ func getCachedBasePredictionContext(context *PredictionContext, contextCache *Pr contextCache.add(updated) visited.Put(updated, updated) visited.Put(context, updated) - + return updated } diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go index 2e0b504fb3..dcb8548cd1 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -56,7 +56,7 @@ var tokenTypeMapCache = make(map[string]int) var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" + runtimeVersion := "4.13.1" if runtimeVersion != toolVersion { fmt.Println("ANTLR runtime and generated 
code versions disagree: " + runtimeVersion + "!=" + toolVersion) } diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go index 70c0673a0f..8cb5f3ed6f 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/statistics.go +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -9,7 +9,6 @@ import ( "path/filepath" "sort" "strconv" - "sync" ) // This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default @@ -30,7 +29,7 @@ type goRunStats struct { // within this package. // jStats []*JStatRec - jStatsLock sync.RWMutex + jStatsLock RWMutex topN int topNByMax []*JStatRec topNByUsed []*JStatRec diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go index 9670efb829..f5bc34229d 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/token.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -104,6 +104,25 @@ func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { return b.source } +func (b *BaseToken) GetText() string { + if b.text != "" { + return b.text + } + input := b.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if b.GetStart() < n && b.GetStop() < n { + return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop())) + } + return "" +} + +func (b *BaseToken) SetText(text string) { + b.text = text +} + func (b *BaseToken) GetTokenIndex() int { return b.tokenIndex } @@ -120,6 +139,28 @@ func (b *BaseToken) GetInputStream() CharStream { return b.source.charStream } +func (b *BaseToken) String() string { + txt := b.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if b.GetChannel() > 0 { + ch = ",channel=" + strconv.Itoa(b.GetChannel()) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" + + txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" + + ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]" +} + type CommonToken struct { BaseToken } @@ -170,44 +211,3 @@ func (c *CommonToken) clone() *CommonToken { t.text = c.GetText() return t } - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go index 733d7df9dc..36a37f247a 100644 --- a/vendor/github.com/antlr4-go/antlr/v4/utils.go +++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ 
-326,3 +326,56 @@ func isDirectory(dir string) (bool, error) { } return fileInfo.IsDir(), err } + +// intSlicesEqual returns true if the two slices of ints are equal, and is a little +// faster than slices.Equal. +func intSlicesEqual(s1, s2 []int) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if v != s2[i] { + return false + } + } + return true +} + +func pcSliceEqual(s1, s2 []*PredictionContext) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if !v.Equals(s2[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330eb..0000000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index aac99f196a..0000000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,216 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. 
- -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. -type ExponentialBackOffOpts func(*ExponentialBackOff) - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - for _, fn := range opts { - fn(b) - } - b.Reset() - return b -} - -// WithInitialInterval sets the initial interval between retries. -func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.InitialInterval = duration - } -} - -// WithRandomizationFactor sets the randomization factor to add jitter to intervals. -func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.RandomizationFactor = randomizationFactor - } -} - -// WithMultiplier sets the multiplier for increasing the interval after each retry. -func WithMultiplier(multiplier float64) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Multiplier = multiplier - } -} - -// WithMaxInterval sets the maximum interval between retries. -func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxInterval = duration - } -} - -// WithMaxElapsedTime sets the maximum total time for retries. 
-func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.MaxElapsedTime = duration - } -} - -// WithRetryStopDuration sets the duration after which retries should stop. -func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Stop = duration - } -} - -// WithClockProvider sets the clock used to measure time. -func WithClockProvider(clock Clock) ExponentialBackOffOpts { - return func(ebo *ExponentialBackOff) { - ebo.Clock = clock - } -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. - } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. 
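To make the randomized-interval formula above concrete, here is a small self-contained sketch of the same computation as `getRandomValueFromInterval`; with a factor of 0.5 and a 2s interval, results fall in roughly [1s, 3s]:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter mirrors getRandomValueFromInterval: pick a random duration from
// [current - factor*current, current + factor*current].
func jitter(factor, random float64, current time.Duration) time.Duration {
	if factor == 0 {
		return current // no randomness when the factor is 0
	}
	delta := factor * float64(current)
	minI := float64(current) - delta
	maxI := float64(current) + delta
	return time.Duration(minI + random*(maxI-minI+1))
}

func main() {
	for i := 0; i < 3; i++ {
		// Each call lands somewhere in [1s, 3s] for a 2s interval.
		fmt.Println(jitter(0.5, rand.Float64(), 2*time.Second))
	}
}
```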
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index b9c0c51cd7..0000000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,146 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). -// The operation will be retried using a backoff policy if it returns an error. -type OperationWithData[T any] func() (T, error) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -func (o Operation) withEmptyData() OperationWithData[struct{}] { - return func() (struct{}, error) { - return struct{}{}, o() - } -} - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryWithData is like Retry but returns data in the response too. -func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { - return RetryNotifyWithData(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithData is like RetryNotify but returns data in the response too. -func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { - return doRetryNotify(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) - return err -} - -// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. 
-func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - return doRetryNotify(operation, b, notify, t) -} - -func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - var ( - err error - next time.Duration - res T - ) - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - res, err = operation() - if err == nil { - return res, nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return res, permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return res, cerr - } - - return res, err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return res, ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca37c..0000000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore similarity index 100% rename from vendor/github.com/cenkalti/backoff/v4/.gitignore rename to vendor/github.com/cenkalti/backoff/v5/.gitignore diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 0000000000..658c37436d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. 
+- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. (#140) diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/v4/LICENSE rename to vendor/github.com/cenkalti/backoff/v5/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md similarity index 64% rename from vendor/github.com/cenkalti/backoff/v4/README.md rename to vendor/github.com/cenkalti/backoff/v5/README.md index 9433004a28..4611b1d170 100644 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. ## Contributing @@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. 
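For reference, a minimal sketch of the new v5 call shape, using the `Retry` signature and options introduced later in this patch; the endpoint and status handling are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	// The operation returns a typed result; wrapping an error with
	// backoff.Permanent stops retrying immediately.
	operation := func() (*http.Response, error) {
		resp, err := http.Get("http://localhost:8080/healthz") // illustrative URL
		if err != nil {
			return nil, err // transient: retried with backoff
		}
		if resp.StatusCode == http.StatusNotFound {
			resp.Body.Close()
			return nil, backoff.Permanent(fmt.Errorf("endpoint missing"))
		}
		return resp, nil
	}

	resp, err := backoff.Retry(context.Background(), operation,
		backoff.WithMaxTries(5),
		backoff.WithMaxElapsedTime(30*time.Second),
	)
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```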
-[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go similarity index 87% rename from vendor/github.com/cenkalti/backoff/v4/backoff.go rename to vendor/github.com/cenkalti/backoff/v5/backoff.go index 3676ee405d..dd2b24ca73 100644 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -15,16 +15,16 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. + // backoff.Stop to indicate that no more retries should be made. // // Example usage: // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } // NextBackOff() time.Duration diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 0000000000..beb2b38a23 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. 
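A hedged sketch of how an operation can drive the retry delay with `RetryAfter` (defined just above); per the `Retry` loop added later in this patch, the returned duration overrides the next wait and resets the exponential backoff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

var calls int

// query simulates a rate-limited dependency: the first two attempts ask the
// caller to wait 2 seconds before retrying, the third succeeds.
func query() (string, error) {
	calls++
	if calls < 3 {
		return "", backoff.RetryAfter(2)
	}
	return "ok", nil
}

func main() {
	res, err := backoff.Retry(context.Background(), query)
	fmt.Println(res, err) // after roughly 4 seconds: ok <nil>
}
```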
+func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go new file mode 100644 index 0000000000..c1f3e442d3 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -0,0 +1,125 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + + currentInterval time.Duration +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + return &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + } +} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +// NextBackOff calculates the next backoff interval using the formula: +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval + } + + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next +} + +// Increments the current interval by multiplying it with the multiplier. 
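Setting `RandomizationFactor` to 0 makes `NextBackOff` deterministic, which makes the multiplier growth and the `MaxInterval` cap enforced by `incrementCurrentInterval` (below) easy to observe; a minimal sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.RandomizationFactor = 0 // disable jitter for a deterministic sequence
	b.InitialInterval = time.Second
	b.MaxInterval = 5 * time.Second
	b.Reset()

	// Prints 1s, 1.5s, 2.25s, 3.375s, 5s, 5s: growth by the default 1.5
	// multiplier until the interval is pinned at MaxInterval.
	for i := 0; i < 6; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```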
+func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 0000000000..e43f47fb8a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of retry attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. 
+func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. + args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, err + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go similarity index 80% rename from vendor/github.com/cenkalti/backoff/v4/ticker.go rename to vendor/github.com/cenkalti/backoff/v5/ticker.go index df9d68bce5..f0d4b2ae72 100644 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -1,7 +1,6 @@ package backoff import ( - "context" "sync" "time" ) @@ -14,8 +13,7 @@ type Ticker struct { C <-chan time.Time c chan time.Time b BackOff - ctx context.Context - timer Timer + timer timer stop chan struct{} stopOnce sync.Once } @@ -27,22 +25,12 @@ type Ticker struct { // provided backoff policy (notably calling NextBackOff or Reset) // while the ticker is running. func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } c := make(chan time.Time) t := &Ticker{ C: c, c: c, b: b, - ctx: getContext(b), - timer: timer, + timer: &defaultTimer{}, stop: make(chan struct{}), } t.b.Reset() @@ -73,8 +61,6 @@ func (t *Ticker) run() { case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. 
return - case <-t.ctx.Done(): - return } } } diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go similarity index 96% rename from vendor/github.com/cenkalti/backoff/v4/timer.go rename to vendor/github.com/cenkalti/backoff/v5/timer.go index 8120d0213c..a895309747 100644 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -2,7 +2,7 @@ package backoff import "time" -type Timer interface { +type timer interface { Start(duration time.Duration) Stop() C() <-chan time.Time diff --git a/vendor/github.com/containerd/containerd/api/events/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go index 8f183f60c0..fdadd72661 100644 --- a/vendor/github.com/containerd/containerd/api/events/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go @@ -36,6 +36,61 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type ContentCreate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ContentCreate) Reset() { + *x = ContentCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContentCreate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContentCreate) ProtoMessage() {} + +func (x *ContentCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContentCreate.ProtoReflect.Descriptor instead. +func (*ContentCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{0} +} + +func (x *ContentCreate) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} + +func (x *ContentCreate) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + type ContentDelete struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -47,7 +102,7 @@ type ContentDelete struct { func (x *ContentDelete) Reset() { *x = ContentDelete{} if protoimpl.UnsafeEnabled { - mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -60,7 +115,7 @@ func (x *ContentDelete) String() string { func (*ContentDelete) ProtoMessage() {} func (x *ContentDelete) ProtoReflect() protoreflect.Message { - mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -73,7 +128,7 @@ func (x *ContentDelete) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentDelete.ProtoReflect.Descriptor instead. 
func (*ContentDelete) Descriptor() ([]byte, []int) { - return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{0} + return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{1} } func (x *ContentDelete) GetDigest() string { @@ -94,14 +149,18 @@ var file_github_com_containerd_containerd_api_events_content_proto_rawDesc = []b 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x27, 0x0a, 0x0d, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3b, 0x0a, 0x0d, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x27, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -116,9 +175,10 @@ func file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP( return file_github_com_containerd_containerd_api_events_content_proto_rawDescData } -var file_github_com_containerd_containerd_api_events_content_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_events_content_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_github_com_containerd_containerd_api_events_content_proto_goTypes = []interface{}{ - (*ContentDelete)(nil), // 0: containerd.events.ContentDelete + (*ContentCreate)(nil), // 0: containerd.events.ContentCreate + (*ContentDelete)(nil), // 1: containerd.events.ContentDelete } var file_github_com_containerd_containerd_api_events_content_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -135,6 +195,18 @@ func file_github_com_containerd_containerd_api_events_content_proto_init() { } if !protoimpl.UnsafeEnabled { file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContentCreate); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_content_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ContentDelete); i { case 0: return &v.state @@ -153,7 +225,7 @@ func file_github_com_containerd_containerd_api_events_content_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_containerd_containerd_api_events_content_proto_rawDesc, NumEnums: 0, - NumMessages: 1, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/containerd/containerd/api/events/content.proto b/vendor/github.com/containerd/containerd/api/events/content.proto index 6b023d6b64..58bd9155e9 100644 --- a/vendor/github.com/containerd/containerd/api/events/content.proto +++ b/vendor/github.com/containerd/containerd/api/events/content.proto @@ -23,6 +23,11 @@ import "github.com/containerd/containerd/api/types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; +message ContentCreate { + string digest = 1; + int64 size = 2; +} + message ContentDelete { string digest = 1; } diff --git a/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go index 9485b664c1..d8d717884f 100644 --- a/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go @@ -2,6 +2,20 @@ // source: github.com/containerd/containerd/api/events/content.proto package events +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContentCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: size + case "digest": + return string(m.Digest), len(m.Digest) > 0 + } + return "", false +} + // Field returns the value for the given fieldpath as a string, if defined. // If the value is not defined, the second value will be false. func (m *ContentDelete) Field(fieldpath []string) (string, bool) { diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index 23ddfab1a6..3c152f2815 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -45,6 +45,8 @@ const ( Gzip // Zstd is zstd compression algorithm. Zstd + // Unknown is used when a plugin handles the algorithm. + Unknown ) const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" @@ -254,6 +256,8 @@ func (compression *Compression) Extension() string { return "gz" case Zstd: return "zst" + case Unknown: + return "unknown" } return "" } diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go deleted file mode 100644 index 9e9e8b4913..0000000000 --- a/vendor/github.com/containerd/containerd/log/context_deprecated.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/containerd/log" -) - -// G is a shorthand for [GetLogger]. -// -// Deprecated: use [log.G]. -var G = log.G - -// L is an alias for the standard logger. -// -// Deprecated: use [log.L]. -var L = log.L - -// Fields type to pass to "WithFields". -// -// Deprecated: use [log.Fields]. -type Fields = log.Fields - -// Entry is a logging entry. -// -// Deprecated: use [log.Entry]. -type Entry = log.Entry - -// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using -// zeros to ensure the formatted time is always the same number of -// characters. -// -// Deprecated: use [log.RFC3339NanoFixed]. -const RFC3339NanoFixed = log.RFC3339NanoFixed - -// Level is a logging level. -// -// Deprecated: use [log.Level]. -type Level = log.Level - -// Supported log levels. -const ( - // TraceLevel level. - // - // Deprecated: use [log.TraceLevel]. - TraceLevel Level = log.TraceLevel - - // DebugLevel level. - // - // Deprecated: use [log.DebugLevel]. - DebugLevel Level = log.DebugLevel - - // InfoLevel level. - // - // Deprecated: use [log.InfoLevel]. - InfoLevel Level = log.InfoLevel - - // WarnLevel level. - // - // Deprecated: use [log.WarnLevel]. - WarnLevel Level = log.WarnLevel - - // ErrorLevel level - // - // Deprecated: use [log.ErrorLevel]. - ErrorLevel Level = log.ErrorLevel - - // FatalLevel level. - // - // Deprecated: use [log.FatalLevel]. - FatalLevel Level = log.FatalLevel - - // PanicLevel level. - // - // Deprecated: use [log.PanicLevel]. - PanicLevel Level = log.PanicLevel -) - -// SetLevel sets log level globally. It returns an error if the given -// level is not supported. -// -// Deprecated: use [log.SetLevel]. -func SetLevel(level string) error { - return log.SetLevel(level) -} - -// GetLevel returns the current log level. -// -// Deprecated: use [log.GetLevel]. -func GetLevel() log.Level { - return log.GetLevel() -} - -// OutputFormat specifies a log output format. -// -// Deprecated: use [log.OutputFormat]. -type OutputFormat = log.OutputFormat - -// Supported log output formats. -const ( - // TextFormat represents the text logging format. - // - // Deprecated: use [log.TextFormat]. - TextFormat log.OutputFormat = "text" - - // JSONFormat represents the JSON logging format. - // - // Deprecated: use [log.JSONFormat]. - JSONFormat log.OutputFormat = "json" -) - -// SetFormat sets the log output format. -// -// Deprecated: use [log.SetFormat]. -func SetFormat(format OutputFormat) error { - return log.SetFormat(format) -} - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -// -// Deprecated: use [log.WithLogger]. -func WithLogger(ctx context.Context, logger *log.Entry) context.Context { - return log.WithLogger(ctx, logger) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -// -// Deprecated: use [log.GetLogger]. 
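Every alias in this deleted file forwards to `github.com/containerd/log`, so migration is one-for-one; a minimal sketch of the direct replacements:

```go
package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	// log.G and log.WithLogger replace the removed containerd/containerd/log
	// aliases with identical semantics.
	ctx := log.WithLogger(context.Background(),
		log.G(context.Background()).WithField("module", "example"))

	if err := log.SetLevel("debug"); err != nil {
		log.L.WithError(err).Fatal("setting log level")
	}
	log.G(ctx).Debug("debug message carrying the module field")
}
```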
-func GetLogger(ctx context.Context) *log.Entry { - return log.GetLogger(ctx) -} diff --git a/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go index 124e8edb50..1e4e06c209 100644 --- a/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go +++ b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go @@ -37,12 +37,11 @@ func SourceDateEpoch() (*time.Time, error) { if !ok || v == "" { return nil, nil // not an error } - i64, err := strconv.ParseInt(v, 10, 64) + t, err := ParseSourceDateEpoch(v) if err != nil { - return nil, fmt.Errorf("invalid %s value %q: %w", SourceDateEpochEnv, v, err) + return nil, fmt.Errorf("invalid %s value: %w", SourceDateEpochEnv, err) } - unix := time.Unix(i64, 0).UTC() - return &unix, nil + return t, nil } // SourceDateEpochOrNow returns the SOURCE_DATE_EPOCH time if available, @@ -58,12 +57,26 @@ func SourceDateEpochOrNow() time.Time { return time.Now().UTC() } +// ParseSourceDateEpoch parses the given source date epoch, as *time.Time. +// It returns an error if sourceDateEpoch is empty or not well-formatted. +func ParseSourceDateEpoch(sourceDateEpoch string) (*time.Time, error) { + if sourceDateEpoch == "" { + return nil, fmt.Errorf("value is empty") + } + i64, err := strconv.ParseInt(sourceDateEpoch, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid value: %w", err) + } + unix := time.Unix(i64, 0).UTC() + return &unix, nil +} + // SetSourceDateEpoch sets the SOURCE_DATE_EPOCH env var. func SetSourceDateEpoch(tm time.Time) { - os.Setenv(SourceDateEpochEnv, fmt.Sprintf("%d", tm.Unix())) + _ = os.Setenv(SourceDateEpochEnv, strconv.Itoa(int(tm.Unix()))) } // UnsetSourceDateEpoch unsets the SOURCE_DATE_EPOCH env var. func UnsetSourceDateEpoch() { - os.Unsetenv(SourceDateEpochEnv) + _ = os.Unsetenv(SourceDateEpochEnv) } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go index 244e03509a..c9c224b2ac 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -86,11 +86,11 @@ type TokenOptions struct { // OAuthTokenResponse is response from fetching token with a OAuth POST request type OAuthTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresInSeconds int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` } // FetchTokenWithOAuth fetches a token using a POST request @@ -152,11 +152,11 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. 
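The epoch changes above factor the `SOURCE_DATE_EPOCH` parsing into `ParseSourceDateEpoch`; a small sketch of reproducible-timestamp handling (the timestamp value is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/containerd/pkg/epoch"
)

func main() {
	// SOURCE_DATE_EPOCH holds a Unix timestamp in seconds, the convention
	// used by reproducible builds.
	os.Setenv("SOURCE_DATE_EPOCH", "1704067200") // 2024-01-01T00:00:00Z

	t, err := epoch.ParseSourceDateEpoch(os.Getenv("SOURCE_DATE_EPOCH"))
	if err != nil {
		fmt.Println("invalid epoch:", err)
		return
	}
	fmt.Println(t.UTC()) // 2024-01-01 00:00:00 +0000 UTC

	// SourceDateEpochOrNow falls back to the current time when unset.
	fmt.Println(epoch.SourceDateEpochOrNow())
}
```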
// FetchTokenResponse is response from fetching token with GET request type FetchTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresInSeconds int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` } // FetchToken fetches a token using a GET request diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 2bf388e8cb..6aabe95a45 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -24,6 +24,7 @@ import ( "net/http" "strings" "sync" + "time" "github.com/containerd/log" @@ -206,9 +207,10 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R // authResult is used to control limit rate. type authResult struct { sync.WaitGroup - token string - refreshToken string - err error + token string + refreshToken string + expirationTime *time.Time + err error } // authHandler is used to handle auth request per registry server. @@ -271,8 +273,12 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st // Docs: https://docs.docker.com/registry/spec/auth/scope scoped := strings.Join(to.Scopes, " ") + // Keep track of the expiration time of cached bearer tokens so they can be + // refreshed when they expire without a server roundtrip. + var expirationTime *time.Time + ah.Lock() - if r, exist := ah.scopedTokens[scoped]; exist { + if r, exist := ah.scopedTokens[scoped]; exist && (r.expirationTime == nil || r.expirationTime.After(time.Now())) { ah.Unlock() r.Wait() return r.token, r.refreshToken, r.err @@ -286,7 +292,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st defer func() { token = fmt.Sprintf("Bearer %s", token) - r.token, r.refreshToken, r.err = token, refreshToken, err + r.token, r.refreshToken, r.err, r.expirationTime = token, refreshToken, err, expirationTime r.Done() }() @@ -312,6 +318,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st if err != nil { return "", "", err } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) return resp.Token, resp.RefreshToken, nil } log.G(ctx).WithFields(log.Fields{ @@ -321,6 +328,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st } return "", "", err } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) return resp.AccessToken, resp.RefreshToken, nil } // do request anonymously @@ -328,9 +336,18 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st if err != nil { return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err) } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) return resp.Token, resp.RefreshToken, nil } +func getExpirationTime(expiresInSeconds int) *time.Time { + if expiresInSeconds <= 0 { + return nil + } + expirationTime := time.Now().Add(time.Duration(expiresInSeconds) * time.Second) + return &expirationTime +} + func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) { errStr := c.Parameters["error"] if errStr == "" { diff --git 
a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index e806164cab..9075534661 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.7.27+unknown" + Version = "1.7.29+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go b/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go index 7a34eda3ca..8192465cd0 100644 --- a/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go +++ b/vendor/github.com/containerd/continuity/fs/stat_darwinbsd.go @@ -19,10 +19,36 @@ package fs import ( + "fmt" + "io/fs" "syscall" "time" ) +func Atime(st fs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return time.Unix(stSys.Atimespec.Unix()), nil +} + +func Ctime(st fs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return time.Unix(stSys.Ctimespec.Unix()), nil +} + +func Mtime(st fs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return time.Unix(stSys.Mtimespec.Unix()), nil +} + // StatAtime returns the access time from a stat struct func StatAtime(st *syscall.Stat_t) syscall.Timespec { return st.Atimespec diff --git a/vendor/github.com/containerd/continuity/fs/stat_unix.go b/vendor/github.com/containerd/continuity/fs/stat_unix.go index 0edebdf1d7..503d24eecf 100644 --- a/vendor/github.com/containerd/continuity/fs/stat_unix.go +++ b/vendor/github.com/containerd/continuity/fs/stat_unix.go @@ -30,7 +30,7 @@ func Atime(st fs.FileInfo) (time.Time, error) { if !ok { return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) } - return StatATimeAsTime(stSys), nil + return time.Unix(stSys.Atim.Unix()), nil } func Ctime(st fs.FileInfo) (time.Time, error) { @@ -38,7 +38,7 @@ func Ctime(st fs.FileInfo) (time.Time, error) { if !ok { return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) } - return time.Unix(stSys.Atim.Unix()), nil + return time.Unix(stSys.Ctim.Unix()), nil } func Mtime(st fs.FileInfo) (time.Time, error) { diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 6aba0ef1f6..8b804b7dde 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -408,11 +408,11 @@ func readerFromEntries(entries ...*entry) io.Reader { defer tw.Close() for _, entry := range entries { if err := tw.WriteHeader(entry.header); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err)) return } if _, err := io.Copy(tw, entry.payload); err != nil { - pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + 
pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err)) return } } @@ -627,12 +627,12 @@ func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { switch whence { default: - return 0, fmt.Errorf("Unknown whence: %v", whence) + return 0, fmt.Errorf("unknown whence: %v", whence) case io.SeekStart: case io.SeekCurrent: offset += *cr.cPos case io.SeekEnd: - return 0, fmt.Errorf("Unsupported whence: %v", whence) + return 0, fmt.Errorf("unsupported whence: %v", whence) } if offset < 0 { diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index f24afe32f4..88fa13b191 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -109,7 +109,7 @@ func gzipFooterBytes(tocOff int64) []byte { header[0], header[1] = 'S', 'G' subfield := fmt.Sprintf("%016xSTARGZ", tocOff) binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 - gz.Header.Extra = append(header, []byte(subfield)...) + gz.Extra = append(header, []byte(subfield)...) gz.Close() if buf.Len() != FooterSize { panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) @@ -136,7 +136,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t return 0, 0, 0, err } defer zr.Close() - extra := zr.Header.Extra + extra := zr.Extra si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] if si1 != 'S' || si2 != 'G' { return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2) @@ -181,7 +181,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) } defer zr.Close() - extra := zr.Header.Extra + extra := zr.Extra if len(extra) != 16+len("STARGZ") { return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size") } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index ba650b4d1d..a8dcdb868e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -357,14 +357,15 @@ func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.S buf := new(bytes.Buffer) var w io.WriteCloser var err error - if srcCompression == gzipType { + switch srcCompression { + case gzipType: w = gzip.NewWriter(buf) - } else if srcCompression == zstdType { + case zstdType: w, err = zstd.NewWriter(buf) if err != nil { t.Fatalf("failed to init zstd writer: %v", err) } - } else { + default: return src } src.Seek(0, io.SeekStart) @@ -445,7 +446,7 @@ func contains(t *testing.T, a, b stargzEntry) bool { bbytes, bnext, bok := readOffset(t, bf, nr, b) if !aok && !bok { break - } else if !(aok && bok) || anext != bnext { + } else if !aok || !bok || anext != bnext { t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v", ae.Name, be.Name, nr, aok, bok, anext, bnext) return false @@ -2346,8 +2347,8 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { t.Fatalf("countStreams(gzip), Copy: %v", err) } var extra string - if len(zr.Header.Extra) > 0 { - extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + if len(zr.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Extra) } t.Logf(" [%d] at %d in 
stargz, uncompressed length %d%s", numStreams, zoff, n, extra) delete(wants, int64(zoff)) diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go deleted file mode 100644 index da8e13c1df..0000000000 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build containers_image_fulcio_stub - -package signature - -import ( - "crypto" - "crypto/ecdsa" - "crypto/x509" - "errors" -) - -type fulcioTrustRoot struct { - caCertificates *x509.CertPool - oidcIssuer string - subjectEmail string -} - -func (f *fulcioTrustRoot) validate() error { - return errors.New("fulcio disabled at compile-time") -} - -func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, - untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, - untrustedPayloadBytes []byte) (crypto.PublicKey, error) { - return nil, errors.New("fulcio disabled at compile-time") - -} diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go deleted file mode 100644 index 4ff3da7edb..0000000000 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build containers_image_rekor_stub - -package internal - -import ( - "crypto/ecdsa" - "time" -) - -// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. -// Returns bundle upload time on success. -func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { - return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time") -} diff --git a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go index bf7671dd2b..7031f281a0 100644 --- a/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/activation/files_unix.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !windows -// +build !windows // Package activation implements primitives for systemd socket activation. package activation @@ -38,9 +37,12 @@ const ( // fd usage and to avoid leaking environment flags to child processes. func Files(unsetEnv bool) []*os.File { if unsetEnv { - defer os.Unsetenv("LISTEN_PID") - defer os.Unsetenv("LISTEN_FDS") - defer os.Unsetenv("LISTEN_FDNAMES") + defer func() { + // Unsetenv implementation for unix never returns an error. + _ = os.Unsetenv("LISTEN_PID") + _ = os.Unsetenv("LISTEN_FDS") + _ = os.Unsetenv("LISTEN_FDNAMES") + }() } pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index 62d91b77d5..5673f5c0bc 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -1,3 +1,4 @@ +// Package md2man aims in converting markdown into roff (man pages). 
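The socket-activation change above only tightens the environment cleanup; usage of `activation.Files` is unchanged. A hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/v22/activation"
)

func main() {
	// Files(true) consumes LISTEN_PID, LISTEN_FDS and LISTEN_FDNAMES so the
	// variables do not leak to child processes.
	files := activation.Files(true)
	if len(files) == 0 {
		fmt.Println("not launched via systemd socket activation")
		return
	}
	for i, f := range files {
		fmt.Printf("fd %d: %s\n", i+3, f.Name()) // inherited fds start at 3
	}
}
```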
package md2man import ( diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index 96a80c99b8..4f1070fc5b 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -47,13 +47,13 @@ const ( tableStart = "\n.TS\nallbox;\n" tableEnd = ".TE\n" tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" + tableCellEnd = "\nT}" tablePreprocessor = `'\" t` ) // NewRoffRenderer creates a new blackfriday Renderer for generating roff documents // from markdown -func NewRoffRenderer() *roffRenderer { // nolint: golint +func NewRoffRenderer() *roffRenderer { return &roffRenderer{} } @@ -316,9 +316,8 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente } else if nodeLiteralSize(node) > 30 { end = tableCellEnd } - if node.Next == nil && end != tableCellEnd { - // Last cell: need to carriage return if we are at the end of the - // header row and content isn't wrapped in a "tablecell" + if node.Next == nil { + // Last cell: need to carriage return if we are at the end of the header row. end += crTag } out(w, end) @@ -356,7 +355,7 @@ func countColumns(node *blackfriday.Node) int { } func out(w io.Writer, output string) { - io.WriteString(w, output) // nolint: errcheck + io.WriteString(w, output) //nolint:errcheck } func escapeSpecialChars(w io.Writer, text []byte) { @@ -395,7 +394,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) { i++ } if i > org { - w.Write(text[org:i]) // nolint: errcheck + w.Write(text[org:i]) //nolint:errcheck } // escape a character @@ -403,7 +402,7 @@ func escapeSpecialCharsLine(w io.Writer, text []byte) { break } - w.Write([]byte{'\\', text[i]}) // nolint: errcheck + w.Write([]byte{'\\', text[i]}) //nolint:errcheck } } diff --git a/vendor/github.com/distribution/distribution/v3/.golangci.yml b/vendor/github.com/distribution/distribution/v3/.golangci.yml index 823cc43de2..0f4d6ce0ec 100644 --- a/vendor/github.com/distribution/distribution/v3/.golangci.yml +++ b/vendor/github.com/distribution/distribution/v3/.golangci.yml @@ -23,6 +23,5 @@ linters-settings: disabled: true issues: - deadline: 2m - exlude-dirs: + exclude-dirs: - vendor diff --git a/vendor/github.com/distribution/distribution/v3/.mailmap b/vendor/github.com/distribution/distribution/v3/.mailmap index 50aff82d54..debd4ff4d7 100644 --- a/vendor/github.com/distribution/distribution/v3/.mailmap +++ b/vendor/github.com/distribution/distribution/v3/.mailmap @@ -11,6 +11,7 @@ Andrew Meredith Andrey Smirnov Andrii Soldatenko Andrii Soldatenko +andy-cooper Ankur Kothiwal Anthony Ramahay Antonio Murdaca @@ -105,6 +106,7 @@ Maria Bermudez Maria Bermudez Markus Thömmes Matheus Macabu +Mateusz Urbanek Matt Linville Matt Linville Matt Linville @@ -126,6 +128,7 @@ Nikita Tarasov Nikita Tarasov Oleg Bulatov Oleg Bulatov +Oleg Gnusarev Olivier Gambier Olivier Gambier Omer Cohen @@ -139,6 +142,7 @@ Phil Estes Phil Estes Phil Estes Pratik +Rafael Fonseca Richard Scothern Richard Scothern Rober Morales-Chaparro @@ -168,6 +172,7 @@ Tibor Tibor Vass Tibor Vass Tibor Vass +Vadim Bauer Victor Barbu Victor Vieux Victor Vieux diff --git a/vendor/github.com/distribution/distribution/v3/AUTHORS b/vendor/github.com/distribution/distribution/v3/AUTHORS index 8dae29d7e0..360a284529 100644 --- a/vendor/github.com/distribution/distribution/v3/AUTHORS +++ b/vendor/github.com/distribution/distribution/v3/AUTHORS @@ -44,6 +44,7 @@ Andrey Kostov Andrey Smirnov Andrii Soldatenko Andy 
Goldstein +andy-cooper andyzhangx Anian Z Anil Belur @@ -174,6 +175,7 @@ Erica Windisch Erik Hollensbe Etki Eugene Lubarsky +evanebb eyjhb eyjhbb@gmail.com Fabio Berchtold @@ -337,6 +339,7 @@ Mark Sagi-Kazar Markus Thömmes Mary Anthony Masataka Mizukoshi +Mateusz Urbanek Matheus Macabu Matin Rahmanian MATSUMOTO TAKEAKI @@ -389,6 +392,7 @@ Nuutti Kotivuori Nycholas de Oliveira e Oliveira Oilbeater Oleg Bulatov +Oleg Gnusarev olegburov oliver-goetz Olivier @@ -419,6 +423,7 @@ Pratik Qiang Huang Qiao Anran Radon Rosborough +Rafael Fonseca Randy Barlow Raphaël Enrici Ricardo Maraschini @@ -518,6 +523,7 @@ Troels Thomsen uhayate Usha Mandya <47779042+usha-mandya@users.noreply.github.com> Usha Mandya +Vadim Bauer Vaidas Jablonskis Vega Chou Veres Lajos diff --git a/vendor/github.com/distribution/distribution/v3/BUILDING.md b/vendor/github.com/distribution/distribution/v3/BUILDING.md index 7ee447324f..5867e2aabd 100644 --- a/vendor/github.com/distribution/distribution/v3/BUILDING.md +++ b/vendor/github.com/distribution/distribution/v3/BUILDING.md @@ -102,15 +102,15 @@ the environment variable `BUILDTAGS`.
Compiles without resumable digest support
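As an illustration of the mechanism, a build tag simply includes or excludes whole files at compile time. A minimal sketch in Go, assuming a hypothetical `noresumabledigest` tag name and package (this patch does not show the actual tag):

```
//go:build !noresumabledigest

// Package digestutil is a hypothetical example package. This file is only
// compiled when the assumed "noresumabledigest" tag is absent; building with
// `go build -tags noresumabledigest` (what a BUILDTAGS-style Makefile
// variable typically expands to) drops it from the binary.
package digestutil

// ResumableSupported reports whether resumable digest support was compiled in.
func ResumableSupported() bool { return true }
```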
-### Local cloud storage environment +### Local S3 store environment -You can run an S3 API compatible storage locally with [minio](https://min.io/). +You can run an S3 API compatible store locally with [minio](https://min.io/). You must have a [docker compose](https://docs.docker.com/compose/) compatible tool installed on your workstation. -Start the local cloud environment: +Start the local S3 store environment: ``` -make start-cloud-storage +make start-s3-storage ``` There is a sample registry configuration file that lets you point the registry to the started storage: ``` @@ -121,9 +121,9 @@ AWS_ACCESS_KEY=distribution \ S3_ENCRYPT=false \ REGION_ENDPOINT=http://127.0.0.1:9000 \ S3_SECURE=false \ -./bin/registry serve tests/conf-local-cloud.yml +./bin/registry serve tests/conf-local-s3.yml ``` -Stop the local storage when done: +Stop the local S3 store when done: ``` -make stop-cloud-storage +make stop-s3-storage ``` diff --git a/vendor/github.com/distribution/distribution/v3/Dockerfile b/vendor/github.com/distribution/distribution/v3/Dockerfile index b99f1179c8..8cbce906ba 100644 --- a/vendor/github.com/distribution/distribution/v3/Dockerfile +++ b/vendor/github.com/distribution/distribution/v3/Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1 -ARG GO_VERSION=1.23.6 +ARG GO_VERSION=1.23.7 ARG ALPINE_VERSION=3.21 ARG XX_VERSION=1.6.1 diff --git a/vendor/github.com/distribution/distribution/v3/Makefile b/vendor/github.com/distribution/distribution/v3/Makefile index 97873bc2c4..9647ed9a02 100644 --- a/vendor/github.com/distribution/distribution/v3/Makefile +++ b/vendor/github.com/distribution/distribution/v3/Makefile @@ -122,26 +122,26 @@ test-coverage: ## run unit tests and generate test coverprofiles fi; \ done ) -.PHONY: test-cloud-storage -test-cloud-storage: start-cloud-storage run-s3-tests stop-cloud-storage ## run cloud storage driver tests +.PHONY: test-s3-storage +test-s3-storage: start-s3-storage run-s3-tests stop-s3-storage ## run s3 storage driver tests -.PHONY: start-cloud-storage -start-cloud-storage: ## start local cloud storage (minio) +.PHONY: start-s3-storage +start-s3-storage: ## start local s3 storage (minio) $(COMPOSE) -f tests/docker-compose-storage.yml up minio minio-init -d -.PHONY: stop-cloud-storage -stop-cloud-storage: ## stop local cloud storage (minio) +.PHONY: stop-s3-storage +stop-s3-storage: ## stop local s3 storage (minio) $(COMPOSE) -f tests/docker-compose-storage.yml down -.PHONY: reset-cloud-storage -reset-cloud-storage: ## reset (stop, delete, start) local cloud storage (minio) +.PHONY: reset-s3-storage +reset-s3-storage: ## reset (stop, delete, start) local s3 storage (minio) $(COMPOSE) -f tests/docker-compose-storage.yml down @mkdir -p tests/miniodata/distribution @rm -rf tests/miniodata/distribution/* tests/miniodata/.minio.sys $(COMPOSE) -f tests/docker-compose-storage.yml up minio minio-init -d .PHONY: run-s3-tests -run-s3-tests: start-cloud-storage ## run S3 storage driver integration tests +run-s3-tests: start-s3-storage ## run S3 storage driver integration tests AWS_ACCESS_KEY=distribution \ AWS_SECRET_KEY=password \ AWS_REGION=us-east-1 \ @@ -161,6 +161,27 @@ start-e2e-s3-env: ## starts E2E S3 storage test environment (S3, Redis, registry stop-e2e-s3-env: ## stops E2E S3 storage test environment (S3, Redis, registry) $(COMPOSE) -f tests/docker-compose-e2e-cloud-storage.yml down +.PHONY: test-azure-storage +test-azure-storage: start-azure-storage run-azure-tests stop-azure-storage ## run Azure storage driver tests + +.PHONY: 
start-azure-storage +start-azure-storage: ## start local Azure storage (Azurite) + $(COMPOSE) -f tests/docker-compose-azure-blob-store.yaml up azurite azurite-init -d + +.PHONY: stop-azure-storage +stop-azure-storage: ## stop local Azure storage (Azurite) + $(COMPOSE) -f tests/docker-compose-azure-blob-store.yaml down + +.PHONY: run-azure-tests +run-azure-tests: start-azure-storage ## run Azure storage driver integration tests + AZURE_SKIP_VERIFY=true \ + AZURE_STORAGE_CREDENTIALS_TYPE="shared_key" \ + AZURE_STORAGE_ACCOUNT_NAME=devstoreaccount1 \ + AZURE_STORAGE_ACCOUNT_KEY="Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" \ + AZURE_STORAGE_CONTAINER=containername \ + AZURE_SERVICE_URL="https://127.0.0.1:10000/devstoreaccount1" \ + go test ${TESTFLAGS} -count=1 ./registry/storage/driver/azure/... + ##@ Validate lint: ## run all linters diff --git a/vendor/github.com/distribution/distribution/v3/configuration/configuration.go b/vendor/github.com/distribution/distribution/v3/configuration/configuration.go index f6b88d84b3..36105b841e 100644 --- a/vendor/github.com/distribution/distribution/v3/configuration/configuration.go +++ b/vendor/github.com/distribution/distribution/v3/configuration/configuration.go @@ -23,31 +23,7 @@ type Configuration struct { // Log supports setting various parameters related to the logging // subsystem. - Log struct { - // AccessLog configures access logging. - AccessLog struct { - // Disabled disables access logging. - Disabled bool `yaml:"disabled,omitempty"` - } `yaml:"accesslog,omitempty"` - - // Level is the granularity at which registry operations are logged. - Level Loglevel `yaml:"level,omitempty"` - - // Formatter overrides the default formatter with another. Options - // include "text", "json" and "logstash". - Formatter string `yaml:"formatter,omitempty"` - - // Fields allows users to specify static string fields to include in - // the logger context. - Fields map[string]interface{} `yaml:"fields,omitempty"` - - // Hooks allows users to configure the log hooks, to enabling the - // sequent handling behavior, when defined levels of log message emit. - Hooks []LogHook `yaml:"hooks,omitempty"` - - // ReportCaller allows user to configure the log to report the caller - ReportCaller bool `yaml:"reportcaller,omitempty"` - } + Log Log `yaml:"log"` // Loglevel is the level at which registry operations are logged. // @@ -66,113 +42,7 @@ type Configuration struct { // HTTP contains configuration parameters for the registry's http // interface. - HTTP struct { - // Addr specifies the bind address for the registry instance. - Addr string `yaml:"addr,omitempty"` - - // Net specifies the net portion of the bind address. A default empty value means tcp. - Net string `yaml:"net,omitempty"` - - // Host specifies an externally-reachable address for the registry, as a fully - // qualified URL. - Host string `yaml:"host,omitempty"` - - Prefix string `yaml:"prefix,omitempty"` - - // Secret specifies the secret key which HMAC tokens are created with. - Secret string `yaml:"secret,omitempty"` - - // RelativeURLs specifies that relative URLs should be returned in - // Location headers - RelativeURLs bool `yaml:"relativeurls,omitempty"` - - // Amount of time to wait for connection to drain before shutting down when registry - // receives a stop signal - DrainTimeout time.Duration `yaml:"draintimeout,omitempty"` - - // TLS instructs the http server to listen with a TLS configuration.
- // This only support simple tls configuration with a cert and key. - // Mostly, this is useful for testing situations or simple deployments - // that require tls. If more complex configurations are required, use - // a proxy or make a proposal to add support here. - TLS struct { - // Certificate specifies the path to an x509 certificate file to - // be used for TLS. - Certificate string `yaml:"certificate,omitempty"` - - // Key specifies the path to the x509 key file, which should - // contain the private portion for the file specified in - // Certificate. - Key string `yaml:"key,omitempty"` - - // Specifies the CA certs for client authentication - // A file may contain multiple CA certificates encoded as PEM - ClientCAs []string `yaml:"clientcas,omitempty"` - - // Client certificate authentication mode - // One of: request-client-cert, require-any-client-cert, verify-client-cert-if-given, require-and-verify-client-cert - ClientAuth ClientAuth `yaml:"clientauth,omitempty"` - - // Specifies the lowest TLS version allowed - MinimumTLS string `yaml:"minimumtls,omitempty"` - - // Specifies a list of cipher suites allowed - CipherSuites []string `yaml:"ciphersuites,omitempty"` - - // LetsEncrypt is used to configuration setting up TLS through - // Let's Encrypt instead of manually specifying certificate and - // key. If a TLS certificate is specified, the Let's Encrypt - // section will not be used. - LetsEncrypt struct { - // CacheFile specifies cache file to use for lets encrypt - // certificates and keys. - CacheFile string `yaml:"cachefile,omitempty"` - - // Email is the email to use during Let's Encrypt registration - Email string `yaml:"email,omitempty"` - - // Hosts specifies the hosts which are allowed to obtain Let's - // Encrypt certificates. - Hosts []string `yaml:"hosts,omitempty"` - - // DirectoryURL points to the CA directory endpoint. - // If empty, LetsEncrypt is used. - DirectoryURL string `yaml:"directoryurl,omitempty"` - } `yaml:"letsencrypt,omitempty"` - } `yaml:"tls,omitempty"` - - // Headers is a set of headers to include in HTTP responses. A common - // use case for this would be security headers such as - // Strict-Transport-Security. The map keys are the header names, and - // the values are the associated header payloads. - Headers http.Header `yaml:"headers,omitempty"` - - // Debug configures the http debug interface, if specified. This can - // include services such as pprof, expvar and other data that should - // not be exposed externally. Left disabled by default. - Debug struct { - // Addr specifies the bind address for the debug server. - Addr string `yaml:"addr,omitempty"` - // Prometheus configures the Prometheus telemetry endpoint. - Prometheus struct { - Enabled bool `yaml:"enabled,omitempty"` - Path string `yaml:"path,omitempty"` - } `yaml:"prometheus,omitempty"` - } `yaml:"debug,omitempty"` - - // HTTP2 configuration options - HTTP2 struct { - // Specifies whether the registry should disallow clients attempting - // to connect via HTTP/2. If set to true, only HTTP/1.1 is supported. - Disabled bool `yaml:"disabled,omitempty"` - } `yaml:"http2,omitempty"` - - H2C struct { - // Enables H2C (HTTP/2 Cleartext). Enable to support HTTP/2 without needing to configure TLS - // Useful when deploying the registry behind a load balancer (e.g. 
Cloud Run) - Enabled bool `yaml:"enabled,omitempty"` - } `yaml:"h2c,omitempty"` - } `yaml:"http,omitempty"` + HTTP HTTP `yaml:"http,omitempty"` // Notifications specifies configuration about various endpoint to which // registry events are dispatched. @@ -181,31 +51,41 @@ type Configuration struct { // Redis configures the redis pool available to the registry webapp. Redis Redis `yaml:"redis,omitempty"` - Health Health `yaml:"health,omitempty"` + // Health provides the configuration section for health checks. + // It allows defining various checks to monitor the health of different subsystems. + Health Health `yaml:"health,omitempty"` + + // Catalog is composed of MaxEntries. + // Catalog endpoint (/v2/_catalog) configuration, it provides the configuration + // options to control the maximum number of entries returned by the catalog endpoint. Catalog Catalog `yaml:"catalog,omitempty"` + // Proxy defines the configuration options for using the registry as a pull-through cache. Proxy Proxy `yaml:"proxy,omitempty"` // Validation configures validation options for the registry. Validation Validation `yaml:"validation,omitempty"` // Policy configures registry policy options. - Policy struct { - // Repository configures policies for repositories - Repository struct { - // Classes is a list of repository classes which the - // registry allows content for. This class is matched - // against the configuration media type inside uploaded - // manifests. When non-empty, the registry will enforce - // the class in authorized resources. - Classes []string `yaml:"classes"` - } `yaml:"repository,omitempty"` - } `yaml:"policy,omitempty"` -} - -// Catalog is composed of MaxEntries. -// Catalog endpoint (/v2/_catalog) configuration, it provides the configuration -// options to control the maximum number of entries returned by the catalog endpoint. + Policy Policy `yaml:"policy,omitempty"` +} + +// Policy defines configuration options for managing registry policies. +type Policy struct { + // Repository configures policies for repositories + Repository Repository `yaml:"repository,omitempty"` +} + +// Repository defines configuration options related to repository policies in the registry. +type Repository struct { + // Classes is a list of repository classes that the registry allows content for. + // This value is matched against the media type in uploaded manifests. + // If this field is non-empty, the registry enforces that all uploaded + // content belongs to one of the specified classes. + Classes []string `yaml:"classes"` +} + +// Catalog provides configuration options for the /v2/_catalog endpoint. type Catalog struct { // Max number of entries returned by the catalog endpoint. Requesting n entries // to the catalog endpoint will return at most MaxEntries entries. @@ -213,6 +93,178 @@ type Catalog struct { MaxEntries int `yaml:"maxentries,omitempty"` } +// Log represents the configuration for logging within the application. +type Log struct { + // AccessLog configures access logging. + AccessLog AccessLog `yaml:"accesslog,omitempty"` + + // Level is the granularity at which registry operations are logged. + Level Loglevel `yaml:"level,omitempty"` + + // Formatter overrides the default formatter with another. Options + // include "text", "json" and "logstash". + Formatter string `yaml:"formatter,omitempty"` + + // Fields allows users to specify static string fields to include in + // the logger context. 
+ Fields map[string]interface{} `yaml:"fields,omitempty"` + + // Hooks allows users to configure the log hooks, to enabling the + // sequent handling behavior, when defined levels of log message emit. + Hooks []LogHook `yaml:"hooks,omitempty"` + + // ReportCaller allows user to configure the log to report the caller + ReportCaller bool `yaml:"reportcaller,omitempty"` +} + +// AccessLog configures options for access logging. +type AccessLog struct { + // Disabled disables access logging. + Disabled bool `yaml:"disabled,omitempty"` +} + +// HTTP defines configuration options for the HTTP interface of the registry. +type HTTP struct { + // Addr specifies the bind address for the registry instance. + Addr string `yaml:"addr,omitempty"` + + // Net specifies the net portion of the bind address. A default empty value means tcp. + Net string `yaml:"net,omitempty"` + + // Host specifies an externally-reachable address for the registry, as a fully + // qualified URL. + Host string `yaml:"host,omitempty"` + + // Prefix specifies a URL path prefix for the HTTP interface. + // This can be used to serve the registry under a specific path + // rather than at the root of the domain (e.g., "/registry"). + Prefix string `yaml:"prefix,omitempty"` + + // Secret specifies the secret key which HMAC tokens are created with. + Secret string `yaml:"secret,omitempty"` + + // RelativeURLs specifies that relative URLs should be returned in + // Location headers + RelativeURLs bool `yaml:"relativeurls,omitempty"` + + // Amount of time to wait for connection to drain before shutting down when registry + // receives a stop signal + DrainTimeout time.Duration `yaml:"draintimeout,omitempty"` + + // TLS instructs the http server to listen with a TLS configuration. + // This only support simple tls configuration with a cert and key. + // Mostly, this is useful for testing situations or simple deployments + // that require tls. If more complex configurations are required, use + // a proxy or make a proposal to add support here. + TLS TLS `yaml:"tls,omitempty"` + + // Headers is a set of headers to include in HTTP responses. A common + // use case for this would be security headers such as + // Strict-Transport-Security. The map keys are the header names, and + // the values are the associated header payloads. + Headers http.Header `yaml:"headers,omitempty"` + + // Debug configures the http debug interface, if specified. This can + // include services such as pprof, expvar and other data that should + // not be exposed externally. Left disabled by default. + Debug Debug `yaml:"debug,omitempty"` + + // HTTP2 configures options for HTTP/2 support. + HTTP2 HTTP2 `yaml:"http2,omitempty"` + + // H2C configures support for HTTP/2 without requiring TLS (HTTP/2 Cleartext). + H2C H2C `yaml:"h2c,omitempty"` +} + +// Debug defines the configuration options for the registry's debug interface. +// It allows administrators to enable or disable the debug server and configure +// telemetry and monitoring endpoints such as Prometheus. +type Debug struct { + // Addr specifies the bind address for the debug server. + Addr string `yaml:"addr,omitempty"` + + // Prometheus configures the Prometheus telemetry endpoint for monitoring purposes. + Prometheus Prometheus `yaml:"prometheus,omitempty"` +} + +// Prometheus configures the Prometheus telemetry endpoint for the registry. +// It allows administrators to enable Prometheus monitoring and customize +// the scrape path for metric collection. 
+type Prometheus struct { + // Enabled determines whether Prometheus telemetry is enabled or not. + Enabled bool `yaml:"enabled,omitempty"` + + // Path specifies the URL path where the Prometheus metrics are exposed. + // The default is "/metrics", but it can be customized here. + Path string `yaml:"path,omitempty"` +} + +// HTTP2 configures options. +type HTTP2 struct { + // Specifies whether the registry should disallow clients attempting + // to connect via HTTP/2. If set to true, only HTTP/1.1 is supported. + Disabled bool `yaml:"disabled,omitempty"` +} + +// H2C configures support for HTTP/2 Cleartext. +type H2C struct { + // Enables H2C (HTTP/2 Cleartext). Enable to support HTTP/2 without needing to configure TLS + // Useful when deploying the registry behind a load balancer (e.g. Cloud Run) + Enabled bool `yaml:"enabled,omitempty"` +} + +// TLS defines the configuration options for enabling and configuring TLS (Transport Layer Security) +// for secure communication between the registry and clients. It allows the registry to listen for +// HTTPS connections with a specified certificate, key, and optional client authentication settings. +type TLS struct { + // Certificate specifies the path to an x509 certificate file to + // be used for TLS. + Certificate string `yaml:"certificate,omitempty"` + + // Key specifies the path to the x509 key file, which should + // contain the private portion for the file specified in + // Certificate. + Key string `yaml:"key,omitempty"` + + // Specifies the CA certs for client authentication + // A file may contain multiple CA certificates encoded as PEM + ClientCAs []string `yaml:"clientcas,omitempty"` + + // Client certificate authentication mode + // One of: request-client-cert, require-any-client-cert, verify-client-cert-if-given, require-and-verify-client-cert + ClientAuth ClientAuth `yaml:"clientauth,omitempty"` + + // Specifies the lowest TLS version allowed + MinimumTLS string `yaml:"minimumtls,omitempty"` + + // Specifies a list of cipher suites allowed + CipherSuites []string `yaml:"ciphersuites,omitempty"` + + // LetsEncrypt is used to configuration setting up TLS through + // Let's Encrypt instead of manually specifying certificate and + // key. If a TLS certificate is specified, the Let's Encrypt + // section will not be used. + LetsEncrypt LetsEncrypt `yaml:"letsencrypt,omitempty"` +} + +// LetsEncrypt configures automatic TLS certificate provisioning using Let's Encrypt. +type LetsEncrypt struct { + // CacheFile specifies cache file to use for lets encrypt + // certificates and keys. + CacheFile string `yaml:"cachefile,omitempty"` + + // Email is the email to use during Let's Encrypt registration + Email string `yaml:"email,omitempty"` + + // Hosts specifies the hosts which are allowed to obtain Let's + // Encrypt certificates. + Hosts []string `yaml:"hosts,omitempty"` + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncrypt is used. + DirectoryURL string `yaml:"directoryurl,omitempty"` +} + // LogHook is composed of hook Level and Type. // After hooks configuration, it can execute the next handling automatically, // when defined levels of log message emitted. @@ -233,19 +285,8 @@ type LogHook struct { // MailOptions provides the configuration sections to user, for specific handler. 
type MailOptions struct { - SMTP struct { - // Addr defines smtp host address - Addr string `yaml:"addr,omitempty"` - - // Username defines user name to smtp host - Username string `yaml:"username,omitempty"` - - // Password defines password of login user - Password string `yaml:"password,omitempty"` - - // Insecure defines if smtp login skips the secure certification. - Insecure bool `yaml:"insecure,omitempty"` - } `yaml:"smtp,omitempty"` + // SMTP defines the configuration options for the SMTP server used for sending email notifications. + SMTP SMTP `yaml:"smtp,omitempty"` // From defines mail sending address From string `yaml:"from,omitempty"` @@ -254,12 +295,31 @@ type MailOptions struct { To []string `yaml:"to,omitempty"` } +// SMTP represents the configuration for an SMTP (Simple Mail Transfer Protocol) server +// used for sending emails. It includes settings for the SMTP server's address, authentication, +// and other relevant configurations needed to connect and send emails. +type SMTP struct { + // Addr defines smtp host address + Addr string `yaml:"addr,omitempty"` + + // Username defines user name to smtp host + Username string `yaml:"username,omitempty"` + + // Password defines password of login user + Password string `yaml:"password,omitempty"` + + // Insecure defines if smtp login skips the secure certification. + Insecure bool `yaml:"insecure,omitempty"` +} + // FileChecker is a type of entry in the health section for checking files. type FileChecker struct { // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` + // File is the path to check File string `yaml:"file,omitempty"` + // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` @@ -269,14 +329,19 @@ type FileChecker struct { type HTTPChecker struct { // Timeout is the duration to wait before timing out the HTTP request Timeout time.Duration `yaml:"timeout,omitempty"` + // StatusCode is the expected status code StatusCode int + // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` + // URI is the HTTP URI to check URI string `yaml:"uri,omitempty"` + // Headers lists static headers that should be added to all requests Headers http.Header `yaml:"headers"` + // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` @@ -286,10 +351,13 @@ type HTTPChecker struct { type TCPChecker struct { // Timeout is the duration to wait before timing out the TCP connection Timeout time.Duration `yaml:"timeout,omitempty"` + // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` + // Addr is the TCP address to check Addr string `yaml:"addr,omitempty"` + // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` @@ -299,26 +367,40 @@ type TCPChecker struct { type Health struct { // FileCheckers is a list of paths to check FileCheckers []FileChecker `yaml:"file,omitempty"` + // HTTPCheckers is a list of URIs to check HTTPCheckers []HTTPChecker `yaml:"http,omitempty"` + // TCPCheckers is a list of URIs to check TCPCheckers []TCPChecker `yaml:"tcp,omitempty"` + // StorageDriver configures a health check on the configured storage // driver - StorageDriver struct { - // Enabled turns on the health check for the storage driver - Enabled bool `yaml:"enabled,omitempty"` - // Interval is the duration in between 
checks - Interval time.Duration `yaml:"interval,omitempty"` - // Threshold is the number of times a check must fail to trigger an - // unhealthy state - Threshold int `yaml:"threshold,omitempty"` - } `yaml:"storagedriver,omitempty"` + StorageDriver StorageDriver `yaml:"storagedriver,omitempty"` } +// StorageDriver configures health checks specific to the storage driver. +type StorageDriver struct { + // Enabled turns on the health check for the storage driver + Enabled bool `yaml:"enabled,omitempty"` + + // Interval is the duration in between checks + Interval time.Duration `yaml:"interval,omitempty"` + + // Threshold is the number of times a check must fail to trigger an + // unhealthy state + Threshold int `yaml:"threshold,omitempty"` +} + +// Platform specifies the characteristics of a computing environment +// and allows registry administrators to define required platforms for image validation. +// Administrators can select specific architectures and operating systems that must exist +// in the registry. This ensures that all image indexes uploaded to the registry are valid +// for the specified platforms. type Platform struct { // Architecture is the architecture for this platform Architecture string `yaml:"architecture,omitempty"` + // OS is the operating system for this platform OS string `yaml:"os,omitempty"` } @@ -614,6 +696,11 @@ type Proxy struct { TTL *time.Duration `yaml:"ttl,omitempty"` } +// ExecConfig defines the configuration for executing a command as a credential helper. +// This allows the registry to authenticate against an upstream registry by executing a +// specified command to obtain credentials. The command can be re-executed based on the +// configured lifetime, enabling the registry to run as a pull-through cache that manages +// its authentication dynamically. type ExecConfig struct { // Command is the command to execute. Command string `yaml:"command"` @@ -626,33 +713,44 @@ type ExecConfig struct { Lifetime *time.Duration `yaml:"lifetime,omitempty"` } +// Validation configures validation options for the registry. type Validation struct { // Enabled enables the other options in this section. This field is // deprecated in favor of Disabled. Enabled bool `yaml:"enabled,omitempty"` + // Disabled disables the other options in this section. Disabled bool `yaml:"disabled,omitempty"` + // Manifests configures manifest validation. Manifests ValidationManifests `yaml:"manifests,omitempty"` } +// ValidationManifests configures validation rules for manifests pushed to the registry. type ValidationManifests struct { // URLs configures validation for URLs in pushed manifests. - URLs struct { - // Allow specifies regular expressions (https://godoc.org/regexp/syntax) - // that URLs in pushed manifests must match. - Allow []string `yaml:"allow,omitempty"` - // Deny specifies regular expressions (https://godoc.org/regexp/syntax) - // that URLs in pushed manifests must not match. - Deny []string `yaml:"deny,omitempty"` - } `yaml:"urls,omitempty"` + URLs URLs `yaml:"urls,omitempty"` + // ImageIndexes configures validation of image indexes Indexes ValidationIndexes `yaml:"indexes,omitempty"` } +// URLs defines validation rules for URLs found in the manifests pushed to the registry. +type URLs struct { + // Allow specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must match. 
+ Allow []string `yaml:"allow,omitempty"` + + // Deny specifies regular expressions (https://godoc.org/regexp/syntax) + // that URLs in pushed manifests must not match. + Deny []string `yaml:"deny,omitempty"` +} + +// ValidationIndexes configures validation rules for image indexes within the manifest. type ValidationIndexes struct { // Platforms configures the validation applies to the platform images included in an image index Platforms Platforms `yaml:"platforms"` + // PlatformList filters the set of platforms to validate for image existence. PlatformList []Platform `yaml:"platformlist,omitempty"` } @@ -735,17 +833,38 @@ func Parse(rd io.Reader) (*Configuration, error) { return config, nil } +// RedisOptions represents the configuration options for Redis, which are +// provided by the redis package. This struct can be used to configure the +// connection to Redis in a universal (clustered or standalone) setup. type RedisOptions = redis.UniversalOptions +// RedisTLSOptions configures the TLS (Transport Layer Security) settings for +// Redis connections, allowing secure communication over the network. type RedisTLSOptions struct { - Certificate string `yaml:"certificate,omitempty"` - Key string `yaml:"key,omitempty"` - ClientCAs []string `yaml:"clientcas,omitempty"` + // Certificate specifies the path to the certificate file for TLS authentication. + // This certificate is used to establish a secure connection with the Redis server. + Certificate string `yaml:"certificate,omitempty"` + + // Key specifies the path to the private key file associated with the certificate. + // This key is used to authenticate the client during the TLS handshake. + Key string `yaml:"key,omitempty"` + + // ClientCAs specifies a list of certificates to be used to verify the server's + // certificate during the TLS handshake. This can be used for mutual TLS authentication. + ClientCAs []string `yaml:"clientcas,omitempty"` } +// Redis represents the configuration for connecting to a Redis server. It includes +// both the basic connection options and optional TLS settings to secure the connection. type Redis struct { - Options RedisOptions `yaml:",inline"` - TLS RedisTLSOptions `yaml:"tls,omitempty"` + // Options provides the configuration for connecting to Redis, including + // options for both clustered and standalone Redis setups. It is provided inline + // from the `redis.UniversalOptions` struct. + Options RedisOptions `yaml:",inline"` + + // TLS contains the TLS settings for secure communication with the Redis server. + // If specified, these settings will enable encryption and authentication via TLS. 
+ TLS RedisTLSOptions `yaml:"tls,omitempty"` } func (c Redis) MarshalYAML() (interface{}, error) { diff --git a/vendor/github.com/distribution/distribution/v3/internal/client/transport/http_reader.go b/vendor/github.com/distribution/distribution/v3/internal/client/transport/http_reader.go index e0833e9136..46577303e3 100644 --- a/vendor/github.com/distribution/distribution/v3/internal/client/transport/http_reader.go +++ b/vendor/github.com/distribution/distribution/v3/internal/client/transport/http_reader.go @@ -165,7 +165,7 @@ func (hrs *HTTPReadSeeker) reset() { } } -func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { +func (hrs *HTTPReadSeeker) reader() (_ io.Reader, retErr error) { if hrs.err != nil { return nil, hrs.err } @@ -191,6 +191,11 @@ func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { if err != nil { return nil, err } + defer func() { + if retErr != nil { + _ = resp.Body.Close() + } + }() // Normally would use client.SuccessStatus, but that would be a cyclic // import @@ -276,8 +281,11 @@ func (hrs *HTTPReadSeeker) reader() (io.Reader, error) { hrs.rc = body } else { - defer resp.Body.Close() if hrs.errorHandler != nil { + // Closing the body should be handled by the existing defer, + // but in case a custom "errHandler" is used that doesn't return + // an error, we close the body regardless. + defer resp.Body.Close() return nil, hrs.errorHandler(resp) } return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) diff --git a/vendor/github.com/distribution/distribution/v3/registry/root.go b/vendor/github.com/distribution/distribution/v3/registry/root.go index 15f95f3d26..a4dc97b8e2 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/root.go +++ b/vendor/github.com/distribution/distribution/v3/registry/root.go @@ -18,6 +18,7 @@ func init() { RootCmd.AddCommand(GCCmd) GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") GCCmd.Flags().BoolVarP(&removeUntagged, "delete-untagged", "m", false, "delete manifests that are not currently referenced via tag") + GCCmd.Flags().BoolVarP(&quiet, "quiet", "q", false, "silence output") RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") } @@ -39,6 +40,7 @@ var RootCmd = &cobra.Command{ var ( dryRun bool removeUntagged bool + quiet bool ) // GCCmd is the cobra command that corresponds to the garbage-collect subcommand @@ -77,6 +79,7 @@ var GCCmd = &cobra.Command{ err = storage.MarkAndSweep(ctx, driver, registry, storage.GCOpts{ DryRun: dryRun, RemoveUntagged: removeUntagged, + Quiet: quiet, }) if err != nil { fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go b/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go index d909f93adc..8b4ae73c71 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/garbagecollect.go @@ -20,6 +20,7 @@ func emit(format string, a ...interface{}) { type GCOpts struct { DryRun bool RemoveUntagged bool + Quiet bool } // ManifestDel contains manifest structure which will be deleted @@ -41,7 +42,9 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis deleteLayerSet := make(map[string][]digest.Digest) manifestArr := make([]ManifestDel, 0) err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { - emit(repoName) + 
if !opts.Quiet { + emit(repoName) + } var err error named, err := reference.WithName(repoName) @@ -77,7 +80,9 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis allTags, err := repository.Tags(ctx).All(ctx) if err != nil { if _, ok := err.(distribution.ErrRepositoryUnknown); ok { - emit("manifest tags path of repository %s does not exist", repoName) + if !opts.Quiet { + emit("manifest tags path of repository %s does not exist", repoName) + } return nil } return fmt.Errorf("failed to retrieve tags %v", err) @@ -87,14 +92,18 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis } } // Mark the manifest's blob - emit("%s: marking manifest %s ", repoName, dgst) + if !opts.Quiet { + emit("%s: marking manifest %s ", repoName, dgst) + } markSet[dgst] = struct{}{} return markManifestReferences(dgst, manifestService, ctx, func(d digest.Digest) bool { _, marked := markSet[d] if !marked { markSet[d] = struct{}{} - emit("%s: marking blob %s", repoName, d) + if !opts.Quiet { + emit("%s: marking blob %s", repoName, d) + } } return marked }) @@ -132,7 +141,7 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis return fmt.Errorf("failed to mark: %v", err) } - manifestArr = unmarkReferencedManifest(manifestArr, markSet) + manifestArr = unmarkReferencedManifest(manifestArr, markSet, opts.Quiet) // sweep vacuum := NewVacuum(ctx, storageDriver) @@ -156,9 +165,13 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis if err != nil { return fmt.Errorf("error enumerating blobs: %v", err) } - emit("\n%d blobs marked, %d blobs and %d manifests eligible for deletion", len(markSet), len(deleteSet), len(manifestArr)) + if !opts.Quiet { + emit("\n%d blobs marked, %d blobs and %d manifests eligible for deletion", len(markSet), len(deleteSet), len(manifestArr)) + } for dgst := range deleteSet { - emit("blob eligible for deletion: %s", dgst) + if !opts.Quiet { + emit("blob eligible for deletion: %s", dgst) + } if opts.DryRun { continue } @@ -170,7 +183,9 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis for repo, dgsts := range deleteLayerSet { for _, dgst := range dgsts { - emit("%s: layer link eligible for deletion: %s", repo, dgst) + if !opts.Quiet { + emit("%s: layer link eligible for deletion: %s", repo, dgst) + } if opts.DryRun { continue } @@ -185,11 +200,14 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis } // unmarkReferencedManifest filters out manifest present in markSet -func unmarkReferencedManifest(manifestArr []ManifestDel, markSet map[digest.Digest]struct{}) []ManifestDel { +func unmarkReferencedManifest(manifestArr []ManifestDel, markSet map[digest.Digest]struct{}, quietOutput bool) []ManifestDel { filtered := make([]ManifestDel, 0) for _, obj := range manifestArr { if _, ok := markSet[obj.Digest]; !ok { - emit("manifest eligible for deletion: %s", obj) + if !quietOutput { + emit("manifest eligible for deletion: %s", obj) + } + filtered = append(filtered, obj) } } diff --git a/vendor/github.com/distribution/distribution/v3/registry/storage/registry.go b/vendor/github.com/distribution/distribution/v3/registry/storage/registry.go index ab8fb1f9ae..5b4d6c4249 100644 --- a/vendor/github.com/distribution/distribution/v3/registry/storage/registry.go +++ b/vendor/github.com/distribution/distribution/v3/registry/storage/registry.go @@ -261,6 +261,10 @@ func (repo *repository) Manifests(ctx context.Context, options 
...distribution.M linkPath: manifestRevisionLinkPath, } + if repo.descriptorCache != nil { + statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) + } + if repo.registry.blobDescriptorServiceFactory != nil { statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter) } diff --git a/vendor/github.com/distribution/distribution/v3/version/version.go b/vendor/github.com/distribution/distribution/v3/version/version.go index 92ba593a24..63bbb2a0ea 100644 --- a/vendor/github.com/distribution/distribution/v3/version/version.go +++ b/vendor/github.com/distribution/distribution/v3/version/version.go @@ -8,7 +8,7 @@ var mainpkg = "github.com/distribution/distribution/v3" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var version = "v3.0.0-rc.3+unknown" +var version = "v3.0.0+unknown" // revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS index ad1abd4964..c5a480b5e5 100644 --- a/vendor/github.com/docker/cli/AUTHORS +++ b/vendor/github.com/docker/cli/AUTHORS @@ -48,6 +48,7 @@ Alfred Landrum Ali Rostami Alicia Lauerman Allen Sun +Allie Sadler Alvin Deng Amen Belayneh Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> @@ -81,6 +82,7 @@ Antonis Kalipetis Anusha Ragunathan Ao Li Arash Deshmeh +Archimedes Trajano Arko Dasgupta Arnaud Porterie Arnaud Rebillout @@ -88,6 +90,7 @@ Arthur Peka Ashly Mathew Ashwini Oruganti Aslam Ahemad +Austin Vazquez Azat Khuyiyakhmetov Bardia Keyoumarsi Barnaby Gray @@ -132,6 +135,7 @@ Cao Weiwei Carlo Mion Carlos Alexandro Becker Carlos de Paula +Carston Schilds Casey Korver Ce Gao Cedric Davies @@ -189,6 +193,7 @@ Daisuke Ito dalanlan Damien Nadé Dan Cotora +Dan Wallis Danial Gharib Daniel Artine Daniel Cassidy @@ -237,6 +242,7 @@ Deshi Xiao Dharmit Shah Dhawal Yogesh Bhanushali Dieter Reuter +Dilep Dev <34891655+DilepDev@users.noreply.github.com> Dima Stopel Dimitry Andric Ding Fei @@ -308,6 +314,8 @@ George MacRorie George Margaritis George Xie Gianluca Borello +Giau. Tran Minh +Giedrius Jonikas Gildas Cuisinier Gio d'Amelio Gleb Stsenov @@ -344,6 +352,7 @@ Hugo Gabriel Eyherabide huqun Huu Nguyen Hyzhou Zhy +Iain MacDonald Iain Samuel McLean Elder Ian Campbell Ian Philpot @@ -393,6 +402,7 @@ Jesse Adametz Jessica Frazelle Jezeniel Zapanta Jian Zhang +Jianyong Wu Jie Luo Jilles Oldenbeuving Jim Chen @@ -446,6 +456,7 @@ Julian Julien Barbier Julien Kassar Julien Maitrehenry +Julio Cesar Garcia Justas Brazauskas Justin Chadwell Justin Cormack @@ -490,19 +501,22 @@ Kunal Kushwaha Kyle Mitofsky Lachlan Cooper Lai Jiangshan +Lajos Papp Lars Kellogg-Stedman Laura Brehm Laura Frank Laurent Erignoux +Laurent Goderre Lee Gaines Lei Jitang Lennie +lentil32 Leo Gallucci Leonid Skorospelov Lewis Daly Li Fu Bang Li Yi -Li Yi +Li Zeghong Liang-Chi Hsieh Lihua Tang Lily Guo @@ -515,6 +529,7 @@ lixiaobing10051267 Lloyd Dewolf Lorenzo Fontana Louis Opter +Lovekesh Kumar Luca Favatella Luca Marturana Lucas Chan @@ -559,6 +574,7 @@ Matt Robenolt Matteo Orefice Matthew Heon Matthieu Hauglustaine +Matthieu MOREL Mauro Porras P Max Shytikov Max-Julian Pogner @@ -566,6 +582,7 @@ Maxime Petazzoni Maximillian Fan Xavier Mei ChunTao Melroy van den Berg +Mert Şişmanoğlu Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. 
Smith @@ -598,7 +615,9 @@ Mindaugas Rukas Miroslav Gula Misty Stanley-Jones Mohammad Banikazemi +Mohammad Hossein Mohammed Aaqib Ansari +Mohammed Aminu Futa Mohini Anne Dsouza Moorthy RS Morgan Bauer @@ -633,9 +652,11 @@ Nicolas De Loof Nikhil Chawla Nikolas Garofil Nikolay Milovanov +NinaLua Nir Soffer Nishant Totla NIWA Hideyuki +Noah Silas Noah Treuhaft O.S. Tezer Oded Arbel @@ -653,10 +674,12 @@ Patrick Böänziger Patrick Daigle <114765035+pdaig@users.noreply.github.com> Patrick Hemmer Patrick Lang +Patrick St. laurent Paul Paul Kehrer Paul Lietar Paul Mulders +Paul Rogalski Paul Seyfert Paul Weaver Pavel Pospisil @@ -678,7 +701,6 @@ Philip Alexander Etling Philipp Gillé Philipp Schmied Phong Tran -pidster Pieter E Smit pixelistik Pratik Karki @@ -738,6 +760,7 @@ Samuel Cochran Samuel Karp Sandro Jäckel Santhosh Manohar +Sarah Sanders Sargun Dhillon Saswat Bhattacharya Saurabh Kumar @@ -770,6 +793,7 @@ Spencer Brown Spring Lee squeegels Srini Brahmaroutu +Stavros Panakakis Stefan S. Stefan Scherer Stefan Weil @@ -780,6 +804,7 @@ Steve Durrheimer Steve Richards Steven Burgess Stoica-Marcu Floris-Andrei +Stuart Williams Subhajit Ghosh Sun Jianbo Sune Keller @@ -867,6 +892,7 @@ Wang Yumu <37442693@qq.com> Wataru Ishida Wayne Song Wen Cheng Ma +Wenlong Zhang Wenzhi Liang Wes Morgan Wewang Xiaorenfine @@ -908,3 +934,4 @@ Zhuo Zhi Átila Camurça Alves Александр Менщиков <__Singleton__@hackerdom.ru> 徐俊杰 +林博仁 Buo-ren Lin diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index b9f57b72a0..530c522856 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -3,12 +3,14 @@ package configfile import ( "encoding/base64" "encoding/json" + "fmt" "io" "os" "path/filepath" "strings" "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/memorystore" "github.com/docker/cli/cli/config/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -46,6 +48,31 @@ type ConfigFile struct { Experimental string `json:"experimental,omitempty"` } +type configEnvAuth struct { + Auth string `json:"auth"` +} + +type configEnv struct { + AuthConfigs map[string]configEnvAuth `json:"auths"` +} + +// DockerEnvConfigKey is an environment variable that contains a JSON encoded +// credential config. It only supports storing the credentials as a base64 +// encoded string in the format base64("username:pat"). +// +// Adding additional fields will produce a parsing error. +// +// Example: +// +// { +// "auths": { +// "example.test": { +// "auth": base64-encoded-username-pat +// } +// } +// } +const DockerEnvConfigKey = "DOCKER_AUTH_CONFIG" + // ProxyConfig contains proxy configuration settings type ProxyConfig struct { HTTPProxy string `json:"httpProxy,omitempty"` @@ -152,7 +179,8 @@ func (configFile *ConfigFile) Save() (retErr error) { return err } defer func() { - temp.Close() + // ignore error as the file may already be closed when we reach this. 
+ _ = temp.Close() if retErr != nil { if err := os.Remove(temp.Name()); err != nil { logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") @@ -169,10 +197,16 @@ func (configFile *ConfigFile) Save() (retErr error) { return errors.Wrap(err, "error closing temp file") } - // Handle situation where the configfile is a symlink + // Handle situation where the configfile is a symlink, and allow for dangling symlinks cfgFile := configFile.Filename - if f, err := os.Readlink(cfgFile); err == nil { + if f, err := filepath.EvalSymlinks(cfgFile); err == nil { cfgFile = f + } else if os.IsNotExist(err) { + // extract the path from the error if the configfile does not exist or is a dangling symlink + var pathError *os.PathError + if errors.As(err, &pathError) { + cfgFile = pathError.Path + } } // Try copying the current config file (if any) ownership and permissions @@ -256,10 +290,64 @@ func decodeAuth(authStr string) (string, string, error) { // GetCredentialsStore returns a new credentials store from the settings in the // configuration file func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + store := credentials.NewFileStore(configFile) + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { - return newNativeStore(configFile, helper) + store = newNativeStore(configFile, helper) + } + + envConfig := os.Getenv(DockerEnvConfigKey) + if envConfig == "" { + return store + } + + authConfig, err := parseEnvConfig(envConfig) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + // use DOCKER_AUTH_CONFIG if set + // it uses the native or file store as a fallback to fetch and store credentials + envStore, err := memorystore.New( + memorystore.WithAuthConfig(authConfig), + memorystore.WithFallbackStore(store), + ) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "Failed to create credential store from DOCKER_AUTH_CONFIG: ", err) + return store + } + + return envStore +} + +func parseEnvConfig(v string) (map[string]types.AuthConfig, error) { + envConfig := &configEnv{} + decoder := json.NewDecoder(strings.NewReader(v)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(envConfig); err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + if decoder.More() { + return nil, errors.New("DOCKER_AUTH_CONFIG does not support more than one JSON object") + } + + authConfigs := make(map[string]types.AuthConfig) + for addr, envAuth := range envConfig.AuthConfigs { + if envAuth.Auth == "" { + return nil, fmt.Errorf("DOCKER_AUTH_CONFIG environment variable is missing key `auth` for %s", addr) + } + username, password, err := decodeAuth(envAuth.Auth) + if err != nil { + return nil, err + } + authConfigs[addr] = types.AuthConfig{ + Username: username, + Password: password, + ServerAddress: addr, + } } - return credentials.NewFileStore(configFile) + return authConfigs, nil } // var for unit testing. 
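To make the expected shape of `DOCKER_AUTH_CONFIG` concrete, here is a minimal sketch that produces a value `parseEnvConfig` above would accept; the registry address `example.test` and the credentials are placeholders:

```
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Entries under "auths" may carry only a single "auth" field holding
	// base64("username:password"); any extra field fails the
	// DisallowUnknownFields decode, and only one JSON object is allowed.
	auth := base64.StdEncoding.EncodeToString([]byte("myuser:mypat"))
	cfg := map[string]map[string]map[string]string{
		"auths": {
			"example.test": {"auth": auth},
		},
	}
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out)) // export DOCKER_AUTH_CONFIG='<printed JSON>'
}
```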
diff --git a/vendor/github.com/docker/cli/cli/config/memorystore/store.go b/vendor/github.com/docker/cli/cli/config/memorystore/store.go new file mode 100644 index 0000000000..199083464e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/memorystore/store.go @@ -0,0 +1,126 @@ +//go:build go1.23 + +package memorystore + +import ( + "errors" + "fmt" + "maps" + "os" + "sync" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" +) + +var errValueNotFound = errors.New("value not found") + +func IsErrValueNotFound(err error) bool { + return errors.Is(err, errValueNotFound) +} + +type Config struct { + lock sync.RWMutex + memoryCredentials map[string]types.AuthConfig + fallbackStore credentials.Store +} + +func (e *Config) Erase(serverAddress string) error { + e.lock.Lock() + defer e.lock.Unlock() + delete(e.memoryCredentials, serverAddress) + + if e.fallbackStore != nil { + err := e.fallbackStore.Erase(serverAddress) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err) + } + } + + return nil +} + +func (e *Config) Get(serverAddress string) (types.AuthConfig, error) { + e.lock.RLock() + defer e.lock.RUnlock() + authConfig, ok := e.memoryCredentials[serverAddress] + if !ok { + if e.fallbackStore != nil { + return e.fallbackStore.Get(serverAddress) + } + return types.AuthConfig{}, errValueNotFound + } + return authConfig, nil +} + +func (e *Config) GetAll() (map[string]types.AuthConfig, error) { + e.lock.RLock() + defer e.lock.RUnlock() + creds := make(map[string]types.AuthConfig) + + if e.fallbackStore != nil { + fileCredentials, err := e.fallbackStore.GetAll() + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "memorystore: ", err) + } else { + creds = fileCredentials + } + } + + maps.Copy(creds, e.memoryCredentials) + return creds, nil +} + +func (e *Config) Store(authConfig types.AuthConfig) error { + e.lock.Lock() + defer e.lock.Unlock() + e.memoryCredentials[authConfig.ServerAddress] = authConfig + + if e.fallbackStore != nil { + return e.fallbackStore.Store(authConfig) + } + return nil +} + +// WithFallbackStore sets a fallback store. +// +// Write operations will be performed on both the memory store and the +// fallback store. +// +// Read operations will first check the memory store, and if the credential +// is not found, it will then check the fallback store. +// +// Retrieving all credentials will return from both the memory store and the +// fallback store, merging the results from both stores into a single map. +// +// Data stored in the memory store will take precedence over data in the +// fallback store. +func WithFallbackStore(store credentials.Store) Options { + return func(s *Config) error { + s.fallbackStore = store + return nil + } +} + +// WithAuthConfig allows to set the initial credentials in the memory store. 
+func WithAuthConfig(config map[string]types.AuthConfig) Options { + return func(s *Config) error { + s.memoryCredentials = config + return nil + } +} + +type Options func(*Config) error + +// New creates a new in memory credential store +func New(opts ...Options) (credentials.Store, error) { + m := &Config{ + memoryCredentials: make(map[string]types.AuthConfig), + } + for _, opt := range opts { + if err := opt(m); err != nil { + return nil, err + } + } + return m, nil +} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go index 056af6b842..95eb27c868 100644 --- a/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -7,8 +7,8 @@ type AuthConfig struct { Auth string `json:"auth,omitempty"` // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. + // + // Deprecated: This field is deprecated since docker 1.11 (API v1.23) and will be removed in the next release. Email string `json:"email,omitempty"` ServerAddress string `json:"serveraddress,omitempty"` diff --git a/vendor/github.com/docker/distribution/.dockerignore b/vendor/github.com/docker/distribution/.dockerignore deleted file mode 100644 index e660fd93d3..0000000000 --- a/vendor/github.com/docker/distribution/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -bin/ diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore deleted file mode 100644 index 4cf7888e92..0000000000 --- a/vendor/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. -*.sublime-project -*.sublime-workspace -.idea/* diff --git a/vendor/github.com/docker/distribution/.golangci.yml b/vendor/github.com/docker/distribution/.golangci.yml deleted file mode 100644 index 61dd0e00eb..0000000000 --- a/vendor/github.com/docker/distribution/.golangci.yml +++ /dev/null @@ -1,33 +0,0 @@ -linters: - enable: - - staticcheck - - unconvert - - gofmt - - goimports - - golint - - ineffassign - - vet - - unused - - misspell - disable: - - errcheck - -linters-settings: - revive: - rules: - # TODO(thaJeztah): temporarily disabled the "unused-parameter" check. - # It produces many warnings, and some of those may need to be looked at. - - name: unused-parameter - disabled: true - -run: - deadline: 2m - skip-dirs: - - vendor - -issues: - exclude-rules: - # io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch. 
- - text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16" - linters: - - staticcheck diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d7b832d9ea..0000000000 --- a/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,54 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita -Yu Wang yuwaMSFT2 -Yu Wang Yu Wang (UC) -Olivier Gambier dmp -Olivier Gambier Olivier -Olivier Gambier Olivier -Elsan Li 李楠 elsanli(李楠) -Rui Cao ruicao -Gwendolynne Barr gbarr01 -Haibing Zhou 周海兵 zhouhaibing089 -Feng Honglin tifayuki -Helen Xie Helen-xie -Mike Brown Mike Brown -Manish Tomar Manish Tomar -Sakeven Jiang sakeven -Milos Gajdos Milos Gajdos -Derek McGowan Derek McGowa -Adrian Plata Adrian Plata <@users.noreply.github.com> -Sebastiaan van Stijn Sebastiaan van Stijn -Vishesh Jindal Vishesh Jindal -Wang Yan Wang Yan -Chris Patterson Chris Patterson -Eohyung Lee Eohyung Lee -João Pereira <484633+joaodrp@users.noreply.github.com> -Smasherr Smasherr -Thomas Berger Thomas Berger -Samuel Karp Samuel Karp -Justin Cormack -sayboras -CrazyMax <1951866+crazy-max@users.noreply.github.com> -Hayley Swimelar -Jose D. Gomez R -Shengjing Zhu -Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> -James Hewitt -Marcus Pettersen Irgens -Ben Manuel diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index 4c43b03cb7..0000000000 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... 
or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry --version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `BUILDTAGS`. 
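For reference, the in-memory credential store added at the top of this section (the `New`/`WithAuthConfig` hunk) can be exercised roughly as follows. This is a minimal sketch: the import path of the new package is an assumption, since its file header falls outside this excerpt, and only the constructor and option shown above are taken as given; `Get` comes from the `credentials.Store` interface that `New` returns.

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/types"
	// Assumed import path for the new in-memory store; the actual package
	// path is declared in a file header earlier in this patch.
	memorystore "github.com/docker/cli/cli/config/memorystore"
)

func main() {
	// Seed the store with static credentials via the WithAuthConfig option.
	store, err := memorystore.New(memorystore.WithAuthConfig(map[string]types.AuthConfig{
		"registry.example.com": {Username: "ci-bot", Password: "s3cret"},
	}))
	if err != nil {
		panic(err)
	}

	// New returns a credentials.Store, so entries are resolved by server
	// address through the usual Get/GetAll/Store/Erase methods.
	auth, err := store.Get("registry.example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(auth.Username) // ci-bot
}
```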
diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 4c067d9e7e..0000000000 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,148 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -### Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry --version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). 
It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. 
- In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index ebd42c242b..0000000000 --- a/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,60 +0,0 @@ -# syntax=docker/dockerfile:1 - -ARG GO_VERSION=1.20.8 -ARG ALPINE_VERSION=3.18 -ARG XX_VERSION=1.2.1 - -FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx -FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base -COPY --from=xx / / -RUN apk add --no-cache bash coreutils file git -ENV GO111MODULE=auto -ENV CGO_ENABLED=0 -WORKDIR /go/src/github.com/docker/distribution - -FROM base AS version -ARG PKG="github.com/docker/distribution" -RUN --mount=target=. \ - VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \ - echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \ - echo -n "${VERSION}" | tee /tmp/.version; - -FROM base AS build -ARG TARGETPLATFORM -ARG LDFLAGS="-s -w" -ARG BUILDTAGS="include_oss,include_gcs" -RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=target=/go/pkg/mod,type=cache \ - --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \ - set -x ; xx-go build -tags "${BUILDTAGS}" -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \ - && xx-verify --static /usr/bin/registry - -FROM scratch AS binary -COPY --from=build /usr/bin/registry / - -FROM base AS releaser -ARG TARGETOS -ARG TARGETARCH -ARG TARGETVARIANT -WORKDIR /work -RUN --mount=from=binary,target=/build \ - --mount=type=bind,target=/src \ - --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \ - VERSION=$(cat /tmp/.version) \ - && mkdir -p /out \ - && cp /build/registry /src/README.md /src/LICENSE . 
\ - && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \ - && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256" - -FROM scratch AS artifact -COPY --from=releaser /out / - -FROM alpine:${ALPINE_VERSION} -RUN apk add --no-cache ca-certificates -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml -COPY --from=binary /registry /bin/registry -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index 3183620c57..0000000000 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,243 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# - -[Rules] - - [Rules.maintainers] - - title = "What is a maintainer?" - - text = """ -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. -3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. -""" - - [Rules.reviewer] - - title = "What is a reviewer?" - - text = """ -A reviewer is a core role within the project. -They share in reviewing issues and pull requests and their LGTM count towards the -required LGTM count to merge a code change into the project. - -Reviewers are part of the organization but do not have write access. -Becoming a reviewer is a core aspect in the journey to becoming a maintainer. -""" - - [Rules.adding-maintainers] - - title = "How are maintainers added?" - - text = """ -Maintainers are first and foremost contributors that have shown they are -committed to the long term success of a project. Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, pull -request review, and triage of issues in the project for more than three months. - -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. - -Periodically, the existing maintainers curate a list of contributors that have -shown regular activity on the project over the prior months. From this list, -maintainer candidates are selected and proposed on the maintainers mailing list. 
- -After a candidate has been announced on the maintainers mailing list, the -existing maintainers are given five business days to discuss the candidate, -raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing -list. Only maintainers of the repository that the candidate is proposed for are -allowed to vote. - -If a candidate is approved, a maintainer will contact the candidate to invite -the candidate to open a pull request that adds the contributor to the -MAINTAINERS file. The candidate becomes a maintainer once the pull request is -merged. -""" - - [Rules.stepping-down-policy] - - title = "Stepping down policy" - - text = """ -Life priorities, interests, and passions can change. If you're a maintainer but -feel you must remove yourself from the list, inform other maintainers that you -intend to step down, and if possible, help find someone to pick up your work. -At the very least, ensure your work can be continued where you left off. - -After you've informed other maintainers, create a pull request to remove -yourself from the MAINTAINERS file. -""" - - [Rules.inactive-maintainers] - - title = "Removal of inactive maintainers" - - text = """ -Similar to the procedure for adding new maintainers, existing maintainers can -be removed from the list if they do not show significant activity on the -project. Periodically, the maintainers review the list of maintainers and their -activity over the last three months. - -If a maintainer has shown insufficient activity over this period, a neutral -person will contact the maintainer to ask if they want to continue being -a maintainer. If the maintainer decides to step down as a maintainer, they -open a pull request to be removed from the MAINTAINERS file. - -If the maintainer wants to remain a maintainer, but is unable to perform the -required duties they can be removed with a vote of at least 66% of -the current maintainers. An e-mail is sent to the -mailing list, inviting maintainers of the project to vote. The voting period is -five business days. Issues related to a maintainer's performance should be -discussed with them among the other maintainers so that they are not surprised -by a pull request removing them. -""" - - [Rules.decisions] - - title = "How are decisions made?" - - text = """ -Short answer: EVERYTHING IS A PULL REQUEST. - -distribution is an open-source project with an open design philosophy. This means -that the repository is the source of truth for EVERY aspect of the project, -including its philosophy, design, road map, and APIs. *If it's part of the -project, it's in the repo. If it's in the repo, it's part of the project.* - -As a result, all decisions can be expressed as changes to the repository. An -implementation change is a change to the source code. An API change is a change -to the API specification. A philosophy change is a change to the philosophy -manifesto, and so on. - -All decisions affecting distribution, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. 
-""" - - [Rules.DCO] - - title = "Helping contributors with the DCO" - - text = """ -The [DCO or `Sign your work`]( -https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some distribution contributors are not as familiar with `git`, or have used a web -based editor, and thus asking them to `git commit --amend -s` is not the best -way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. -The most trivial way for a contributor to allow the maintainer to do this, is to -add a DCO signature in a pull requests's comment, or a maintainer can simply -note that the change is sufficiently trivial that it does not substantially -change the existing contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. -""" - - [Rules."no direct push"] - - title = "I'm a maintainer. Should I make pull requests too?" - - text = """ -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. -""" - - [Rules.tsc] - - title = "Conflict Resolution and technical disputes" - - text = """ -distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." - """ - - [Rules.meta] - - title = "How is this process changed?" - - text = "Just like everything else: by making a pull request :)" - -# Current project organization -[Org] - - [Org.Maintainers] - people = [ - "dmcgowan", - "dmp42", - "stevvooe", - ] - [Org.Reviewers] - people = [ - "manishtomar", - "caervs", - "davidswu", - "RobbKistler" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.caervs] - Name = "Ryan Abrams" - Email = "rdabrams@gmail.com" - GitHub = "caervs" - - [people.davidswu] - Name = "David Wu" - Email = "dwu7401@gmail.com" - GitHub = "davidswu" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.manishtomar] - Name = "Manish Tomar" - Email = "manish.tomar@docker.com" - GitHub = "manishtomar" - - [people.RobbKistler] - Name = "Robb Kistler" - Email = "robb.kistler@docker.com" - GitHub = "RobbKistler" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index dcdbcb5479..0000000000 --- a/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -# Root directory of the project (absolute path). -ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) -REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) - - -PKG=github.com/docker/distribution - -# Project packages. -PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) -INTEGRATION_PACKAGE=${PKG} -COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) - - -# Project binaries. 
-COMMANDS=registry digest registry-api-descriptor-template - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -WHALE = "+" - -# Go files -# -TESTFLAGS_RACE= -GOFILES=$(shell find . -type f -name '*.go') -GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) -GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' - -BINARIES=$(addprefix bin/,$(COMMANDS)) - -# Flags passed to `go test` -TESTFLAGS ?= -v $(TESTFLAGS_RACE) -TESTFLAGS_PARALLEL ?= 8 - -.PHONY: all build binaries check clean test test-race test-full integration coverage -.DEFAULT: all - -all: binaries - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - @echo "$(WHALE) $@" - ./version/version.sh > $@ - -check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") - @echo "$(WHALE) $@" - @GO111MODULE=off golangci-lint --build-tags "${BUILDTAGS}" run - -test: ## run tests, except integration test with test.short - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-race: ## run tests, except integration test with test.short and race - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-full: ## run tests, except integration tests - @echo "$(WHALE) $@" - @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -integration: ## run integration tests - @echo "$(WHALE) $@" - @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} - -coverage: ## generate coverprofiles from the unit tests - @echo "$(WHALE) $@" - @rm -f coverage.txt - @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null - @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ - go test ${GO_TAGS} ${TESTFLAGS} \ - -cover \ - -coverprofile=profile.out \ - -covermode=atomic $$pkg || exit; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi; \ - done ) - -FORCE: - -# Build a binary from a cmd. -bin/%: cmd/% FORCE - @echo "$(WHALE) $@${BINARY_SUFFIX}" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< - -binaries: $(BINARIES) ## build binaries - @echo "$(WHALE) $@" - -build: - @echo "$(WHALE) $@" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) - -clean: ## clean up binaries - @echo "$(WHALE) $@" - @rm -f $(BINARIES) diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index e513c18e96..0000000000 --- a/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository provides the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. 
- - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. - -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md) -may be the better choice. 
- -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - -
-| Channel       | Where                                                                     |
-|---------------|---------------------------------------------------------------------------|
-| IRC           | #docker-distribution on FreeNode                                          |
-| Issue Tracker | github.com/docker/distribution/issues                                     |
-| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution  |
-| Mailing List  | docker@dockerproject.org                                                  |
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afec..0000000000 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. 
- -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service. - -#### Extensibility - -The registry should provide extension points to add functionality. By keeping -the scope narrow, but providing the ability to add functionality. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. - -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigid design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to both -decouple search functionality from the registry. The makes the registry -simpler to deploy, especially in use cases where search is not needed, and -let's us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding on -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone attempts to address the problem with the existing tools -and propose it as a standard search API or uses it to inform a standardization -process. Once this has been explored, we integrate with the docker client. 
- -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deleting seems simple, there are a number -mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraph discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal but ideally, both are supported. Generally, according to this rule, we -err on holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. - -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read- after-write consistency, since this is the lower common denominator -among the storage drivers. This is mitigated by writing values in reverse- -dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we also can try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation. 
The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: 1. maintaining a consistent consensus - of reference counts across a set of Registries and 2. Building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second.. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination. -- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly effect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. 
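To make the roadmap's garbage-collection discussion concrete: the "Lock the World GC" option amounts to a mark-and-sweep pass over the blob store. The sketch below illustrates just that core idea with hypothetical stand-in types; it is not the distribution package's actual storage-driver API.

```go
package main

import "fmt"

// blob is a hypothetical stand-in for a content-addressed blob; the real
// registry identifies blobs by digest through a storage driver.
type blob struct{ digest string }

// markAndSweep sketches "Lock the World GC": with writes halted, mark every
// blob referenced by some manifest, then report the unmarked ones as garbage.
func markAndSweep(manifests map[string][]blob, all []blob) []blob {
	marked := make(map[string]bool)
	for _, refs := range manifests {
		for _, b := range refs {
			marked[b.digest] = true // mark: still referenced by a manifest
		}
	}
	var garbage []blob
	for _, b := range all {
		if !marked[b.digest] {
			garbage = append(garbage, b) // sweep: no manifest references it
		}
	}
	return garbage
}

func main() {
	manifests := map[string][]blob{
		"repo/a@sha256:m1": {{"sha256:l1"}, {"sha256:l2"}},
	}
	all := []blob{{"sha256:l1"}, {"sha256:l2"}, {"sha256:l3"}}
	fmt.Println(markAndSweep(manifests, all)) // [{sha256:l3}]
}
```

The write freeze is what makes the mark phase sound; without it, the concurrent manifest-_B_ race described above reappears.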
- diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index 671372abf4..0000000000 --- a/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,265 +0,0 @@ -package distribution - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/distribution/reference" - "github.com/opencontainers/go-digest" - v1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // This should only be used when referring to a manifest. - Platform *v1.Platform `json:"platform,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. 
The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotators canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. 
- Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier. With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. - Stat *Descriptor - } -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet- - // stream" to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls even after a commit that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represent the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. 
-type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708e..0000000000 --- a/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/github.com/docker/distribution/docker-bake.hcl b/vendor/github.com/docker/distribution/docker-bake.hcl deleted file mode 100644 index 91686e608a..0000000000 --- a/vendor/github.com/docker/distribution/docker-bake.hcl +++ /dev/null @@ -1,56 +0,0 @@ -group "default" { - targets = ["image-local"] -} - -// Special target: https://github.com/docker/metadata-action#bake-definition -target "docker-metadata-action" { - tags = ["registry:local"] -} - -target "binary" { - target = "binary" - output = ["./bin"] -} - -target "artifact" { - target = "artifact" - output = ["./bin"] -} - -target "artifact-all" { - inherits = ["artifact"] - platforms = [ - "linux/amd64", - "linux/arm/v6", - "linux/arm/v7", - "linux/arm64", - "linux/ppc64le", - "linux/s390x" - ] -} - -// Special target: https://github.com/docker/metadata-action#bake-definition -target "docker-metadata-action" { - tags = ["registry:local"] -} - -target "image" { - inherits = ["docker-metadata-action"] -} - -target "image-local" { - inherits = ["image"] - output = ["type=docker"] -} - -target "image-all" { - inherits = ["image"] - platforms = [ - "linux/amd64", - "linux/arm/v6", - "linux/arm/v7", - "linux/arm64", - "linux/ppc64le", - "linux/s390x" - ] -} diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 8e0b788d6c..0000000000 --- a/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,119 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 -// manifest but the registry is configured to reject it -var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. 
Reason may be set, indicating the cause of invalidity.
-type ErrRepositoryNameInvalid struct {
-	Name   string
-	Reason error
-}
-
-func (err ErrRepositoryNameInvalid) Error() string {
-	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
-}
-
-// ErrManifestUnknown is returned if the manifest is not known by the
-// registry.
-type ErrManifestUnknown struct {
-	Name string
-	Tag  string
-}
-
-func (err ErrManifestUnknown) Error() string {
-	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
-}
-
-// ErrManifestUnknownRevision is returned when a manifest cannot be found by
-// revision within a repository.
-type ErrManifestUnknownRevision struct {
-	Name     string
-	Revision digest.Digest
-}
-
-func (err ErrManifestUnknownRevision) Error() string {
-	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
-}
-
-// ErrManifestUnverified is returned when the registry is unable to verify
-// the manifest.
-type ErrManifestUnverified struct{}
-
-func (ErrManifestUnverified) Error() string {
-	return "unverified manifest"
-}
-
-// ErrManifestVerification provides a type to collect errors encountered
-// during manifest verification. Currently, it accepts errors of all types,
-// but it may be narrowed to those involving manifest verification.
-type ErrManifestVerification []error
-
-func (errs ErrManifestVerification) Error() string {
-	var parts []string
-	for _, err := range errs {
-		parts = append(parts, err.Error())
-	}
-
-	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
-}
-
-// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
-type ErrManifestBlobUnknown struct {
-	Digest digest.Digest
-}
-
-func (err ErrManifestBlobUnknown) Error() string {
-	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
-}
-
-// ErrManifestNameInvalid should be used to denote an invalid manifest
-// name. Reason may be set, indicating the cause of invalidity.
-type ErrManifestNameInvalid struct {
-	Name   string
-	Reason error
-}
-
-func (err ErrManifestNameInvalid) Error() string {
-	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
-}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
deleted file mode 100644
index 8f84a220a9..0000000000
--- a/vendor/github.com/docker/distribution/manifests.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package distribution
-
-import (
-	"context"
-	"fmt"
-	"mime"
-
-	"github.com/opencontainers/go-digest"
-)
-
-// Manifest represents a registry object specifying a set of
-// references and an optional target
-type Manifest interface {
-	// References returns a list of objects which make up this manifest.
-	// A reference is anything which can be represented by a
-	// distribution.Descriptor. These can consist of layers, resources or other
-	// manifests.
-	//
-	// While no particular order is required, implementations should return
-	// them from highest to lowest priority. For example, one might want to
-	// return the base layer before the top layer.
-	References() []Descriptor
-
-	// Payload provides the serialized format of the manifest, in addition to
-	// the media type.
-	Payload() (mediaType string, payload []byte, err error)
-}
-
-// ManifestBuilder creates a manifest allowing one to include dependencies.
-// Instances can be obtained from a version-specific manifest package. Manifest
-// specific data is passed into the function which creates the builder.
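Since the errors above are concrete types rather than opaque sentinels, callers typically distinguish them with a type switch. A minimal sketch; the describeErr helper is invented for illustration:

package example

import "github.com/docker/distribution"

// describeErr maps the typed registry errors to human-readable strings.
func describeErr(err error) string {
	switch e := err.(type) {
	case distribution.ErrRepositoryUnknown:
		return "no such repository: " + e.Name
	case distribution.ErrManifestUnknown:
		return "no such manifest: " + e.Name + ":" + e.Tag
	case distribution.ErrManifestBlobUnknown:
		return "manifest references unknown blob: " + e.Digest.String()
	default:
		return err.Error()
	}
}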
-type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediaType string - if ctHeader != "" { - var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediaType] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This -// should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) - } - mappings[mediaType] = u - return nil -} diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go deleted file mode 100644 index b5a5321448..0000000000 --- a/vendor/github.com/docker/distribution/metrics/prometheus.go +++ /dev/null @@ -1,13 +0,0 @@ -package metrics - -import "github.com/docker/go-metrics" - -const ( - // NamespacePrefix is the namespace of prometheus metrics - NamespacePrefix = "registry" -) - -var ( - // StorageNamespace is the prometheus namespace of blob/cache related operations - StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) -) diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go deleted file mode 100644 index d0deee65d7..0000000000 --- a/vendor/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,118 +0,0 @@ -package distribution - -import ( - "context" - - "github.com/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. 
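The paging contract spelled out above reads most clearly in code. A short sketch, assuming `ns` is a distribution.Namespace: fill a fixed buffer repeatedly, advance `last` past the final entry, and stop when io.EOF is returned.

package example

import (
	"context"
	"io"

	"github.com/docker/distribution"
)

// allRepositories drains the catalog using the Repositories paging contract.
func allRepositories(ctx context.Context, ns distribution.Namespace) ([]string, error) {
	var all []string
	buf := make([]string, 100)
	last := ""
	for {
		n, err := ns.Repositories(ctx, buf, last)
		all = append(all, buf[:n]...)
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		if n > 0 {
			last = buf[n-1]
		}
	}
}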
- Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// RepositoryRemover removes given repository -type RepositoryRemover interface { - Remove(ctx context.Context, name reference.Named) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// WithManifestMediaTypes lists the media types the client wishes -// the server to provide. -func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { - return WithManifestMediaTypesOption{mediaTypes} -} - -// WithManifestMediaTypesOption holds a list of accepted media types -type WithManifestMediaTypesOption struct{ MediaTypes []string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d9576..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. 
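In practice this type is produced by pinging the registry's /v2/ root and handing the response to the APIVersions helper below. A sketch under that assumption; the header name used here, Docker-Distribution-API-Version, is the conventional one for the v2 protocol:

package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

// supportsV2 probes endpoint and reports whether it advertises registry/2.0.
func supportsV2(endpoint string) (bool, error) {
	resp, err := http.Get(endpoint + "/v2/")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		if v.Type == "registry" && v.Version == "2.0" {
			return true, nil
		}
	}
	return false, nil
}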
-type APIVersion struct {
-	// Type refers to the name of a specific API specification
-	// such as "registry"
-	Type string
-
-	// Version is the version of the API specification implemented.
-	// This may omit the revision number and only include
-	// the major and minor version, such as "2.0"
-	Version string
-}
-
-// String returns the string formatted API Version
func (v APIVersion) String() string {
-	return v.Type + "/" + v.Version
-}
-
-// APIVersions gets the API versions out of an HTTP response using the provided
-// version header as the key for the HTTP header.
-func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
-	versions := []APIVersion{}
-	if versionHeader != "" {
-		for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
-			for _, version := range strings.Fields(supportedVersions) {
-				versions = append(versions, ParseAPIVersion(version))
-			}
-		}
-	}
-	return versions
-}
-
-// ParseAPIVersion parses an API version string into an APIVersion
-// Format (Expected, not enforced):
-// API version string = <API type> '/' <API version>
-// API type = [a-z][a-z0-9]*
-// API version = [0-9]+(\.[0-9]+)?
-// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
-func ParseAPIVersion(versionStr string) APIVersion {
-	idx := strings.IndexRune(versionStr, '/')
-	if idx == -1 {
-		return APIVersion{
-			Type:    "unknown",
-			Version: versionStr,
-		}
-	}
-	return APIVersion{
-		Type:    strings.ToLower(versionStr[:idx]),
-		Version: versionStr[idx+1:],
-	}
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go
deleted file mode 100644
index aad8a0e6f5..0000000000
--- a/vendor/github.com/docker/distribution/registry/client/auth/session.go
+++ /dev/null
@@ -1,530 +0,0 @@
-package auth
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/docker/distribution/registry/client"
-	"github.com/docker/distribution/registry/client/auth/challenge"
-	"github.com/docker/distribution/registry/client/transport"
-)
-
-var (
-	// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
-	// basic auth due to lack of credentials.
-	ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
-
-	// ErrNoToken is returned if a request is successful but the body does not
-	// contain an authorization token.
-	ErrNoToken = errors.New("authorization server did not include a token in the response")
-)
-
-const defaultClientID = "registry-client"
-
-// AuthenticationHandler is an interface for authorizing a request from
-// params from a "WWW-Authenticate" header for a single scheme.
-type AuthenticationHandler interface {
-	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
-	Scheme() string
-
-	// AuthorizeRequest adds the authorization header to a request (if needed)
-	// using the parameters from the "WWW-Authenticate" header. The parameter
-	// values depend on the scheme.
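A CredentialStore (declared just below) can be as small as an in-memory struct; real clients usually back it with a credential helper. A minimal sketch, with all names invented for illustration:

package example

import (
	"net/url"
	"sync"
)

// staticCreds satisfies the CredentialStore interface with fixed basic-auth
// credentials and an in-memory refresh-token map keyed by service.
type staticCreds struct {
	username, password string

	mu      sync.Mutex
	refresh map[string]string
}

func (s *staticCreds) Basic(*url.URL) (string, string) { return s.username, s.password }

func (s *staticCreds) RefreshToken(_ *url.URL, service string) string {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.refresh[service]
}

func (s *staticCreds) SetRefreshToken(_ *url.URL, service, token string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.refresh == nil {
		s.refresh = map[string]string{}
	}
	s.refresh[service] = token
}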
- AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). -func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges challenge.Manager - handlers []AuthenticationHandler -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, c := range challenges { - if c.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. -type clock interface { - Now() time.Time -} - -type tokenHandler struct { - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time - - logger Logger -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Class string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - repoType := "repository" - // Keep existing format for image class to maintain backwards compatibility - // with authorization servers which do not support the expanded grammar. 
- if rs.Class != "" && rs.Class != "image" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. -type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the user -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// Logger defines the injectable logging interface, used on TokenHandlers. -type Logger interface { - Debugf(format string, args ...interface{}) -} - -func logDebugf(logger Logger, format string, args ...interface{}) { - if logger == nil { - return - } - logger.Debugf(format, args...) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope - Logger Logger -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - logger: options.Logger, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) 
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - if hasScope(scopes, scope) { - continue - } - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -func hasScope(scopes []string, scope string) bool { - for _, s := range scopes { - if s == scope { - return true - } - } - return false -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
-		tr.IssuedAt = th.clock.Now().UTC()
-	}
-
-	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
-}
-
-type getTokenResponse struct {
-	Token        string    `json:"token"`
-	AccessToken  string    `json:"access_token"`
-	ExpiresIn    int       `json:"expires_in"`
-	IssuedAt     time.Time `json:"issued_at"`
-	RefreshToken string    `json:"refresh_token"`
-}
-
-func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
-
-	req, err := http.NewRequest("GET", realm.String(), nil)
-	if err != nil {
-		return "", time.Time{}, err
-	}
-
-	reqParams := req.URL.Query()
-
-	if service != "" {
-		reqParams.Add("service", service)
-	}
-
-	for _, scope := range scopes {
-		reqParams.Add("scope", scope)
-	}
-
-	if th.offlineAccess {
-		reqParams.Add("offline_token", "true")
-		clientID := th.clientID
-		if clientID == "" {
-			clientID = defaultClientID
-		}
-		reqParams.Add("client_id", clientID)
-	}
-
-	if th.creds != nil {
-		username, password := th.creds.Basic(realm)
-		if username != "" && password != "" {
-			reqParams.Add("account", username)
-			req.SetBasicAuth(username, password)
-		}
-	}
-
-	req.URL.RawQuery = reqParams.Encode()
-
-	resp, err := th.client().Do(req)
-	if err != nil {
-		return "", time.Time{}, err
-	}
-	defer resp.Body.Close()
-
-	if !client.SuccessStatus(resp.StatusCode) {
-		err := client.HandleErrorResponse(resp)
-		return "", time.Time{}, err
-	}
-
-	decoder := json.NewDecoder(resp.Body)
-
-	var tr getTokenResponse
-	if err = decoder.Decode(&tr); err != nil {
-		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
-	}
-
-	if tr.RefreshToken != "" && th.creds != nil {
-		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
-	}
-
-	// `access_token` is equivalent to `token` and if both are specified
-	// the choice is undefined. Canonicalize `access_token` by sticking
-	// things in `token`.
-	if tr.AccessToken != "" {
-		tr.Token = tr.AccessToken
-	}
-
-	if tr.Token == "" {
-		return "", time.Time{}, ErrNoToken
-	}
-
-	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
-		// The default/minimum lifetime.
-		tr.ExpiresIn = minimumTokenLifetimeSeconds
-		logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
-	}
-
-	if tr.IssuedAt.IsZero() {
-		// issued_at is optional in the token response.
-		tr.IssuedAt = th.clock.Now().UTC()
-	}
-
-	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
-}
-
-func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
-	realm, ok := params["realm"]
-	if !ok {
-		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
-	}
-
-	// TODO(dmcgowan): Handle empty scheme and relative realm
-	realmURL, err := url.Parse(realm)
-	if err != nil {
-		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
-	}
-
-	service := params["service"]
-
-	var refreshToken string
-
-	if th.creds != nil {
-		refreshToken = th.creds.RefreshToken(realmURL, service)
-	}
-
-	if refreshToken != "" || th.forceOAuth {
-		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
-	}
-
-	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
-}
-
-type basicHandler struct {
-	creds CredentialStore
-}
-
-// NewBasicHandler creates a new authentication handler which adds
-// basic authentication credentials to a request.
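The pieces in this file compose in a fairly fixed pattern: a challenge.Manager primed from a ping response, wrapped by NewAuthorizer with a token handler and a basic-auth fallback, tried in that order. A sketch of that wiring; `manager` and `creds` are assumed to be supplied by the caller:

package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/docker/distribution/registry/client/transport"
)

// authorizedTransport composes a bearer-token handler with a basic-auth
// fallback; the authorizer consults manager for the endpoint's challenges.
func authorizedTransport(manager challenge.Manager, creds auth.CredentialStore, repoName string) http.RoundTripper {
	return transport.NewTransport(http.DefaultTransport,
		auth.NewAuthorizer(manager,
			auth.NewTokenHandler(http.DefaultTransport, creds, repoName, "pull", "push"),
			auth.NewBasicHandler(creds)))
}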
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index dac030c738..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return 
hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index ce9902034d..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,160 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "mime" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(resp *http.Response) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - statusCode := resp.StatusCode - ctHeader := resp.Header.Get("Content-Type") - - if ctHeader == "" { - return makeError(statusCode, string(body)) - } - - contentType, _, err := mime.ParseMediaType(ctHeader) - if err != nil { - return fmt.Errorf("failed parsing content-type: %w", err) - } - - if contentType != "application/json" && contentType != "application/vnd.api+json" { - return makeError(statusCode, string(body)) - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - return makeError(statusCode, detailsErr.Details) - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeError(statusCode int, details string) error { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(details) - case http.StatusForbidden: - return errcode.ErrorCodeDenied.WithMessage(details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(details) - default: - return errcode.ErrorCodeUnknown.WithMessage(details) - } -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - return mergeErrors(err, parseHTTPErrorResponse(resp)) - } - } - err := parseHTTPErrorResponse(resp) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). 
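The two helpers around this point, HandleErrorResponse and SuccessStatus, are normally used as a pair: any response outside the success range is funneled through the error parser. A brief sketch with an invented checkedGet wrapper:

package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client"
)

// checkedGet returns the response on success and a parsed registry error
// otherwise; callers own resp.Body on the success path.
func checkedGet(c *http.Client, url string) (*http.Response, error) {
	resp, err := c.Get(url)
	if err != nil {
		return nil, err
	}
	if !client.SuccessStatus(resp.StatusCode) {
		defer resp.Body.Close()
		return nil, client.HandleErrorResponse(resp)
	}
	return resp, nil
}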
-func SuccessStatus(status int) bool {
-	return status >= 200 && status <= 399
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
deleted file mode 100644
index fd42a1e66f..0000000000
--- a/vendor/github.com/docker/distribution/registry/client/repository.go
+++ /dev/null
@@ -1,870 +0,0 @@
-package client
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/distribution/reference"
-	"github.com/docker/distribution"
-	v2 "github.com/docker/distribution/registry/api/v2"
-	"github.com/docker/distribution/registry/client/transport"
-	"github.com/docker/distribution/registry/storage/cache"
-	"github.com/docker/distribution/registry/storage/cache/memory"
-	"github.com/opencontainers/go-digest"
-)
-
-// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
-type Registry interface {
-	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
-}
-
-// checkHTTPRedirect is a callback that can manipulate redirected HTTP
-// requests. It is used to preserve Accept and Range headers.
-func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
-	if len(via) >= 10 {
-		return errors.New("stopped after 10 redirects")
-	}
-
-	if len(via) > 0 {
-		for headerName, headerVals := range via[0].Header {
-			if headerName != "Accept" && headerName != "Range" {
-				continue
-			}
-			for _, val := range headerVals {
-				// Don't add to redirected request if redirected
-				// request already has a header with the same
-				// name and value.
-				hasValue := false
-				for _, existingVal := range req.Header[headerName] {
-					if existingVal == val {
-						hasValue = true
-						break
-					}
-				}
-				if !hasValue {
-					req.Header.Add(headerName, val)
-				}
-			}
-		}
-	}
-
-	return nil
-}
-
-// NewRegistry creates a registry namespace which can be used to get a listing of repositories
-func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
-	ub, err := v2.NewURLBuilderFromString(baseURL, false)
-	if err != nil {
-		return nil, err
-	}
-
-	client := &http.Client{
-		Transport:     transport,
-		Timeout:       1 * time.Minute,
-		CheckRedirect: checkHTTPRedirect,
-	}
-
-	return &registry{
-		client: client,
-		ub:     ub,
-	}, nil
-}
-
-type registry struct {
-	client *http.Client
-	ub     *v2.URLBuilder
-}
-
-// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
-// of the slice, starting at the value provided in 'last'.
The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - copy(entries, ctlg.Repositories) - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - listURLStr, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - listURL, err := url.Parse(listURLStr) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(listURL.String()) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" { - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.Parse(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: - // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
- // Issue a GET request: - // - for data from a server that does not handle HEAD - // - to get error details in case of a failure - resp, err = newRequest("GET") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 200 && resp.StatusCode < 400 { - return descriptorFromResponse(resp) - } - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set a the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest is represents the digest which the registry uses -// to refer to the content and can be used to delete the content. 
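Combined with distribution.WithTag, the option defined below lets a caller fetch a manifest by tag while capturing the registry's canonical digest. A sketch, assuming `ms` is a distribution.ManifestService; the empty digest argument is ignored when a tag option is supplied:

package example

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/client"
	"github.com/opencontainers/go-digest"
)

// getByTag resolves tag to a manifest and reports the canonical digest the
// registry returned in the Docker-Content-Digest header.
func getByTag(ctx context.Context, ms distribution.ManifestService, tag string) (distribution.Manifest, digest.Digest, error) {
	var dgst digest.Digest
	m, err := ms.Get(ctx, "", distribution.WithTag(tag), client.ReturnContentDigest(&dgst))
	return m, dgst, err
}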
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - mediaTypes []string - ) - - for _, option := range options { - switch opt := option.(type) { - case distribution.WithTagOption: - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - case contentDigestOption: - contentDgst = opt.digest - case distribution.WithManifestMediaTypesOption: - mediaTypes = opt.MediaTypes - default: - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - if len(mediaTypes) == 0 { - mediaTypes = distribution.ManifestMediaTypes() - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range mediaTypes { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
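The tag handling described above means pushing a tagged manifest reduces to a single call over this method. A sketch, with pushTagged invented for illustration:

package example

import (
	"context"

	"github.com/docker/distribution"
)

// pushTagged uploads m and associates it with tag on the registry; without
// the tag option, Put would target the canonical-digest URL instead.
func pushTagged(ctx context.Context, ms distribution.ManifestService, m distribution.Manifest, tag string) error {
	_, err := ms.Put(ctx, m, distribution.WithTag(tag))
	return err
}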
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.Digester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - resp, err := bs.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index 9120dbed66..0000000000 --- 
a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,249 +0,0 @@ -package transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset -// the a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == io.SeekStart && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request. 
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case io.SeekCurrent: - newOffset += offset - case io.SeekEnd: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case io.SeekStart: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go 
b/vendor/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0f..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a3909197..0000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index ac4c452117..0000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,129 +0,0 @@ -package cache - -import ( - "context" - - "github.com/docker/distribution" - prometheus "github.com/docker/distribution/metrics" - "github.com/opencontainers/go-digest" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// Logger can be provided on the MetricsTracker to log errors. -// -// Usually, this is just a proxy to dcontext.GetLogger. -type Logger interface { - Errorf(format string, args ...interface{}) -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics - Logger(context.Context) Logger -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -var ( - // cacheCount is the number of total cache request received/hits/misses - cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") -) - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. 
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - cacheCount.WithValues("Request").Inc(1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - cacheCount.WithValues("Hit").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - cacheCount.WithValues("Miss").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} - -func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { - if tracker == nil { - return - } - - logger := tracker.Logger(ctx) - if logger == nil { - return - } - logger.Errorf(format, args...) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index f2953b02c2..0000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,179 +0,0 @@ -package memory - -import ( - "context" - "sync" - - "github.com/distribution/reference" - "github.com/docker/distribution" - "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. - var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index f22df2b850..0000000000 --- a/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. 
- Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index 20818428ff..0000000000 --- a/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,52 +0,0 @@ -github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b -github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2 -github.com/distribution/reference 49c28499d219290c3226822e9cfcd4ede6d75379 # v0.5.0 -github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f -github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 -github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 -github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c -github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 -github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd -github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 
-google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 v2.2.1 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest ea51bea511f75cfa3ef6098cc253c5c3609b037a # v1.0.0 -github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 88032defe7..c7c649471c 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -2,6 +2,7 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +17neverends 7sunarni <710720732@qq.com> Aanand Prasad Aarni Koskela @@ -189,6 +190,7 @@ Anes Hasicic Angel Velazquez Anil Belur Anil Madhavapeddy +Anirudh Aithal Ankit Jain Ankush Agarwal Anonmily @@ -227,7 +229,7 @@ Arun Gupta Asad Saeeduddin Asbjørn Enge Ashly Mathew -Austin Vazquez +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -293,6 +295,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brendon Smith Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins @@ -347,6 +350,7 @@ Casey Bisson Catalin Pirvu Ce Gao Cedric Davies +Cesar Talledo Cezar Sa Espinola Chad Swenson Chance Zibolski @@ -375,6 +379,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chengyu Zhu Chentianze Chenyang Yan chenyuzhu @@ -1207,6 +1212,7 @@ K. Heller Kai Blin Kai Qiang Wu (Kennan) Kaijie Chen +Kaita Nakamura Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1281,6 +1287,7 @@ Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene +Kristian Heljas Kristina Zabunova Krystian Wojcicki Kunal Kushwaha @@ -1482,6 +1489,7 @@ Matthias Kühnle Matthias Rampke Matthieu Fronton Matthieu Hauglustaine +Matthieu MOREL Mattias Jernberg Mauricio Garavaglia mauriyouth @@ -1712,6 +1720,7 @@ Patrick Hemmer Patrick St. laurent Patrick Stapleton Patrik Cyvoct +Patrik Leifert pattichen Paul "TBBle" Hampson Paul @@ -1870,6 +1879,7 @@ Robert Obryk Robert Schneider Robert Shade Robert Stern +Robert Sturla Robert Terhaar Robert Wallis Robert Wang diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go deleted file mode 100644 index b8a690d67a..0000000000 --- a/vendor/github.com/docker/docker/api/types/filters/errors.go +++ /dev/null @@ -1,24 +0,0 @@ -package filters - -import "fmt" - -// invalidFilter indicates that the provided filter or its value is invalid -type invalidFilter struct { - Filter string - Value []string -} - -func (e invalidFilter) Error() string { - msg := "invalid filter" - if e.Filter != "" { - msg += " '" + e.Filter - if e.Value != nil { - msg = fmt.Sprintf("%s=%s", msg, e.Value) - } - msg += "'" - } - return msg -} - -// InvalidParameter marks this error as ErrInvalidParameter -func (e invalidFilter) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 2085ff38f2..0000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Package filters provides tools for encoding a mapping of keys to a set of -multiple values. 
-*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte("{}"), nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, &invalidFilter{} - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - 
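For context while reviewing this deletion: Args is a set-valued multimap with a JSON wire encoding, and the pieces shown so far (NewArgs, Arg, Add, Get, ToJSON, FromJSON) compose as in the minimal sketch below. The sketch assumes the upstream github.com/docker/docker/api/types/filters import path, since only the vendored copy is being removed here; it is an illustration of the API's documented behavior, not code from this repository.

package main

import (
	"fmt"

	// Assumed import path: the upstream module that this vendor copy mirrors.
	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build a filter set; repeated keys accumulate into a set of values.
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("dangling", "true"),
	)
	args.Add("label", "tier=frontend")

	fmt.Println(args.Len())        // 2 keys: "label" and "dangling"
	fmt.Println(args.Get("label")) // unordered: ["env=prod" "tier=frontend"]

	// Round-trip the JSON encoding used on the Docker API wire.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Get("dangling")) // ["true"]
}

The set semantics sketched here are what the matching helpers that follow (MatchKVList, Match, ExactMatch, FuzzyMatch) iterate over.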
-// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testK, testV, hasValue := strings.Cut(value, "=") - - v, ok := sources[testK] - if !ok { - return false - } - if hasValue && testV != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// GetBoolOrDefault returns a boolean value of the key if the key is present -// and is interpretable as a boolean value. Otherwise the default value is returned. -// Error is not nil only if the filter values are not valid boolean or are conflicting. -func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { - fieldValues, ok := args.fields[key] - if !ok { - return defaultValue, nil - } - - if len(fieldValues) == 0 { - return defaultValue, &invalidFilter{key, nil} - } - - isFalse := fieldValues["0"] || fieldValues["false"] - isTrue := fieldValues["1"] || fieldValues["true"] - if isFalse == isTrue { - // Either no or conflicting truthy/falsy value were provided - return defaultValue, &invalidFilter{key, args.Get(key)} - } - return isTrue, nil -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. 
-func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return &invalidFilter{name, nil} - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. -func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go deleted file mode 100644 index ebd5e4b9e2..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ /dev/null @@ -1,109 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "strings" -) - -// AuthHeader is the name of the header used to send encoded registry -// authorization credentials for registry operations (push/pull). -const AuthHeader = "X-Registry-Auth" - -// RequestAuthConfig is a function interface that clients can supply -// to retry operations after getting an authorization error. -// -// The function must return the [AuthHeader] value ([AuthConfig]), encoded -// in base64url format ([RFC4648, section 5]), which can be decoded by -// [DecodeAuthConfig]. -// -// It must return an error if the privilege request fails. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -type RequestAuthConfig func(context.Context) (string, error) - -// AuthConfig contains authorization information for connecting to a Registry. -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// EncodeAuthConfig serializes the auth configuration as a base64url encoded -// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", errInvalidParameter{err} - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON -// authentication information as sent through the X-Registry-Auth header. -// -// This function always returns an [AuthConfig], even if an error occurs. It is up -// to the caller to decide if authentication is required, and if the error can -// be ignored. -// -// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 -func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - if authEncoded == "" { - return &AuthConfig{}, nil - } - - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - return decodeAuthConfigFromReader(authJSON) -} - -// DecodeAuthConfigBody decodes authentication information as sent as JSON in the -// body of a request. This function is to provide backward compatibility with old -// clients and API versions. Current clients and API versions expect authentication -// to be provided through the X-Registry-Auth header. -// -// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an -// error occurs. It is up to the caller to decide if authentication is required, -// and if the error can be ignored. -func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { - return decodeAuthConfigFromReader(rdr) -} - -func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { - authConfig := &AuthConfig{} - if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { - // always return an (empty) AuthConfig to increase compatibility with - // the existing API. 
- return &AuthConfig{}, invalid(err) - } - return authConfig, nil -} - -func invalid(err error) error { - return errInvalidParameter{fmt.Errorf("invalid X-Registry-Auth header: %w", err)} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { return e.error } - -func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e40..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 8117cb09e7..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,116 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. -type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet `json:"AllowNondistributableArtifactsCIDRs,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. - AllowNondistributableArtifactsHostnames []string `json:"AllowNondistributableArtifactsHostnames,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. - - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// MarshalJSON implements a custom marshaler to include legacy fields -// in API responses. 
-func (sc ServiceConfig) MarshalJSON() ([]byte, error) { - tmp := map[string]interface{}{ - "InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs, - "IndexConfigs": sc.IndexConfigs, - "Mirrors": sc.Mirrors, - } - if sc.AllowNondistributableArtifactsCIDRs != nil { - tmp["AllowNondistributableArtifactsCIDRs"] = nil - } - if sc.AllowNondistributableArtifactsHostnames != nil { - tmp["AllowNondistributableArtifactsHostnames"] = nil - } - return json.Marshal(tmp) -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error { - var ipnetStr string - if err := json.Unmarshal(b, &ipnetStr); err != nil { - return err - } - _, cidr, err := net.ParseCIDR(ipnetStr) - if err != nil { - return err - } - *ipnet = NetIPNet(*cidr) - return nil -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor ocispec.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []ocispec.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go deleted file mode 100644 index 994ca4c6f9..0000000000 --- a/vendor/github.com/docker/docker/api/types/registry/search.go +++ /dev/null @@ -1,48 +0,0 @@ -package registry - -import ( - "context" - - "github.com/docker/docker/api/types/filters" -) - -// SearchOptions holds parameters to search images with. -type SearchOptions struct { - RegistryAuth string - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. 
- // - // For details, refer to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. - PrivilegeFunc func(context.Context) (string, error) - Filters filters.Args - Limit int -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated. - // - // Deprecated: the "is_automated" field is deprecated and will always be "false". - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 621725a36d..1a0325c7ed 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -1,4 +1,4 @@ -package versions // import "github.com/docker/docker/api/types/versions" +package versions import ( "strconv" diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index a5523c3e95..0000000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. 
-type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174fc..0000000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). -package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index ab76e62736..0000000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,305 +0,0 @@ -package errdefs - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound creates an [ErrNotFound] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrNotFound], -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter creates an [ErrInvalidParameter] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrInvalidParameter], -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict creates an [ErrConflict] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrConflict], -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized creates an [ErrUnauthorized] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnauthorized], -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable creates an [ErrUnavailable] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnavailable], -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden creates an [ErrForbidden] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrForbidden], -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System creates an [ErrSystem] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrSystem], -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// NotModified creates an [ErrNotModified] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [NotModified], -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented creates an [ErrNotImplemented] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrNotImplemented], -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown creates an [ErrUnknown] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnknown], -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled creates an [ErrCancelled] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrCancelled].
-func Cancelled(err error) error {
-	if err == nil || IsCancelled(err) {
-		return err
-	}
-	return errCancelled{err}
-}
-
-type errDeadline struct{ error }
-
-func (errDeadline) DeadlineExceeded() {}
-
-func (e errDeadline) Cause() error {
-	return e.error
-}
-
-func (e errDeadline) Unwrap() error {
-	return e.error
-}
-
-// Deadline creates an [ErrDeadline] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrDeadline].
-func Deadline(err error) error {
-	if err == nil || IsDeadline(err) {
-		return err
-	}
-	return errDeadline{err}
-}
-
-type errDataLoss struct{ error }
-
-func (errDataLoss) DataLoss() {}
-
-func (e errDataLoss) Cause() error {
-	return e.error
-}
-
-func (e errDataLoss) Unwrap() error {
-	return e.error
-}
-
-// DataLoss creates an [ErrDataLoss] error from the given error.
-// It returns the error as-is if it is either nil (no error) or already implements
-// [ErrDataLoss].
-func DataLoss(err error) error {
-	if err == nil || IsDataLoss(err) {
-		return err
-	}
-	return errDataLoss{err}
-}
-
-// FromContext returns the error class from the passed-in context.
-func FromContext(ctx context.Context) error {
-	e := ctx.Err()
-	if e == nil {
-		return nil
-	}
-
-	if e == context.Canceled {
-		return Cancelled(e)
-	}
-	if e == context.DeadlineExceeded {
-		return Deadline(e)
-	}
-	return Unknown(e)
-}
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
deleted file mode 100644
index 0a8fadd48f..0000000000
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package errdefs
-
-import (
-	"net/http"
-)
-
-// FromStatusCode creates an errdef error, based on the provided HTTP status-code
-func FromStatusCode(err error, statusCode int) error {
-	if err == nil {
-		return nil
-	}
-	switch statusCode {
-	case http.StatusNotFound:
-		return NotFound(err)
-	case http.StatusBadRequest:
-		return InvalidParameter(err)
-	case http.StatusConflict:
-		return Conflict(err)
-	case http.StatusUnauthorized:
-		return Unauthorized(err)
-	case http.StatusServiceUnavailable:
-		return Unavailable(err)
-	case http.StatusForbidden:
-		return Forbidden(err)
-	case http.StatusNotModified:
-		return NotModified(err)
-	case http.StatusNotImplemented:
-		return NotImplemented(err)
-	case http.StatusInternalServerError:
-		if IsCancelled(err) || IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) {
-			return err
-		}
-		return System(err)
-	default:
-		switch {
-		case statusCode >= 200 && statusCode < 400:
-			// 2xx/3xx status codes don't map to an error class; return as-is
-			return err
-		case statusCode >= 400 && statusCode < 500:
-			return InvalidParameter(err)
-		case statusCode >= 500 && statusCode < 600:
-			return System(err)
-		default:
-			return Unknown(err)
-		}
-	}
-}
diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go
deleted file mode 100644
index 30ea7e6fec..0000000000
--- a/vendor/github.com/docker/docker/errdefs/is.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package errdefs
-
-import (
-	"context"
-	"errors"
-)
-
-type causer interface {
-	Cause() error
-}
-
-type wrapErr interface {
-	Unwrap() error
-}
-
-func getImplementer(err error) error {
-	switch e := err.(type) {
-	case
-		ErrNotFound,
-		ErrInvalidParameter,
-		ErrConflict,
-		ErrUnauthorized,
-		ErrUnavailable,
-		ErrForbidden,
-		ErrSystem,
-		ErrNotModified,
-
ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - case wrapErr: - return getImplementer(e.Unwrap()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an [ErrNotFound], -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an [ErrInvalidParameter]. -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an [ErrConflict]. -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an [ErrUnauthorized]. -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an [ErrUnavailable]. -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an [ErrForbidden]. -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an [ErrSystem]. -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is an [ErrNotModified]. -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an [ErrNotImplemented]. -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an [ErrUnknown]. -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an [ErrCancelled]. -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an [ErrDeadline]. -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an [ErrDataLoss]. -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} - -// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. -func IsContext(err error) bool { - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go b/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go deleted file mode 100644 index 6334edb60d..0000000000 --- a/vendor/github.com/docker/docker/internal/lazyregexp/lazyregexp.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code below was largely copied from golang.org/x/mod@v0.22; -// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go -// with some additional methods added. - -// Package lazyregexp is a thin wrapper over regexp, allowing the use of global -// regexp variables without forcing them to be compiled at init. 
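Editorial aside on the errdefs package removed above: the wrapper constructors and the `Is*` checks are designed to compose with standard Go error wrapping. A minimal sketch of that flow, assuming the upstream `github.com/docker/docker/errdefs` package is available on the module path:

package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/errdefs"
)

func main() {
	base := errors.New("no such image: example:latest")

	// Classify the error; NotFound returns it unchanged if already classified.
	err := errdefs.NotFound(base)

	// The class survives further wrapping because getImplementer follows
	// both Cause() and Unwrap() chains.
	wrapped := fmt.Errorf("pull failed: %w", err)

	fmt.Println(errdefs.IsNotFound(err))     // true
	fmt.Println(errdefs.IsNotFound(wrapped)) // true
	fmt.Println(errors.Is(wrapped, base))    // true: stdlib unwrapping still works
}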
-package lazyregexp - -import ( - "os" - "regexp" - "strings" - "sync" -) - -// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be -// compiled the first time it is needed. -type Regexp struct { - str string - once sync.Once - rx *regexp.Regexp -} - -func (r *Regexp) re() *regexp.Regexp { - r.once.Do(r.build) - return r.rx -} - -func (r *Regexp) build() { - r.rx = regexp.MustCompile(r.str) - r.str = "" -} - -func (r *Regexp) FindSubmatch(s []byte) [][]byte { - return r.re().FindSubmatch(s) -} - -func (r *Regexp) FindAllStringSubmatch(s string, n int) [][]string { - return r.re().FindAllStringSubmatch(s, n) -} - -func (r *Regexp) FindStringSubmatch(s string) []string { - return r.re().FindStringSubmatch(s) -} - -func (r *Regexp) FindStringSubmatchIndex(s string) []int { - return r.re().FindStringSubmatchIndex(s) -} - -func (r *Regexp) ReplaceAllString(src, repl string) string { - return r.re().ReplaceAllString(src, repl) -} - -func (r *Regexp) FindString(s string) string { - return r.re().FindString(s) -} - -func (r *Regexp) FindAllString(s string, n int) []string { - return r.re().FindAllString(s, n) -} - -func (r *Regexp) MatchString(s string) bool { - return r.re().MatchString(s) -} - -func (r *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { - return r.re().ReplaceAllStringFunc(src, repl) -} - -func (r *Regexp) SubexpNames() []string { - return r.re().SubexpNames() -} - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -// New creates a new lazy regexp, delaying the compiling work until it is first -// needed. If the code is being run as part of tests, the regexp compiling will -// happen immediately. -func New(str string) *Regexp { - lr := &Regexp{str: str} - if inTest { - // In tests, always compile the regexps early. - lr.re() - } - return lr -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index c0ab3f5bf3..0000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,28 +0,0 @@ -package homedir - -import ( - "os" - "os/user" - "runtime" -) - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// On non-Windows platforms, it falls back to nss lookups, if the home -// directory cannot be obtained from environment-variables. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home, _ := os.UserHomeDir() - if home == "" && runtime.GOOS != "windows" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index ded1c7c8c6..0000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,105 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" - "os" - "path/filepath" - "strings" -) - -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. 
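The lazyregexp package above lives under internal/ and cannot be imported from outside the docker module. As a hedged illustration only, the same deferred-compilation pattern can be reproduced with the standard library (`sync.OnceValue`, Go 1.21+); the host pattern below is made up for demonstration:

package main

import (
	"fmt"
	"regexp"
	"sync"
)

// lazyRe mirrors lazyregexp.New: compilation is deferred until first use,
// keeping package init cheap even with many global patterns.
func lazyRe(str string) func() *regexp.Regexp {
	return sync.OnceValue(func() *regexp.Regexp {
		return regexp.MustCompile(str)
	})
}

// Hypothetical host[:port] pattern, not taken from the vendored code.
var hostRe = lazyRe(`^[a-z0-9.-]+(:[0-9]+)?$`)

func main() {
	fmt.Println(hostRe().MatchString("registry.example.com:5000")) // true
	fmt.Println(hostRe().MatchString("http://bad"))                // false
}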
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetRuntimeDir() (string, error) {
-	if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
-		return xdgRuntimeDir, nil
-	}
-	return "", errors.New("could not get XDG_RUNTIME_DIR")
-}
-
-// StickRuntimeDirContents sets the sticky bit on files that are under
-// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
-//
-// It returns the slice of files whose sticky bit was set.
-// It returns a nil error if XDG_RUNTIME_DIR is not set.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func StickRuntimeDirContents(files []string) ([]string, error) {
-	runtimeDir, err := GetRuntimeDir()
-	if err != nil {
-		// ignore error if runtimeDir is empty
-		return nil, nil
-	}
-	runtimeDir, err = filepath.Abs(runtimeDir)
-	if err != nil {
-		return nil, err
-	}
-	var sticked []string
-	for _, f := range files {
-		f, err = filepath.Abs(f)
-		if err != nil {
-			return sticked, err
-		}
-		if strings.HasPrefix(f, runtimeDir+"/") {
-			if err = stick(f); err != nil {
-				return sticked, err
-			}
-			sticked = append(sticked, f)
-		}
-	}
-	return sticked, nil
-}
-
-func stick(f string) error {
-	st, err := os.Stat(f)
-	if err != nil {
-		return err
-	}
-	m := st.Mode()
-	m |= os.ModeSticky
-	return os.Chmod(f, m)
-}
-
-// GetDataHome returns XDG_DATA_HOME.
-// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
-// If HOME and XDG_DATA_HOME are not set, getpwent(3) is consulted to determine the user's home directory.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetDataHome() (string, error) {
-	if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
-		return xdgDataHome, nil
-	}
-	home := Get()
-	if home == "" {
-		return "", errors.New("could not get either XDG_DATA_HOME or HOME")
-	}
-	return filepath.Join(home, ".local", "share"), nil
-}
-
-// GetConfigHome returns XDG_CONFIG_HOME.
-// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
-// If HOME and XDG_CONFIG_HOME are not set, getpwent(3) is consulted to determine the user's home directory.
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetConfigHome() (string, error) {
-	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
-		return xdgConfigHome, nil
-	}
-	home := Get()
-	if home == "" {
-		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
-	}
-	return filepath.Join(home, ".config"), nil
-}
-
-// GetLibHome returns $HOME/.local/lib.
-// If HOME is not set, getpwent(3) is consulted to determine the user's home directory.
-func GetLibHome() (string, error) {
-	home := Get()
-	if home == "" {
-		return "", errors.New("could not get HOME")
-	}
-	return filepath.Join(home, ".local/lib"), nil
}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
deleted file mode 100644
index 4eeb26b5dc..0000000000
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
+++ /dev/null
@@ -1,32 +0,0 @@
-//go:build !linux
-
-package homedir // import "github.com/docker/docker/pkg/homedir"
-
-import (
-	"errors"
-)
-
-// GetRuntimeDir is unsupported on non-Linux systems.
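The Linux XDG helpers above all share one shape: prefer the environment variable, fall back to a well-known path under the home directory. A stdlib-only sketch of that fallback (note that os.UserConfigDir already covers much of this on modern Go):

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// configHome mirrors GetConfigHome: XDG_CONFIG_HOME wins, otherwise
// $HOME/.config; an error is returned only when neither can be determined.
func configHome() (string, error) {
	if v := os.Getenv("XDG_CONFIG_HOME"); v != "" {
		return v, nil
	}
	home, err := os.UserHomeDir()
	if err != nil || home == "" {
		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
	}
	return filepath.Join(home, ".config"), nil
}

func main() {
	dir, err := configHome()
	fmt.Println(dir, err)
}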
-func GetRuntimeDir() (string, error) {
-	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
-}
-
-// StickRuntimeDirContents is unsupported on non-Linux systems.
-func StickRuntimeDirContents(files []string) ([]string, error) {
-	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
-}
-
-// GetDataHome is unsupported on non-Linux systems.
-func GetDataHome() (string, error) {
-	return "", errors.New("homedir.GetDataHome() is not supported on this system")
-}
-
-// GetConfigHome is unsupported on non-Linux systems.
-func GetConfigHome() (string, error) {
-	return "", errors.New("homedir.GetConfigHome() is not supported on this system")
-}
-
-// GetLibHome is unsupported on non-Linux systems.
-func GetLibHome() (string, error) {
-	return "", errors.New("homedir.GetLibHome() is not supported on this system")
-}
diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go
deleted file mode 100644
index 8f35dfff9c..0000000000
--- a/vendor/github.com/docker/docker/registry/auth.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package registry // import "github.com/docker/docker/registry"
-
-import (
-	"context"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-
-	"github.com/containerd/log"
-	"github.com/docker/distribution/registry/client/auth"
-	"github.com/docker/distribution/registry/client/auth/challenge"
-	"github.com/docker/distribution/registry/client/transport"
-	"github.com/docker/docker/api/types/registry"
-	"github.com/pkg/errors"
-)
-
-// AuthClientID is the ClientID used for the token server
-const AuthClientID = "docker"
-
-type loginCredentialStore struct {
-	authConfig *registry.AuthConfig
-}
-
-func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
-	return lcs.authConfig.Username, lcs.authConfig.Password
-}
-
-func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
-	return lcs.authConfig.IdentityToken
-}
-
-func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
-	lcs.authConfig.IdentityToken = token
-}
-
-type staticCredentialStore struct {
-	auth *registry.AuthConfig
-}
-
-// NewStaticCredentialStore returns a credential store
-// which always returns the same credential values.
-func NewStaticCredentialStore(auth *registry.AuthConfig) auth.CredentialStore {
-	return staticCredentialStore{
-		auth: auth,
-	}
-}
-
-func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
-	if scs.auth == nil {
-		return "", ""
-	}
-	return scs.auth.Username, scs.auth.Password
-}
-
-func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
-	if scs.auth == nil {
-		return ""
-	}
-	return scs.auth.IdentityToken
-}
-
-func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
-}
-
-// loginV2 tries to log in to the v2 registry server. The given registry
-// endpoint will be pinged to get authorization challenges. These challenges
-// will be used to authenticate against the registry to validate credentials.
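A hedged usage sketch for NewStaticCredentialStore, the one exported constructor above: the store hands back the same credentials regardless of which URL it is asked about (the credential values and import alias are illustrative):

package main

import (
	"fmt"

	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/registry"
)

func main() {
	creds := registry.NewStaticCredentialStore(&registrytypes.AuthConfig{
		Username: "someuser", // illustrative credentials
		Password: "somesecret",
	})

	// The URL argument is ignored by the static store; nil is fine here.
	user, pass := creds.Basic(nil)
	fmt.Println(user, pass) // someuser somesecret
}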
-func loginV2(authConfig *registry.AuthConfig, endpoint APIEndpoint, userAgent string) (token string, _ error) { - endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - log.G(context.TODO()).Debugf("attempting v2 login to registry endpoint %s", endpointStr) - - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return "", err - } - - var ( - modifiers = Headers(userAgent, nil) - authTrans = transport.NewTransport(newTransport(endpoint.TLSConfig), modifiers...) - credentialAuthConfig = *authConfig - creds = loginCredentialStore{authConfig: &credentialAuthConfig} - ) - - loginClient, err := v2AuthHTTPClient(endpoint.URL, authTrans, modifiers, creds, nil) - if err != nil { - return "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - err = translateV2AuthError(err) - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - return "", errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - return credentialAuthConfig.IdentityToken, nil -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { - challengeManager, err := PingV2Registry(endpoint, authTransport) - if err != nil { - return nil, err - } - - authHandlers := []auth.AuthenticationHandler{ - auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - }), - auth.NewBasicHandler(creds), - } - - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, authHandlers...)) - - return &http.Client{ - Transport: transport.NewTransport(authTransport, modifiers...), - Timeout: 15 * time.Second, - }, nil -} - -// ConvertToHostname normalizes a registry URL which has http|https prepended -// to just its hostname. It is used to match credentials, which may be either -// stored as hostname or as hostname including scheme (in legacy configuration -// files). -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(stripped, "http://") { - stripped = strings.TrimPrefix(stripped, "http://") - } else if strings.HasPrefix(stripped, "https://") { - stripped = strings.TrimPrefix(stripped, "https://") - } - stripped, _, _ = strings.Cut(stripped, "/") - return stripped -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]registry.AuthConfig, index *registry.IndexInfo) registry.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registryURL, ac := range authConfigs { - if configKey == ConvertToHostname(registryURL) { - return ac - } - } - - // When all else fails, return an empty auth config - return registry.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. 
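ConvertToHostname and ResolveAuthConfig above implement the legacy-key matching for stored credentials. A behavior-equivalent stdlib sketch of the hostname normalization (equivalent for well-formed inputs), with sample values:

package main

import (
	"fmt"
	"strings"
)

// convertToHostname strips an optional http:// or https:// scheme and any
// path, leaving just host[:port] -- the key format used for credentials.
func convertToHostname(url string) string {
	stripped := strings.TrimPrefix(url, "http://")
	stripped = strings.TrimPrefix(stripped, "https://")
	stripped, _, _ = strings.Cut(stripped, "/")
	return stripped
}

func main() {
	fmt.Println(convertToHostname("https://myregistry.example.com:5000/v2/"))
	// myregistry.example.com:5000
	fmt.Println(convertToHostname("myregistry.example.com"))
	// myregistry.example.com
}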
-type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types. -// If a response is received but cannot be interpreted, a PingResponseError will be returned. -func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, error) { - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return nil, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, PingResponseError{ - Err: err, - } - } - - return challengeManager, nil -} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index 433454624a..0000000000 --- a/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,481 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "net" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - - "github.com/containerd/log" - "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/internal/lazyregexp" - "github.com/docker/docker/pkg/homedir" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. - - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. -type serviceConfig registry.ServiceConfig - -// TODO(thaJeztah) both the "index.docker.io" and "registry-1.docker.io" domains -// are here for historic reasons and backward-compatibility. These domains -// are still supported by Docker Hub (and will continue to be supported), but -// there are new domains already in use, and plans to consolidate all legacy -// domains to new "canonical" domains. Once those domains are decided on, we -// should update these consts (but making sure to preserve compatibility with -// existing installs, clients, and user configuration). -const ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryHost is the hostname for the default (Docker Hub) registry - // used for pushing and pulling images. This hostname is hard-coded to handle - // the conversion from image references without registry name (e.g. "ubuntu", - // or "ubuntu:latest"), as well as references using the "docker.io" domain - // name, which is used as canonical reference for images on Docker Hub, but - // does not match the domain-name of Docker Hub's registry. - DefaultRegistryHost = "registry-1.docker.io" - // IndexHostname is the index hostname, used for authentication and image search. 
- IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" -) - -var ( - // DefaultV2Registry is the URI of the default (Docker Hub) registry. - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: DefaultRegistryHost, - } - - validHostPortRegex = lazyregexp.New(`^` + reference.DomainRegexp.String() + `$`) - - // certsDir is used to override defaultCertsDir when running with rootlessKit. - // - // TODO(thaJeztah): change to a sync.OnceValue once we remove [SetCertsDir] - // TODO(thaJeztah): certsDir should not be a package variable, but stored in our config, and passed when needed. - setCertsDirOnce sync.Once - certsDir string -) - -func setCertsDir(dir string) string { - setCertsDirOnce.Do(func() { - if dir != "" { - certsDir = dir - return - } - if os.Getenv("ROOTLESSKIT_STATE_DIR") != "" { - // Configure registry.CertsDir() when running in rootless-mode - // This is the equivalent of [rootless.RunningWithRootlessKit], - // but inlining it to prevent adding that as a dependency - // for docker/cli. - // - // [rootless.RunningWithRootlessKit]: https://github.com/moby/moby/blob/b4bdf12daec84caaf809a639f923f7370d4926ad/pkg/rootless/rootless.go#L5-L8 - if configHome, _ := homedir.GetConfigHome(); configHome != "" { - certsDir = filepath.Join(configHome, "docker/certs.d") - return - } - } - certsDir = defaultCertsDir - }) - return certsDir -} - -// SetCertsDir allows the default certs directory to be changed. This function -// is used at daemon startup to set the correct location when running in -// rootless mode. -// -// Deprecated: the cert-directory is now automatically selected when running with rootlessKit, and should no longer be set manually. -func SetCertsDir(path string) { - setCertsDir(path) -} - -// CertsDir is the directory where certificates are stored. -func CertsDir() string { - // call setCertsDir with an empty path to synchronise with [SetCertsDir] - return setCertsDir("") -} - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { - config := &serviceConfig{} - if err := config.loadMirrors(options.Mirrors); err != nil { - return nil, err - } - if err := config.loadInsecureRegistries(options.InsecureRegistries); err != nil { - return nil, err - } - - return config, nil -} - -// copy constructs a new ServiceConfig with a copy of the configuration in config. -func (config *serviceConfig) copy() *registry.ServiceConfig { - ic := make(map[string]*registry.IndexInfo) - for key, value := range config.IndexConfigs { - ic[key] = value - } - return ®istry.ServiceConfig{ - InsecureRegistryCIDRs: append([]*registry.NetIPNet(nil), config.InsecureRegistryCIDRs...), - IndexConfigs: ic, - Mirrors: append([]string(nil), config.Mirrors...), - } -} - -// loadMirrors loads mirrors to config, after removing duplicates. -// Returns an error if mirrors contains an invalid mirror. -func (config *serviceConfig) loadMirrors(mirrors []string) error { - mMap := map[string]struct{}{} - unique := []string{} - - for _, mirror := range mirrors { - m, err := ValidateMirror(mirror) - if err != nil { - return err - } - if _, exist := mMap[m]; !exist { - mMap[m] = struct{}{} - unique = append(unique, m) - } - } - - config.Mirrors = unique - - // Configure public registry since mirrors may have changed. 
- config.IndexConfigs = map[string]*registry.IndexInfo{ - IndexName: { - Name: IndexName, - Mirrors: unique, - Secure: true, - Official: true, - }, - } - - return nil -} - -// loadInsecureRegistries loads insecure registries to config -func (config *serviceConfig) loadInsecureRegistries(registries []string) error { - // Localhost is by default considered as an insecure registry. This is a - // stop-gap for people who are running a private registry on localhost. - registries = append(registries, "::1/128", "127.0.0.0/8") - - var ( - insecureRegistryCIDRs = make([]*registry.NetIPNet, 0) - indexConfigs = make(map[string]*registry.IndexInfo) - ) - -skip: - for _, r := range registries { - // validate insecure registry - if _, err := ValidateIndexName(r); err != nil { - return err - } - if strings.HasPrefix(strings.ToLower(r), "http://") { - log.G(context.TODO()).Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) - r = r[7:] - } else if strings.HasPrefix(strings.ToLower(r), "https://") { - log.G(context.TODO()).Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) - r = r[8:] - } else if hasScheme(r) { - return invalidParamf("insecure registry %s should not contain '://'", r) - } - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. - data := (*registry.NetIPNet)(ipnet) - for _, value := range insecureRegistryCIDRs { - if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { - continue skip - } - } - // ipnet is not found, add it in config.InsecureRegistryCIDRs - insecureRegistryCIDRs = append(insecureRegistryCIDRs, data) - } else { - if err := validateHostPort(r); err != nil { - return invalidParamWrapf(err, "insecure registry %s is not valid", r) - } - // Assume `host:port` if not CIDR. - indexConfigs[r] = ®istry.IndexInfo{ - Name: r, - Mirrors: []string{}, - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - indexConfigs[IndexName] = ®istry.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - config.InsecureRegistryCIDRs = insecureRegistryCIDRs - config.IndexConfigs = indexConfigs - - return nil -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func (config *serviceConfig) isSecureIndex(indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. 
- if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) -} - -// for mocking in unit tests. -var lookupIP = net.LookupIP - -// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) -// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be -// resolved to IP addresses for matching. If resolution fails, false is returned. -func isCIDRMatch(cidrs []*registry.NetIPNet, URLHost string) bool { - if len(cidrs) == 0 { - return false - } - - host, _, err := net.SplitHostPort(URLHost) - if err != nil { - // Assume URLHost is a host without port and go on. - host = URLHost - } - - var addresses []net.IP - if ip := net.ParseIP(host); ip != nil { - // Host is an IP-address. - addresses = append(addresses, ip) - } else { - // Try to resolve the host's IP-address. - addresses, err = lookupIP(host) - if err != nil { - // We failed to resolve the host; assume there's no match. - return false - } - } - - for _, addr := range addresses { - for _, ipnet := range cidrs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return true - } - } - } - - return false -} - -// ValidateMirror validates an HTTP(S) registry mirror. It is used by the daemon -// to validate the daemon configuration. -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", invalidParamWrapf(err, "invalid mirror: %q is not a valid URI", val) - } - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", invalidParamf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) - } - if uri.RawQuery != "" || uri.Fragment != "" { - return "", invalidParamf("invalid mirror: query or fragment at end of the URI %q", uri) - } - if uri.User != nil { - // strip password from output - uri.User = url.UserPassword(uri.User.Username(), "xxxxx") - return "", invalidParamf("invalid mirror: username/password not allowed in URI %q", uri) - } - return strings.TrimSuffix(val, "/") + "/", nil -} - -// ValidateIndexName validates an index name. It is used by the daemon to -// validate the daemon configuration. -func ValidateIndexName(val string) (string, error) { - val = normalizeIndexName(val) - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", invalidParamf("invalid index name (%s). Cannot begin or end with a hyphen", val) - } - return val, nil -} - -func normalizeIndexName(val string) string { - // TODO(thaJeztah): consider normalizing other known options, such as "(https://)registry-1.docker.io", "https://index.docker.io/v1/". 
- // TODO: upstream this to check to reference package - if val == "index.docker.io" { - return "docker.io" - } - return val -} - -func hasScheme(reposName string) bool { - return strings.Contains(reposName, "://") -} - -func validateHostPort(s string) error { - // Split host and port, and in case s can not be split, assume host only - host, port, err := net.SplitHostPort(s) - if err != nil { - host = s - port = "" - } - // If match against the `host:port` pattern fails, - // it might be `IPv6:port`, which will be captured by net.ParseIP(host) - if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { - return invalidParamf("invalid host %q", host) - } - if port != "" { - v, err := strconv.Atoi(port) - if err != nil { - return err - } - if v < 0 || v > 65535 { - return invalidParamf("invalid port %q", port) - } - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) *registry.IndexInfo { - indexName = normalizeIndexName(indexName) - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index - } - - // Construct a non-configured index info. - return ®istry.IndexInfo{ - Name: indexName, - Mirrors: []string{}, - Secure: config.isSecureIndex(indexName), - } -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func GetAuthConfigKey(index *registry.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) *RepositoryInfo { - index := newIndexInfo(config, reference.Domain(name)) - var officialRepo bool - if index.Official { - // RepositoryInfo.Official indicates whether the image repository - // is an official (docker library official images) repository. - // - // We only need to check this if the image-repository is on Docker Hub. - officialRepo = !strings.ContainsRune(reference.FamiliarName(name), '/') - } - - return &RepositoryInfo{ - Name: reference.TrimNamed(name), - Index: index, - Official: officialRepo, - } -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a -// [RepositoryInfo], but lacks registry configuration. -// -// It is used by the Docker cli to interact with registry-related endpoints. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - indexName := normalizeIndexName(reference.Domain(reposName)) - if indexName == IndexName { - officialRepo := !strings.ContainsRune(reference.FamiliarName(reposName), '/') - return &RepositoryInfo{ - Name: reference.TrimNamed(reposName), - Index: ®istry.IndexInfo{ - Name: IndexName, - Mirrors: []string{}, - Secure: true, - Official: true, - }, - Official: officialRepo, - }, nil - } - - insecure := false - if isInsecure(indexName) { - insecure = true - } - - return &RepositoryInfo{ - Name: reference.TrimNamed(reposName), - Index: ®istry.IndexInfo{ - Name: indexName, - Mirrors: []string{}, - Secure: !insecure, - }, - }, nil -} - -// isInsecure is used to detect whether a registry domain or IP-address is allowed -// to use an insecure (non-TLS, or self-signed cert) connection according to the -// defaults, which allows for insecure connections with registries running on a -// loopback address ("localhost", "::1/128", "127.0.0.0/8"). 
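A small sketch of how ParseRepositoryInfo above is typically driven from a familiar image reference, assuming the upstream github.com/distribution/reference package:

package main

import (
	"fmt"

	"github.com/distribution/reference"
	"github.com/docker/docker/registry"
)

func main() {
	// "ubuntu" normalizes to docker.io/library/ubuntu.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}

	info, err := registry.ParseRepositoryInfo(named)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Index.Name)   // docker.io
	fmt.Println(info.Index.Secure) // true
	fmt.Println(info.Official)     // true: no "/" in the familiar name
}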
-// -// It is used in situations where we don't have access to the daemon's configuration, -// for example, when used from the client / CLI. -func isInsecure(hostNameOrIP string) bool { - // Attempt to strip port if present; this also strips brackets for - // IPv6 addresses with a port (e.g. "[::1]:5000"). - // - // This is best-effort; we'll continue using the address as-is if it fails. - if host, _, err := net.SplitHostPort(hostNameOrIP); err == nil { - hostNameOrIP = host - } - if hostNameOrIP == "127.0.0.1" || hostNameOrIP == "::1" || strings.EqualFold(hostNameOrIP, "localhost") { - // Fast path; no need to resolve these, assuming nobody overrides - // "localhost" for anything else than a loopback address (sorry, not sorry). - return true - } - - var addresses []net.IP - if ip := net.ParseIP(hostNameOrIP); ip != nil { - addresses = append(addresses, ip) - } else { - // Try to resolve the host's IP-addresses. - addrs, _ := lookupIP(hostNameOrIP) - addresses = append(addresses, addrs...) - } - - for _, addr := range addresses { - if addr.IsLoopback() { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index 2142049305..0000000000 --- a/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows - -package registry // import "github.com/docker/docker/registry" - -// defaultCertsDir is the platform-specific default directory where certificates -// are stored. On Linux, it may be overridden through certsDir, for example, when -// running in rootless mode. -const defaultCertsDir = "/etc/docker/certs.d" - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index 2674f2818a..0000000000 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "os" - "path/filepath" - "strings" -) - -// defaultCertsDir is the platform-specific default directory where certificates -// are stored. On Linux, it may be overridden through certsDir, for example, when -// running in rootless mode. -var defaultCertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.ReplaceAll(s, ":", "")) -} diff --git a/vendor/github.com/docker/docker/registry/errors.go b/vendor/github.com/docker/docker/registry/errors.go deleted file mode 100644 index 7dc20ad8ff..0000000000 --- a/vendor/github.com/docker/docker/registry/errors.go +++ /dev/null @@ -1,36 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -func translateV2AuthError(err error) error { - switch e := err.(type) { - case *url.Error: - switch e2 := e.Err.(type) { - case errcode.Error: - switch e2.Code { - case errcode.ErrorCodeUnauthorized: - return errdefs.Unauthorized(err) - } - } - } - - return err -} - -func invalidParam(err error) error { - return errdefs.InvalidParameter(err) -} - -func invalidParamf(format string, args ...interface{}) error { - return errdefs.InvalidParameter(errors.Errorf(format, args...)) -} - -func invalidParamWrapf(err error, format string, args ...interface{}) error { - return errdefs.InvalidParameter(errors.Wrapf(err, format, args...)) -} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index a26f976cee..0000000000 --- a/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,144 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/tlsconfig" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" -) - -// HostCertsDir returns the config directory for a specific host. -// -// Deprecated: this function was only used internally, and will be removed in a future release. -func HostCertsDir(hostname string) string { - return hostCertsDir(hostname) -} - -// hostCertsDir returns the config directory for a specific host. -func hostCertsDir(hostname string) string { - return filepath.Join(CertsDir(), cleanPath(hostname)) -} - -// newTLSConfig constructs a client TLS configuration based on server defaults -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure { - hostDir := hostCertsDir(hostname) - log.G(context.TODO()).Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.DirEntry, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. 
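ReadCertsDirectory below consumes the per-host certs.d layout: ca.crt files extend the root pool, while .cert/.key pairs become client certificates. A hedged sketch of calling it against that convention (the host directory is illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/registry"
	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	tlsCfg := tlsconfig.ServerDefault()

	// Expected layout (illustrative host):
	//   /etc/docker/certs.d/myregistry.example.com:5000/ca.crt
	//   /etc/docker/certs.d/myregistry.example.com:5000/client.cert
	//   /etc/docker/certs.d/myregistry.example.com:5000/client.key
	err := registry.ReadCertsDirectory(tlsCfg, "/etc/docker/certs.d/myregistry.example.com:5000")
	fmt.Println(err, len(tlsCfg.Certificates))
}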
-func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := os.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return invalidParam(err) - } - - for _, f := range fs { - switch filepath.Ext(f.Name()) { - case ".crt": - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return invalidParamWrapf(err, "unable to get system cert pool") - } - tlsConfig.RootCAs = systemPool - } - fileName := filepath.Join(directory, f.Name()) - log.G(context.TODO()).Debugf("crt: %s", fileName) - data, err := os.ReadFile(fileName) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - case ".cert": - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - log.G(context.TODO()).Debugf("cert: %s", filepath.Join(directory, certName)) - if !hasFile(fs, keyName) { - return invalidParamf("missing key %s for client certificate %s. CA certificates must use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - case ".key": - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - log.G(context.TODO()).Debugf("key: %s", filepath.Join(directory, keyName)) - if !hasFile(fs, certName) { - return invalidParamf("missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// Headers returns request modifiers with a User-Agent and metaHeaders -func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// newTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. -func newTransport(tlsConfig *tls.Config) http.RoundTripper { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - } - - return otelhttp.NewTransport( - &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: direct.DialContext, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - }, - ) -} diff --git a/vendor/github.com/docker/docker/registry/search.go b/vendor/github.com/docker/docker/registry/search.go deleted file mode 100644 index 8f4739ac0e..0000000000 --- a/vendor/github.com/docker/docker/registry/search.go +++ /dev/null @@ -1,177 +0,0 @@ -package registry - -import ( - "context" - "net/http" - "strconv" - "strings" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -var acceptedSearchFilterTags = map[string]bool{ - "is-automated": true, // Deprecated: the "is_automated" field is deprecated and will always be false in the future. 
- "is-official": true, - "stars": true, -} - -// Search queries the public registry for repositories matching the specified -// search term and filters. -func (s *Service) Search(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) ([]registry.SearchResult, error) { - if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { - return nil, err - } - - isAutomated, err := searchFilters.GetBoolOrDefault("is-automated", false) - if err != nil { - return nil, err - } - - // "is-automated" is deprecated and filtering for `true` will yield no results. - if isAutomated { - return []registry.SearchResult{}, nil - } - - isOfficial, err := searchFilters.GetBoolOrDefault("is-official", false) - if err != nil { - return nil, err - } - - hasStarFilter := 0 - if searchFilters.Contains("stars") { - hasStars := searchFilters.Get("stars") - for _, hasStar := range hasStars { - iHasStar, err := strconv.Atoi(hasStar) - if err != nil { - return nil, errdefs.InvalidParameter(errors.Wrapf(err, "invalid filter 'stars=%s'", hasStar)) - } - if iHasStar > hasStarFilter { - hasStarFilter = iHasStar - } - } - } - - unfilteredResult, err := s.searchUnfiltered(ctx, term, limit, authConfig, headers) - if err != nil { - return nil, err - } - - filteredResults := []registry.SearchResult{} - for _, result := range unfilteredResult.Results { - if searchFilters.Contains("is-official") { - if isOfficial != result.IsOfficial { - continue - } - } - if searchFilters.Contains("stars") { - if result.StarCount < hasStarFilter { - continue - } - } - // "is-automated" is deprecated and the value in Docker Hub search - // results is untrustworthy. Force it to false so as to not mislead our - // clients. - result.IsAutomated = false //nolint:staticcheck // ignore SA1019 (field is deprecated) - filteredResults = append(filteredResults, result) - } - - return filteredResults, nil -} - -func (s *Service) searchUnfiltered(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, headers http.Header) (*registry.SearchResults, error) { - // TODO Use ctx when searching for repositories - if hasScheme(term) { - return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term) - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid block others. - s.mu.RLock() - index := newIndexInfo(s.config, indexName) - s.mu.RUnlock() - if index.Official { - // If pull "library/foo", it's stored locally under "foo" - remoteName = strings.TrimPrefix(remoteName, "library/") - } - - endpoint, err := newV1Endpoint(index, headers) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - - // TODO(thaJeztah); is there a reason not to include other headers here? 
(originally added in 19d48f0b8ba59eea9f2cac4ad1c7977712a6b7ac) - modifiers := Headers(headers.Get("User-Agent"), nil) - v2Client, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, []auth.Scope{ - auth.RegistryScope{Name: "catalog", Actions: []string{"search"}}, - }) - if err != nil { - return nil, err - } - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - log.G(ctx).Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } else { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - return newSession(client, endpoint).searchRepositories(remoteName, limit) -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), - // use the default Docker Hub registry (docker.io) - return IndexName, reposName - } - return nameParts[0], nameParts[1] -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -// -// TODO(thaJeztah) this function is only used by the CLI, and used to get -// information of the registry (to provide credentials if needed). We should -// move this function (or equivalent) to the CLI, as it's doing too much just -// for that. -func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - indexName = normalizeIndexName(indexName) - if indexName == IndexName { - return ®istry.IndexInfo{ - Name: IndexName, - Mirrors: []string{}, - Secure: true, - Official: true, - }, nil - } - - insecure := false - if isInsecure(indexName) { - insecure = true - } - - return ®istry.IndexInfo{ - Name: indexName, - Mirrors: []string{}, - Secure: !insecure, - }, nil -} diff --git a/vendor/github.com/docker/docker/registry/search_endpoint_v1.go b/vendor/github.com/docker/docker/registry/search_endpoint_v1.go deleted file mode 100644 index f6c369a93b..0000000000 --- a/vendor/github.com/docker/docker/registry/search_endpoint_v1.go +++ /dev/null @@ -1,200 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "encoding/json" - "net/http" - "net/url" - "strings" - - "github.com/containerd/log" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types/registry" -) - -// v1PingResult contains the information returned when pinging a registry. It -// indicates whether the registry claims to be a standalone registry. -type v1PingResult struct { - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// v1Endpoint stores basic information about a V1 registry endpoint. -type v1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// newV1Endpoint parses the given address to return a registry endpoint. -// TODO: remove. This is only used by search. 
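splitReposSearchTerm above is unexported; a standalone sketch of its index/remote split, using the same heuristic (a first path segment containing "." or ":", or equal to "localhost", is treated as a registry host):

package main

import (
	"fmt"
	"strings"
)

func splitSearchTerm(reposName string) (indexName, remoteName string) {
	parts := strings.SplitN(reposName, "/", 2)
	if len(parts) == 1 || (!strings.Contains(parts[0], ".") &&
		!strings.Contains(parts[0], ":") && parts[0] != "localhost") {
		return "docker.io", reposName // default to Docker Hub
	}
	return parts[0], parts[1]
}

func main() {
	fmt.Println(splitSearchTerm("samalba/hipache"))            // docker.io samalba/hipache
	fmt.Println(splitSearchTerm("myregistry.example.com/foo")) // myregistry.example.com foo
	fmt.Println(splitSearchTerm("localhost:5000/bar"))         // localhost:5000 bar
}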
-func newV1Endpoint(index *registry.IndexInfo, headers http.Header) (*v1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, headers) - if err != nil { - return nil, err - } - - if endpoint.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fall back to http in case of error) - return endpoint, nil - } - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fall back to HTTP. - return nil, invalidParamf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // registry is insecure and HTTPS failed, fallback to HTTP. - log.G(context.TODO()).WithError(err).Debugf("error from registry %q marked as insecure - insecurely falling back to HTTP", endpoint) - endpoint.URL.Scheme = "http" - if _, err2 := endpoint.ping(); err2 != nil { - return nil, invalidParamf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - } - - return endpoint, nil -} - -// trimV1Address trims the "v1" version suffix off the address and returns -// the trimmed address. It returns an error on "v2" endpoints. -func trimV1Address(address string) (string, error) { - trimmed := strings.TrimSuffix(address, "/") - if strings.HasSuffix(trimmed, "/v2") { - return "", invalidParamf("search is not supported on v2 endpoints: %s", address) - } - return strings.TrimSuffix(trimmed, "/v1"), nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, headers http.Header) (*v1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, invalidParam(err) - } - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := newTransport(tlsConfig) - - return &v1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: uri, - client: httpClient(transport.NewTransport(tr, Headers("", headers)...)), - }, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *v1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// ping returns a v1PingResult which indicates whether the registry is standalone or not. 
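The ping contract below can be exercised against a fake endpoint. A hedged sketch using net/http/httptest that serves the X-Docker-Registry-Standalone header the ping method looks for, and parses it the same way:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The registry signals standalone mode via this header;
		// accepted values are "1" and "true" (case-insensitive).
		w.Header().Set("X-Docker-Registry-Standalone", "true")
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/v1/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	v := resp.Header.Get("X-Docker-Registry-Standalone")
	standalone := v == "1" || strings.EqualFold(v, "true")
	fmt.Println(standalone) // true
}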
-func (e *v1Endpoint) ping() (v1PingResult, error) { - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return v1PingResult{}, nil - } - - pingURL := e.String() + "_ping" - log.G(context.TODO()).WithField("url", pingURL).Debug("attempting v1 ping for registry endpoint") - req, err := http.NewRequest(http.MethodGet, pingURL, nil) - if err != nil { - return v1PingResult{}, invalidParam(err) - } - - resp, err := e.client.Do(req) - if err != nil { - return v1PingResult{}, invalidParam(err) - } - - defer resp.Body.Close() - - if v := resp.Header.Get("X-Docker-Registry-Standalone"); v != "" { - info := v1PingResult{} - // Accepted values are "1", and "true" (case-insensitive). - if v == "1" || strings.EqualFold(v, "true") { - info.Standalone = true - } - log.G(context.TODO()).Debugf("v1PingResult.Standalone (from X-Docker-Registry-Standalone header): %t", info.Standalone) - return info, nil - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := v1PingResult{ - Standalone: true, - } - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - log.G(context.TODO()).WithError(err).Debug("error unmarshaling _ping response") - // don't stop here. Just assume sane defaults - } - - log.G(context.TODO()).Debugf("v1PingResult.Standalone: %t", info.Standalone) - return info, nil -} - -// httpClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func httpClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if len(via) != 0 && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/registry/search_session.go b/vendor/github.com/docker/docker/registry/search_session.go deleted file mode 100644 index a3d2c894d2..0000000000 --- a/vendor/github.com/docker/docker/registry/search_session.go +++ /dev/null @@ -1,247 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - // this is required for some certificates - "context" - _ "crypto/sha512" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - "sync" - - "github.com/containerd/log" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// A session is used to communicate with a V1 registry -type session struct { - indexEndpoint *v1Endpoint - client *http.Client -} - -type authTransport struct { - http.RoundTripper - *registry.AuthConfig - - alwaysSetBasicAuth 
bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// newAuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func newAuthTransport(base http.RoundTripper, authConfig *registry.AuthConfig, alwaysSetBasicAuth bool) *authTransport { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// onEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type onEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *onEOFReader) Read(p []byte) (int, error) { - n, err := r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return n, err -} - -// Close closes the file and run the function. -func (r *onEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. 
- if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &onEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *registry.AuthConfig, endpoint *v1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - log.G(context.TODO()).Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = newAuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errdefs.System(errors.New("cookiejar.New is not supposed to return an error")) - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, endpoint *v1Endpoint) *session { - return &session{ - client: client, - indexEndpoint: endpoint, - } -} - -// defaultSearchLimit is the default value for maximum number of returned search results. -const defaultSearchLimit = 25 - -// searchRepositories performs a search against the remote repository -func (r *session) searchRepositories(term string, limit int) (*registry.SearchResults, error) { - if limit == 0 { - limit = defaultSearchLimit - } - if limit < 1 || limit > 100 { - return nil, invalidParamf("limit %d is outside the range of [1, 100]", limit) - } - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - log.G(context.TODO()).WithField("url", u).Debug("searchRepositories") - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, invalidParamWrapf(err, "error building request") - } - // Have the AuthTransport send authentication, when logged in. 
- req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, errdefs.System(err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // TODO(thaJeztah): return upstream response body for errors (see https://github.com/moby/moby/issues/27286). - return nil, errdefs.Unknown(fmt.Errorf("Unexpected status code %d", res.StatusCode)) - } - result := ®istry.SearchResults{} - err = json.NewDecoder(res.Body).Decode(result) - if err != nil { - return nil, errdefs.System(errors.Wrap(err, "error decoding registry search results")) - } - return result, nil -} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index bc67a43451..0000000000 --- a/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,147 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net/url" - "strings" - "sync" - - "github.com/containerd/log" - "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// Service is a registry service. It tracks configuration data such as a list -// of mirrors. -type Service struct { - config *serviceConfig - mu sync.RWMutex -} - -// NewService returns a new instance of [Service] ready to be installed into -// an engine. -func NewService(options ServiceOptions) (*Service, error) { - config, err := newServiceConfig(options) - - return &Service{config: config}, err -} - -// ServiceConfig returns a copy of the public registry service's configuration. -func (s *Service) ServiceConfig() *registry.ServiceConfig { - s.mu.RLock() - defer s.mu.RUnlock() - return s.config.copy() -} - -// ReplaceConfig prepares a transaction which will atomically replace the -// registry service's configuration when the returned commit function is called. -func (s *Service) ReplaceConfig(options ServiceOptions) (commit func(), err error) { - config, err := newServiceConfig(options) - if err != nil { - return nil, err - } - return func() { - s.mu.Lock() - defer s.mu.Unlock() - s.config = config - }, nil -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (statusMessage, token string, _ error) { - // TODO Use ctx when searching for repositories - registryHostName := IndexHostname - - if authConfig.ServerAddress != "" { - serverAddress := authConfig.ServerAddress - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", invalidParamWrapf(err, "unable to parse server address") - } - registryHostName = u.Host - } - - // Lookup endpoints for authentication but exclude mirrors to prevent - // sending credentials of the upstream registry to a mirror. 
- s.mu.RLock() - endpoints, err := s.lookupV2Endpoints(registryHostName, false) - s.mu.RUnlock() - if err != nil { - return "", "", invalidParam(err) - } - - var lastErr error - for _, endpoint := range endpoints { - authToken, err := loginV2(authConfig, endpoint, userAgent) - if err != nil { - if errdefs.IsUnauthorized(err) { - // Failed to authenticate; don't continue with (non-TLS) endpoints. - return "", "", err - } - // Try next endpoint - log.G(ctx).WithFields(log.Fields{ - "error": err, - "endpoint": endpoint, - }).Infof("Error logging in to endpoint, trying next endpoint") - lastErr = err - continue - } - - // TODO(thaJeztah): move the statusMessage to the API endpoint; we don't need to produce that here? - return "Login Succeeded", authToken, nil - } - - return "", "", lastErr -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. -func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.RLock() - defer s.mu.RUnlock() - // TODO(thaJeztah): remove error return as it's no longer used. - return newRepositoryInfo(s.config, name), nil -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - AllowNondistributableArtifacts bool // Deprecated: non-distributable artifacts are deprecated and enabled by default. This field will be removed in the next release. - Official bool - TrimHostname bool // Deprecated: hostname is now trimmed unconditionally for remote names. This field will be removed in the next release. - TLSConfig *tls.Config -} - -// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. -// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. -func (s *Service) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.lookupV2Endpoints(hostname, true) -} - -// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. -// It gives preference to HTTPS over plain HTTP. Mirrors are not included. -func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.lookupV2Endpoints(hostname, false) -} - -// IsInsecureRegistry returns true if the registry at given host is configured as -// insecure registry. 
-func (s *Service) IsInsecureRegistry(host string) bool { - s.mu.RLock() - defer s.mu.RUnlock() - return !s.config.isSecureIndex(host) -} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 43754527a2..0000000000 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,69 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *Service) lookupV2Endpoints(hostname string, includeMirrors bool) ([]APIEndpoint, error) { - var endpoints []APIEndpoint - if hostname == DefaultNamespace || hostname == IndexHostname { - if includeMirrors { - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, invalidParam(err) - } - mirrorTLSConfig, err := newTLSConfig(mirrorURL.Host, s.config.isSecureIndex(mirrorURL.Host)) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - Mirror: true, - TLSConfig: mirrorTLSConfig, - }) - } - } - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Official: true, - TLSConfig: tlsconfig.ServerDefault(), - }) - - return endpoints, nil - } - - tlsConfig, err := newTLSConfig(hostname, s.config.isSecureIndex(hostname)) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 63ace0fbad..0000000000 --- a/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,24 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "github.com/distribution/reference" - "github.com/docker/docker/api/types/registry" -) - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - Name reference.Named - // Index points to registry information - Index *registry.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - // - // Deprecated: this field is no longer used and will be removed in the next release. The information captured in this field can be obtained from the [Name] field instead. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". - // - // Deprecated: this field is no longer used, and will be removed in the next release. - Class string -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 606c98a38b..8b0264f68b 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -34,51 +34,37 @@ type Options struct { // the system pool will be used. 
ExclusiveRootPools bool MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted. - // - // Deprecated: Use of encrypted TLS private keys has been deprecated, and - // will be removed in a future release. Golang has deprecated support for - // legacy PEM encryption (as specified in RFC 1423), as it is insecure by - // design (see https://go-review.googlesource.com/c/go/+/264159). - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, } // DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls // options struct but wants to use a commonly accepted set of TLS cipher suites, with // known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) +var DefaultServerAcceptedCiphers = defaultCipherSuites + +// defaultCipherSuites is shared by both client and server as the default set. +var defaultCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsConfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsConfig) - } - - return tlsConfig + return defaultConfig(ops...) } // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + return defaultConfig(ops...) +} + +// defaultConfig is the default config used by both client and server TLS configuration. 
+func defaultConfig(ops ...func(*tls.Config)) *tls.Config { tlsConfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum + // Avoid fallback by default to SSL protocols < TLS1.2 MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, + CipherSuites: defaultCipherSuites, } for _, op := range ops { @@ -92,13 +78,13 @@ func ClientDefault(ops ...func(*tls.Config)) *tls.Config { func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca var ( - certPool *x509.CertPool - err error + pool *x509.CertPool + err error ) if exclusivePool { - certPool = x509.NewCertPool() + pool = x509.NewCertPool() } else { - certPool, err = SystemCertPool() + pool, err = SystemCertPool() if err != nil { return nil, fmt.Errorf("failed to read system certificates: %v", err) } @@ -107,10 +93,10 @@ func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { if err != nil { return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } - if !certPool.AppendCertsFromPEM(pemData) { + if !pool.AppendCertsFromPEM(pemData) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) } - return certPool, nil + return pool, nil } // allTLSVersions lists all the TLS versions and is used by the code that validates @@ -144,34 +130,32 @@ func adjustMinVersion(options Options, config *tls.Config) error { return nil } -// IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key. +// errEncryptedKeyDeprecated is produced when we encounter an encrypted +// (password-protected) key. From https://go-review.googlesource.com/c/go/+/264159; // -// Deprecated: Use of encrypted TLS private keys has been deprecated, and -// will be removed in a future release. Golang has deprecated support for -// legacy PEM encryption (as specified in RFC 1423), as it is insecure by -// design (see https://go-review.googlesource.com/c/go/+/264159). -func IsErrEncryptedKey(err error) bool { - return errors.Is(err, x509.IncorrectPasswordError) -} +// > Legacy PEM encryption as specified in RFC 1423 is insecure by design. Since +// > it does not authenticate the ciphertext, it is vulnerable to padding oracle +// > attacks that can let an attacker recover the plaintext +// > +// > It's unfortunate that we don't implement PKCS#8 encryption so we can't +// > recommend an alternative but PEM encryption is so broken that it's worth +// > deprecating outright. +// +// Also see https://docs.docker.com/go/deprecated/ +var errEncryptedKeyDeprecated = errors.New("private key is encrypted; encrypted private keys are obsolete, and not supported") // getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. -// If the private key is encrypted, 'passphrase' is used to decrypted the -// private key. -func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { +// It returns an error if the file could not be decoded or was protected by +// a passphrase. 
+func getPrivateKey(keyBytes []byte) ([]byte, error) { // this section makes some small changes to code from notary/tuf/utils/x509.go pemBlock, _ := pem.Decode(keyBytes) if pemBlock == nil { return nil, fmt.Errorf("no valid private key found") } - var err error if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) - if err != nil { - return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + return nil, errEncryptedKeyDeprecated } return keyBytes, nil @@ -195,7 +179,7 @@ func getCert(options Options) ([]tls.Certificate, error) { return nil, err } - prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + prKeyBytes, err = getPrivateKey(prKeyBytes) if err != nil { return nil, err } @@ -210,7 +194,7 @@ func getCert(options Options) ([]tls.Certificate, error) { // Client returns a TLS configuration meant to be used by a client. func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() + tlsConfig := defaultConfig() tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify if !options.InsecureSkipVerify && options.CAFile != "" { CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) @@ -235,7 +219,7 @@ func Client(options Options) (*tls.Config, error) { // Server returns a TLS configuration meant to be used by a server. func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() + tlsConfig := defaultConfig() tlsConfig.ClientAuth = options.ClientAuth tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index a82f9fa52e..0000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md index 0acafc279a..662d855fb3 100644 --- a/vendor/github.com/docker/go-events/README.md +++ b/vendor/github.com/docker/go-events/README.md @@ -1,7 +1,6 @@ # Docker Events Package [![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) -[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) The Docker `events` package implements a composable event distribution package for Go. 
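
For context on the `tlsconfig` changes above (one shared `defaultConfig` with TLS >= 1.2 and the four ECDHE GCM suites; encrypted PEM keys now rejected outright instead of decrypted via the removed `Passphrase` option), here is a minimal sketch of the exported API this patch leaves in place. The certificate paths are placeholders, not part of the patch:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Build a client-side *tls.Config from PEM files on disk.
	// A passphrase-protected key now fails with an error.
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/etc/docker/ca.pem",   // placeholder path
		CertFile: "/etc/docker/cert.pem", // placeholder path
		KeyFile:  "/etc/docker/key.pem",  // placeholder path
	})
	if err != nil {
		log.Fatal(err)
	}
	// Client and server defaults now share the same baseline.
	fmt.Println(cfg.MinVersion == tls.VersionTLS12) // true
}
```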
diff --git a/vendor/github.com/docker/go-events/SECURITY.md b/vendor/github.com/docker/go-events/SECURITY.md new file mode 100644 index 0000000000..610eef2c9e --- /dev/null +++ b/vendor/github.com/docker/go-events/SECURITY.md @@ -0,0 +1,36 @@ +# Security Policy + +The maintainers of the Docker Events package take security seriously. If you discover +a security issue, please bring it to their attention right away! + +## Reporting a Vulnerability + +Please **DO NOT** file a public issue, instead send your report privately +to [security@docker.com](mailto:security@docker.com). + +Reporter(s) can expect a response within 72 hours, acknowledging the issue was +received. + +## Review Process + +After receiving the report, an initial triage and technical analysis is +performed to confirm the report and determine its scope. We may request +additional information in this stage of the process. + +Once a reviewer has confirmed the relevance of the report, a draft security +advisory will be created on GitHub. The draft advisory will be used to discuss +the issue with maintainers, the reporter(s), and where applicable, other +affected parties under embargo. + +If the vulnerability is accepted, a timeline for developing a patch, public +disclosure, and patch release will be determined. If there is an embargo period +on public disclosure before the patch release, the reporter(s) are expected to +participate in the discussion of the timeline and abide by agreed upon dates +for public disclosure. + +## Accreditation + +Security reports are greatly appreciated and we will publicly thank you, +although we will keep your name confidential if you request it. We also like to +send gifts - if you're into swag, make sure to let us know. We do not currently +offer a paid security bounty program at this time. 
diff --git a/vendor/github.com/docker/go-events/vendor.mod b/vendor/github.com/docker/go-events/vendor.mod new file mode 100644 index 0000000000..66a2cd9e5c --- /dev/null +++ b/vendor/github.com/docker/go-events/vendor.mod @@ -0,0 +1,5 @@ +module github.com/docker/go-events + +go 1.13 + +require github.com/sirupsen/logrus v1.9.3 diff --git a/vendor/github.com/docker/go-events/vendor.sum b/vendor/github.com/docker/go-events/vendor.sum new file mode 100644 index 0000000000..9243c28735 --- /dev/null +++ b/vendor/github.com/docker/go-events/vendor.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca9..6f24dfff56 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,26 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) + +## [v3.12.1] - 2024-05-28 + +- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen) + +## [v3.12.0] - 2024-03-11 + +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 + +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
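
The go-restful changelog above lists a new `Flush` method (#529, #538); the `compress.go` hunk below forwards it to the wrapped writer when that writer implements `http.Flusher`. As a minimal standalone sketch of the same pass-through pattern (the names here are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// flushingWriter wraps an http.ResponseWriter and forwards Flush to the
// underlying writer when it implements http.Flusher; otherwise Flush is
// a no-op, mirroring CompressingResponseWriter.Flush below.
type flushingWriter struct{ http.ResponseWriter }

func (w flushingWriter) Flush() {
	if f, ok := w.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	}
}

func main() {
	http.HandleFunc("/stream", func(rw http.ResponseWriter, _ *http.Request) {
		w := flushingWriter{rw}
		fmt.Fprintln(w, "first chunk")
		w.Flush() // push bytes to the client before the handler returns
		fmt.Fprintln(w, "second chunk")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```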
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 95a05a0894..3fb40d1980 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,9 +2,8 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99f..80adf55fdf 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. +func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index ba1fc5d5f1..6fd2bcd5a1 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + for _, eachRoute := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb) if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers? 
} } sort.Sort(candidates) @@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ + if routeHasCustomVerb && hasCustomVerb(routeToken) { if !isMatchCustomVerb(routeToken, requestToken) { return false, 0, 0 } @@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques // detectWebService returns the best matching webService given the list of path tokens. // see also computeWebserviceScore func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService + var bestWs *WebService score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + for _, eachWS := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens) if matches && (eachScore > score) { - best = each + bestWs = eachWS score = eachScore } } - return best + return bestWs } // computeWebserviceScore returns whether tokens match and // the weighted score of the longest matching consecutive tokens from the beginning. -func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) { + if len(routeTokens) > len(requestTokens) { return false, 0 } score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { + for i := 0; i < len(routeTokens); i++ { + eachRequestToken := requestTokens[i] + eachRouteToken := routeTokens[i] + if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 { score++ continue } - if len(other) > 0 && strings.HasPrefix(other, "{") { + if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") { // no empty match - if len(each) == 0 { + if len(eachRequestToken) == 0 { return false, score } - score += 1 + score++ + + if colon := strings.Index(eachRouteToken, ":"); colon != -1 { + // match by regex + matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken) + if matchesToken { + score++ // extra score for regex match + } + } } else { // not a parameter - if each != other { + if eachRequestToken != eachRouteToken { return false, score } - score += (len(tokens) - i) * 10 //fuzzy + score += (len(routeTokens) - i) * 10 //fuzzy } } return true, score diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e94..7f04bd9053 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) } - if 
httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) } - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && length == "" { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be77..a2056e2acb 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 97e319b21b..86fefd5bf7 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie go get -u github.com/evanphx/json-patch/v5 ``` -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` +If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index cd0274e1e4..95136681ba 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -3,11 +3,10 @@ package jsonpatch import ( "bytes" "encoding/json" + "errors" "fmt" "strconv" "strings" - - "github.com/pkg/errors" ) const ( @@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") + return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing) } // From reads the "from" field of the Operation. 
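
The `patch.go` hunks here swap `github.com/pkg/errors.Wrapf` for stdlib `fmt.Errorf` with the `%w` verb. A minimal sketch of why callers are unaffected by the migration; `ErrMissing` below is a local stand-in for the package's sentinel error:

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-in for jsonpatch's exported sentinel error.
var ErrMissing = errors.New("missing value")

func main() {
	// Before: errors.Wrapf(ErrMissing, "operation missing path field")
	// After:  fmt.Errorf("operation missing path field: %w", ErrMissing)
	err := fmt.Errorf("operation missing path field: %w", ErrMissing)

	// %w keeps the sentinel in the error chain, so errors.Is still matches.
	fmt.Println(errors.Is(err, ErrMissing)) // true
}
```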
@@ -294,7 +293,7 @@ func (o Operation) From() (string, error) { return op, nil } - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") + return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing) } func (o Operation) value() *lazyNode { @@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) { return v, nil } - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") + return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing) } func isArray(buf []byte) bool { @@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) { func (d *partialDoc) remove(key string) error { _, ok := (*d)[key] if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing) } delete(*d, key) @@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error { if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } @@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { idx, err := strconv.Atoi(key) if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + return fmt.Errorf("value was not a proper array index: '%s': %w", key, err) } sz := len(*d) + 1 @@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(ary) } @@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) { if idx < 0 { if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(*d) } if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } return (*d)[idx], nil @@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < 0 { if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to 
access invalid index: %d: %w", idx, ErrInvalidIndex) } if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex) } idx += len(cur) } @@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error { func (p Patch) add(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") + return fmt.Errorf("add operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.add(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) + return fmt.Errorf("error in add for path: '%s': %w", path, err) } return nil @@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error { func (p Patch) remove(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error { func (p Patch) replace(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") + return fmt.Errorf("replace operation failed to decode path: %w", err) } if path == "" { @@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error { if val.which == eRaw { if !val.tryDoc() { if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") + return fmt.Errorf("replace operation value must be object or array: %w", err) } } } @@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) error { case eDoc: *doc = &val.doc case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") + return fmt.Errorf("replace operation hit impossible case: %w", err) } return nil @@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error { con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing) } _, ok := con.get(key) if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing) } err = con.set(key, op.value()) if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) + return fmt.Errorf("error in remove for path: '%s': %w", path, err) } return nil @@ -613,39 +612,39 @@ func (p Patch) replace(doc 
*container, op Operation) error { func (p Patch) move(doc *container, op Operation) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") + return fmt.Errorf("move operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } err = con.remove(key) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) + return fmt.Errorf("error in move for path: '%s': %w", key, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") + return fmt.Errorf("move operation failed to decode path: %w", err) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } err = con.add(key, val) if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) + return fmt.Errorf("error in move for path: '%s': %w", path, err) } return nil @@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error { func (p Patch) test(doc *container, op Operation) error { path, err := op.Path() if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") + return fmt.Errorf("test operation failed to decode path: %w", err) } if path == "" { @@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } con, key := findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) + return fmt.Errorf("error in test for path: '%s': %w", path, err) } if val == nil { if op.value() == nil || op.value().raw == nil { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } if val.equal(op.value()) { return nil } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed) } func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { from, err := op.From() if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") + return fmt.Errorf("copy operation failed to decode from: %w", err) } con, key := findObject(doc, from) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + return fmt.Errorf("copy operation does not apply: doc is missing from 
path: %s: %w", from, ErrMissing) } val, err := con.get(key) if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) + return fmt.Errorf("error in copy for from: '%s': %w", from, err) } path, err := op.Path() if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing) } con, key = findObject(doc, path) if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing) } valCopy, sz, err := deepCopy(val) if err != nil { - return errors.Wrapf(err, "error while performing deep copy") + return fmt.Errorf("error while performing deep copy: %w", err) } (*accumulatedCopySize) += int64(sz) @@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er err = con.add(key, valCopy) if err != nil { - return errors.Wrapf(err, "error while adding value during copy") + return fmt.Errorf("error while adding value during copy: %w", err) } return nil diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md index af0a79507e..da9f9e6f09 100644 --- a/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -1,6 +1,4 @@ -# CBOR Codec in Go - - +
+CBOR Codec Go logo
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). @@ -8,23 +6,26 @@ CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name `fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. + Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
-<details><summary>Highlights</summary>
+<details><summary>🔎&nbsp;&nbsp;Highlights</summary>
__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,201 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -
-<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary>
- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -
-</details>
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
-<details><summary>Benchmark details</summary>
- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -
-</details>
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -


-</details>
-
-Example using different struct tags together:
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> <details><summary>🔎  Benchmark details</summary>

+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>
+> </details>

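The rejection benchmark summarized above is straightforward to reproduce. Here is a minimal sketch, assuming the `fxamacker/cbor/v2` module is on the module path; the 10-byte input is the same truncated-array header used in the tables, and the file and benchmark names are illustrative only:

```go
// malformed_bench_test.go - hypothetical file name.
// The 10-byte input declares a CBOR array with an absurdly large length,
// so a hardened decoder must fail fast instead of trying to allocate.
package main

import (
	"testing"

	"github.com/fxamacker/cbor/v2"
)

var malicious = []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}

func BenchmarkRejectMalformed(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var v []byte
		if err := cbor.Unmarshal(malicious, &v); err == nil {
			b.Fatal("expected malformed CBOR data to be rejected")
		}
	}
}
```

Run it with `go test -bench=. -benchmem` as in the comparisons above.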
+ +In contrast, some codecs can crash or use excessive resources while decoding bad data. + +> [!WARNING] +> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). +> +>
🔎  gob fatal error (out of memory) 💥 decoding 181 bytes

+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>

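Hardened defaults are only half the story; callers can tighten resource limits further. A minimal sketch using documented `DecOptions` fields (the exact limit values here are arbitrary choices for illustration, not recommendations):

```go
// Sketch: cap decoder resources explicitly instead of relying on defaults.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	opts := cbor.DecOptions{
		MaxNestedLevels:  16,   // lower than the default nesting limit
		MaxArrayElements: 1024, // reject huge arrays before allocating
		MaxMapPairs:      1024, // same idea for maps
	}
	dm, err := opts.DecMode() // validates options once; dm is reusable and concurrency-safe
	if err != nil {
		panic(err)
	}

	bad := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
	var v interface{}
	fmt.Println(dm.Unmarshal(bad, &v)) // prints a decode error; no crash, no big allocation
}
```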
+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty fields when encoding +- `omitzero`: omit zero-value fields when encoding ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
🔎  Encoding 3-level nested Go struct with omitempty

+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>

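To round out the options listed above, here is a hedged sketch of `toarray` and `keyasint`; the struct types are made up for illustration, while the tag syntax is the documented one:

```go
// Sketch of the wire-size-oriented struct tag options.
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// toarray: encode the struct as a fixed-order CBOR array (no field names on the wire).
type Coord struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

// keyasint: encode field names as integer map keys (common in COSE/CWT protocols).
type Claims struct {
	Iss string `cbor:"1,keyasint"`
	Exp int64  `cbor:"4,keyasint"`
}

func main() {
	a, _ := cbor.Marshal(Coord{X: 1, Y: 2})
	c, _ := cbor.Marshal(Claims{Iss: "ex", Exp: 42})
	fmt.Println(hex.EncodeToString(a)) // 820102: a 2-element array [1, 2]
	fmt.Println(hex.EncodeToString(c)) // a2...: a 2-pair map with integer keys 1 and 4
}
```

Both encodings decode back into the original struct types with `cbor.Unmarshal`.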
+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
🔎  More about tinygo feature branch +> +> ### Tinygo +> +> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go). +> +> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo. +> +> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet. +> +> Changes in this feature branch only affect tinygo compiled software. Summary of changes: +> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33. +> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature. +> - encoding error message can be different when encoding function type. +> +> Related tinygo issues: +> - https://github.com/tinygo-org/tinygo/issues/4277 +> - https://github.com/tinygo-org/tinygo/issues/4458 +> +>
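Before the key points below, a minimal round-trip sketch using only the package-level API with default modes (the `Animal` type is made up for illustration):

```go
// Smallest useful program: encode a struct to CBOR and decode it back.
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	b, err := cbor.Marshal(Animal{Name: "cat", Legs: 4}) // encode with default options
	if err != nil {
		panic(err)
	}

	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil { // decode back into the struct
		panic(err)
	}
	fmt.Printf("%+v in %d bytes of CBOR\n", a, len(b))
}
```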
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +290,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +351,9 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. -
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
+<details><summary>🔎  Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +421,13 @@ JSON: {"Foo":{"Qux":{}}}

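Reading the output above: `a0` is a zero-entry CBOR map, so every field was omitted. Unlike `encoding/json`, this library's `omitempty` also treats zero-valued nested structs as empty (that is the point of the example), which is where the 18-byte versus 1-byte difference comes from.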
-<details><summary>Example using several struct tags</summary>
+<details><summary>🔎  Example using struct tag options</summary>
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +443,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
-<details><summary>Example using TagSet and TagOptions</summary>
+<details><summary>🔎  Example using TagSet and TagOptions</summary>
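One detail to watch in the example below: tags registered with `EncModeWithTags` belong to the returned mode `em`, so encoding must go through `em.Marshal(v)`. The package-level `cbor.Marshal` uses the default mode, which knows nothing about the registered tag number, which is exactly what the corrected line in this hunk fixes.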
```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,7 +469,7 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ``` @@ -439,7 +478,7 @@ if data, err := cbor.Marshal(v); err != nil { ### Functions and Interfaces -

-<details><summary>Functions and interfaces at a glance</summary>
+<details><summary>🔎  Functions and interfaces at a glance</summary>
Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -472,11 +511,24 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +v2.8.0 (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +v2.8.0 and v2.7.1 fixes these 3 functions (when called directly by user apps) to use same error handling on bad inputs as `cbor.Unmarshal()`: +- `ByteString.UnmarshalCBOR()` +- `RawTag.UnmarshalCBOR()` +- `SimpleValue.UnmarshalCBOR()` + +The above 3 `UnmarshalCBOR()` functions were initially created for internal use and are deprecated now, so please use `Unmarshal()` or `UnmarshalFirst()` instead. To preserve backward compatibility, these deprecated functions were added to fuzz tests and will not be removed in v2. + +The minimum version of Go required to build: +- v2.8.0 requires go 1.20. +- v2.7.1 and older releases require go 1.17. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. @@ -489,7 +541,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. 0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, - PENDING_BYTE, 0, 1, 0))==0 ){ + + /* Flags for the LockFileEx() call. This should be an exclusive lock if + ** this call is to obtain EXCLUSIVE, or a shared lock if this call is to + ** obtain SHARED. */ + int flags = LOCKFILE_FAIL_IMMEDIATELY; + if( locktype==EXCLUSIVE_LOCK ){ + flags |= LOCKFILE_EXCLUSIVE_LOCK; + } + while( cnt>0 ){ /* Try 3 times to get the pending lock. This is needed to work ** around problems caused by indexing and/or anti-virus software on ** Windows systems. + ** ** If you are using this code as a model for alternative VFSes, do not - ** copy this retry logic. It is a hack intended for Windows only. - */ + ** copy this retry logic. It is a hack intended for Windows only. 
*/ + res = winLockFile(&pFile->h, flags, PENDING_BYTE, 0, 1, 0); + if( res ) break; + lastErrno = osGetLastError(); OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n", - pFile->h, cnt, res)); + pFile->h, cnt, res + )); + if( lastErrno==ERROR_INVALID_HANDLE ){ pFile->lastErrno = lastErrno; rc = SQLITE_IOERR_LOCK; OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n", - pFile->h, cnt, sqlite3ErrName(rc))); + pFile->h, cnt, sqlite3ErrName(rc) + )); return rc; } - if( cnt ) sqlite3_win32_sleep(1); + + cnt--; + if( cnt>0 ) sqlite3_win32_sleep(1); } gotPendingLock = res; - if( !res ){ - lastErrno = osGetLastError(); - } } /* Acquire a shared lock */ if( locktype==SHARED_LOCK && res ){ assert( pFile->locktype==NO_LOCK ); - res = winGetReadLock(pFile); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + res = winGetReadLock(pFile, pFile->bBlockOnConnect); +#else + res = winGetReadLock(pFile, 0); +#endif if( res ){ newLocktype = SHARED_LOCK; }else{ @@ -50221,7 +50772,7 @@ static int winLock(sqlite3_file *id, int locktype){ newLocktype = EXCLUSIVE_LOCK; }else{ lastErrno = osGetLastError(); - winGetReadLock(pFile); + winGetReadLock(pFile, 0); } } @@ -50301,7 +50852,7 @@ static int winUnlock(sqlite3_file *id, int locktype){ type = pFile->locktype; if( type>=EXCLUSIVE_LOCK ){ winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); - if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){ + if( locktype==SHARED_LOCK && !winGetReadLock(pFile, 0) ){ /* This should never happen. We should always be able to ** reacquire the read lock */ rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(), @@ -50511,6 +51062,28 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){ return rc; } #endif + +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + case SQLITE_FCNTL_LOCK_TIMEOUT: { + int iOld = pFile->iBusyTimeout; + int iNew = *(int*)pArg; +#if SQLITE_ENABLE_SETLK_TIMEOUT==1 + pFile->iBusyTimeout = (iNew < 0) ? INFINITE : (DWORD)iNew; +#elif SQLITE_ENABLE_SETLK_TIMEOUT==2 + pFile->iBusyTimeout = (DWORD)(!!iNew); +#else +# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2" +#endif + *(int*)pArg = iOld; + return SQLITE_OK; + } + case SQLITE_FCNTL_BLOCK_ON_CONNECT: { + int iNew = *(int*)pArg; + pFile->bBlockOnConnect = iNew; + return SQLITE_OK; + } +#endif /* SQLITE_ENABLE_SETLK_TIMEOUT */ + } OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); return SQLITE_NOTFOUND; @@ -50591,23 +51164,27 @@ static int winShmMutexHeld(void) { ** ** The following fields are read-only after the object is created: ** -** fid ** zFilename ** ** Either winShmNode.mutex must be held or winShmNode.nRef==0 and ** winShmMutexHeld() is true when reading or writing any other field ** in this structure. ** +** File-handle hSharedShm is used to (a) take the DMS lock, (b) truncate +** the *-shm file if the DMS-locking protocol demands it, and (c) map +** regions of the *-shm file into memory using MapViewOfFile() or +** similar. Other locks are taken by individual clients using the +** winShm.hShm handles. 
*/ struct winShmNode { sqlite3_mutex *mutex; /* Mutex to access this object */ char *zFilename; /* Name of the file */ - winFile hFile; /* File handle from winOpen */ + HANDLE hSharedShm; /* File handle open on zFilename */ + int isUnlocked; /* DMS lock has not yet been obtained */ + int isReadonly; /* True if read-only */ int szRegion; /* Size of shared-memory regions */ int nRegion; /* Size of array apRegion */ - u8 isReadonly; /* True if read-only */ - u8 isUnlocked; /* True if no DMS lock held */ struct ShmRegion { HANDLE hMap; /* File handle from CreateFileMapping */ @@ -50616,7 +51193,6 @@ struct winShmNode { DWORD lastErrno; /* The Windows errno from the last I/O error */ int nRef; /* Number of winShm objects pointing to this */ - winShm *pFirst; /* All winShm objects pointing to this */ winShmNode *pNext; /* Next in list of all winShmNode objects */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 nextShmId; /* Next available winShm.id value */ @@ -50632,23 +51208,15 @@ static winShmNode *winShmNodeList = 0; /* ** Structure used internally by this VFS to record the state of an -** open shared memory connection. -** -** The following fields are initialized when this object is created and -** are read-only thereafter: -** -** winShm.pShmNode -** winShm.id -** -** All other fields are read/write. The winShm.pShmNode->mutex must be held -** while accessing any read/write fields. +** open shared memory connection. There is one such structure for each +** winFile open on a wal mode database. */ struct winShm { winShmNode *pShmNode; /* The underlying winShmNode object */ - winShm *pNext; /* Next winShm with the same winShmNode */ - u8 hasMutex; /* True if holding the winShmNode mutex */ u16 sharedMask; /* Mask of shared locks held */ u16 exclMask; /* Mask of exclusive locks held */ + HANDLE hShm; /* File-handle on *-shm file. For locking. */ + int bReadonly; /* True if hShm is opened read-only */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 id; /* Id of this connection with its winShmNode */ #endif @@ -50660,50 +51228,6 @@ struct winShm { #define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ -/* -** Apply advisory locks for all n bytes beginning at ofst. 
-*/ -#define WINSHM_UNLCK 1 -#define WINSHM_RDLCK 2 -#define WINSHM_WRLCK 3 -static int winShmSystemLock( - winShmNode *pFile, /* Apply locks to this open shared-memory segment */ - int lockType, /* WINSHM_UNLCK, WINSHM_RDLCK, or WINSHM_WRLCK */ - int ofst, /* Offset to first byte to be locked/unlocked */ - int nByte /* Number of bytes to lock or unlock */ -){ - int rc = 0; /* Result code form Lock/UnlockFileEx() */ - - /* Access to the winShmNode object is serialized by the caller */ - assert( pFile->nRef==0 || sqlite3_mutex_held(pFile->mutex) ); - - OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n", - pFile->hFile.h, lockType, ofst, nByte)); - - /* Release/Acquire the system-level lock */ - if( lockType==WINSHM_UNLCK ){ - rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0); - }else{ - /* Initialize the locking parameters */ - DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY; - if( lockType == WINSHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; - rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0); - } - - if( rc!= 0 ){ - rc = SQLITE_OK; - }else{ - pFile->lastErrno = osGetLastError(); - rc = SQLITE_BUSY; - } - - OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n", - pFile->hFile.h, (lockType == WINSHM_UNLCK) ? "winUnlockFile" : - "winLockFile", pFile->lastErrno, sqlite3ErrName(rc))); - - return rc; -} - /* Forward references to VFS methods */ static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); static int winDelete(sqlite3_vfs *,const char*,int); @@ -50735,11 +51259,7 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); UNUSED_VARIABLE_VALUE(bRc); } - if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){ - SimulateIOErrorBenign(1); - winClose((sqlite3_file *)&p->hFile); - SimulateIOErrorBenign(0); - } + winHandleClose(p->hSharedShm); if( deleteFlag ){ SimulateIOErrorBenign(1); sqlite3BeginBenignMalloc(); @@ -50757,42 +51277,239 @@ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ } /* -** The DMS lock has not yet been taken on shm file pShmNode. Attempt to -** take it now. Return SQLITE_OK if successful, or an SQLite error -** code otherwise. -** -** If the DMS cannot be locked because this is a readonly_shm=1 -** connection and no other process already holds a lock, return -** SQLITE_READONLY_CANTINIT and set pShmNode->isUnlocked=1. +** The DMS lock has not yet been taken on the shm file associated with +** pShmNode. Take the lock. Truncate the *-shm file if required. +** Return SQLITE_OK if successful, or an SQLite error code otherwise. */ -static int winLockSharedMemory(winShmNode *pShmNode){ - int rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, WIN_SHM_DMS, 1); +static int winLockSharedMemory(winShmNode *pShmNode, DWORD nMs){ + HANDLE h = pShmNode->hSharedShm; + int rc = SQLITE_OK; + assert( sqlite3_mutex_held(pShmNode->mutex) ); + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 1, 0); if( rc==SQLITE_OK ){ + /* We have an EXCLUSIVE lock on the DMS byte. This means that this + ** is the first process to open the file. Truncate it to zero bytes + ** in this case. 
*/ if( pShmNode->isReadonly ){ - pShmNode->isUnlocked = 1; - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - return SQLITE_READONLY_CANTINIT; - }else if( winTruncate((sqlite3_file*)&pShmNode->hFile, 0) ){ - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - return winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), - "winLockSharedMemory", pShmNode->zFilename); + rc = SQLITE_READONLY_CANTINIT; + }else{ + rc = winHandleTruncate(h, 0); } + + /* Release the EXCLUSIVE lock acquired above. */ + winUnlockFile(&h, WIN_SHM_DMS, 0, 1, 0); + }else if( (rc & 0xFF)==SQLITE_BUSY ){ + rc = SQLITE_OK; } if( rc==SQLITE_OK ){ - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); + /* Take a SHARED lock on the DMS byte. */ + rc = winHandleLockTimeout(h, WIN_SHM_DMS, 1, 0, nMs); + if( rc==SQLITE_OK ){ + pShmNode->isUnlocked = 0; + } } - return winShmSystemLock(pShmNode, WINSHM_RDLCK, WIN_SHM_DMS, 1); + return rc; } + /* -** Open the shared-memory area associated with database file pDbFd. +** Convert a UTF-8 filename into whatever form the underlying +** operating system wants filenames in. Space to hold the result +** is obtained from malloc and must be freed by the calling +** function +** +** On Cygwin, 3 possible input forms are accepted: +** - If the filename starts with ":/" or ":\", +** it is converted to UTF-16 as-is. +** - If the filename contains '/', it is assumed to be a +** Cygwin absolute path, it is converted to a win32 +** absolute path in UTF-16. +** - Otherwise it must be a filename only, the win32 filename +** is returned in UTF-16. +** Note: If the function cygwin_conv_path() fails, only +** UTF-8 -> UTF-16 conversion will be done. This can only +** happen when the file path >32k, in which case winUtf8ToUnicode() +** will fail too. +*/ +static void *winConvertFromUtf8Filename(const char *zFilename){ + void *zConverted = 0; + if( osIsNT() ){ +#ifdef __CYGWIN__ + int nChar; + LPWSTR zWideFilename; + + if( osCygwin_conv_path && !(winIsDriveLetterAndColon(zFilename) + && winIsDirSep(zFilename[2])) ){ + i64 nByte; + int convertflag = CCP_POSIX_TO_WIN_W; + if( !strchr(zFilename, '/') ) convertflag |= CCP_RELATIVE; + nByte = (i64)osCygwin_conv_path(convertflag, + zFilename, 0, 0); + if( nByte>0 ){ + zConverted = sqlite3MallocZero(12+(u64)nByte); + if ( zConverted==0 ){ + return zConverted; + } + zWideFilename = zConverted; + /* Filenames should be prefixed, except when converted + * full path already starts with "\\?\". */ + if( osCygwin_conv_path(convertflag, zFilename, + zWideFilename+4, nByte)==0 ){ + if( (convertflag&CCP_RELATIVE) ){ + memmove(zWideFilename, zWideFilename+4, nByte); + }else if( memcmp(zWideFilename+4, L"\\\\", 4) ){ + memcpy(zWideFilename, L"\\\\?\\", 8); + }else if( zWideFilename[6]!='?' 
){ + memmove(zWideFilename+6, zWideFilename+4, nByte); + memcpy(zWideFilename, L"\\\\?\\UNC", 14); + }else{ + memmove(zWideFilename, zWideFilename+4, nByte); + } + return zConverted; + } + sqlite3_free(zConverted); + } + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); + if( nChar==0 ){ + return 0; + } + zWideFilename = sqlite3MallocZero( nChar*sizeof(WCHAR)+12 ); + if( zWideFilename==0 ){ + return 0; + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, + zWideFilename, nChar); + if( nChar==0 ){ + sqlite3_free(zWideFilename); + zWideFilename = 0; + }else if( nChar>MAX_PATH + && winIsDriveLetterAndColon(zFilename) + && winIsDirSep(zFilename[2]) ){ + memmove(zWideFilename+4, zWideFilename, nChar*sizeof(WCHAR)); + zWideFilename[2] = '\\'; + memcpy(zWideFilename, L"\\\\?\\", 8); + }else if( nChar>MAX_PATH + && winIsDirSep(zFilename[0]) && winIsDirSep(zFilename[1]) + && zFilename[2] != '?' ){ + memmove(zWideFilename+6, zWideFilename, nChar*sizeof(WCHAR)); + memcpy(zWideFilename, L"\\\\?\\UNC", 14); + } + zConverted = zWideFilename; +#else + zConverted = winUtf8ToUnicode(zFilename); +#endif /* __CYGWIN__ */ + } +#if defined(SQLITE_WIN32_HAS_ANSI) && defined(_WIN32) + else{ + zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); + } +#endif + /* caller will handle out of memory */ + return zConverted; +} + +/* +** This function is used to open a handle on a *-shm file. ** -** When opening a new shared-memory file, if no other instances of that -** file are currently open, in this process or in other processes, then -** the file must be truncated to zero length or have its header cleared. +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined at build time, then the file +** is opened with FILE_FLAG_OVERLAPPED specified. If not, it is not. +*/ +static int winHandleOpen( + const char *zUtf8, /* File to open */ + int *pbReadonly, /* IN/OUT: True for readonly handle */ + HANDLE *ph /* OUT: New HANDLE for file */ +){ + int rc = SQLITE_OK; + void *zConverted = 0; + int bReadonly = *pbReadonly; + HANDLE h = INVALID_HANDLE_VALUE; + +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + const DWORD flag_overlapped = FILE_FLAG_OVERLAPPED; +#else + const DWORD flag_overlapped = 0; +#endif + + /* Convert the filename to the system encoding. */ + zConverted = winConvertFromUtf8Filename(zUtf8); + if( zConverted==0 ){ + OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8)); + rc = SQLITE_IOERR_NOMEM_BKPT; + goto winopenfile_out; + } + + /* Ensure the file we are trying to open is not actually a directory. */ + if( winIsDir(zConverted) ){ + OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8)); + rc = SQLITE_CANTOPEN_ISDIR; + goto winopenfile_out; + } + + /* TODO: platforms. + ** TODO: retry-on-ioerr. + */ + if( osIsNT() ){ +#if SQLITE_OS_WINRT + CREATEFILE2_EXTENDED_PARAMETERS extendedParameters; + memset(&extendedParameters, 0, sizeof(extendedParameters)); + extendedParameters.dwSize = sizeof(extendedParameters); + extendedParameters.dwFileAttributes = FILE_ATTRIBUTE_NORMAL; + extendedParameters.dwFileFlags = flag_overlapped; + extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS; + h = osCreateFile2((LPCWSTR)zConverted, + (GENERIC_READ | (bReadonly ? 0 : GENERIC_WRITE)),/* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + OPEN_ALWAYS, /* dwCreationDisposition */ + &extendedParameters + ); +#else + h = osCreateFileW((LPCWSTR)zConverted, /* lpFileName */ + (GENERIC_READ | (bReadonly ? 
0 : GENERIC_WRITE)), /* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + NULL, /* lpSecurityAttributes */ + OPEN_ALWAYS, /* dwCreationDisposition */ + FILE_ATTRIBUTE_NORMAL|flag_overlapped, + NULL + ); +#endif + }else{ + /* Due to pre-processor directives earlier in this file, + ** SQLITE_WIN32_HAS_ANSI is always defined if osIsNT() is false. */ +#ifdef SQLITE_WIN32_HAS_ANSI + h = osCreateFileA((LPCSTR)zConverted, + (GENERIC_READ | (bReadonly ? 0 : GENERIC_WRITE)), /* dwDesiredAccess */ + FILE_SHARE_READ | FILE_SHARE_WRITE, /* dwShareMode */ + NULL, /* lpSecurityAttributes */ + OPEN_ALWAYS, /* dwCreationDisposition */ + FILE_ATTRIBUTE_NORMAL|flag_overlapped, + NULL + ); +#endif + } + + if( h==INVALID_HANDLE_VALUE ){ + if( bReadonly==0 ){ + bReadonly = 1; + rc = winHandleOpen(zUtf8, &bReadonly, &h); + }else{ + rc = SQLITE_CANTOPEN_BKPT; + } + } + + winopenfile_out: + sqlite3_free(zConverted); + *pbReadonly = bReadonly; + *ph = h; + return rc; +} + + +/* +** Open the shared-memory area associated with database file pDbFd. */ static int winOpenSharedMemory(winFile *pDbFd){ struct winShm *p; /* The connection to be opened */ @@ -50804,98 +51521,83 @@ static int winOpenSharedMemory(winFile *pDbFd){ assert( pDbFd->pShm==0 ); /* Not previously opened */ /* Allocate space for the new sqlite3_shm object. Also speculatively - ** allocate space for a new winShmNode and filename. - */ + ** allocate space for a new winShmNode and filename. */ p = sqlite3MallocZero( sizeof(*p) ); if( p==0 ) return SQLITE_IOERR_NOMEM_BKPT; nName = sqlite3Strlen30(pDbFd->zPath); - pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 ); + pNew = sqlite3MallocZero( sizeof(*pShmNode) + (i64)nName + 17 ); if( pNew==0 ){ sqlite3_free(p); return SQLITE_IOERR_NOMEM_BKPT; } pNew->zFilename = (char*)&pNew[1]; + pNew->hSharedShm = INVALID_HANDLE_VALUE; + pNew->isUnlocked = 1; sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); + /* Open a file-handle on the *-shm file for this connection. This file-handle + ** is only used for locking. The mapping of the *-shm file is created using + ** the shared file handle in winShmNode.hSharedShm. */ + p->bReadonly = sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0); + rc = winHandleOpen(pNew->zFilename, &p->bReadonly, &p->hShm); + /* Look to see if there is an existing winShmNode that can be used. - ** If no matching winShmNode currently exists, create a new one. - */ + ** If no matching winShmNode currently exists, then create a new one. */ winShmEnterMutex(); for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ /* TBD need to come up with better match here. Perhaps - ** use FILE_ID_BOTH_DIR_INFO Structure. - */ + ** use FILE_ID_BOTH_DIR_INFO Structure. */ if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; } - if( pShmNode ){ - sqlite3_free(pNew); - }else{ - int inFlags = SQLITE_OPEN_WAL; - int outFlags = 0; - + if( pShmNode==0 ){ pShmNode = pNew; - pNew = 0; - ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; - pShmNode->pNext = winShmNodeList; - winShmNodeList = pShmNode; + /* Allocate a mutex for this winShmNode object, if one is required. 
*/ if( sqlite3GlobalConfig.bCoreMutex ){ pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_IOERR_NOMEM_BKPT; - goto shm_open_err; - } + if( pShmNode->mutex==0 ) rc = SQLITE_IOERR_NOMEM_BKPT; } - if( 0==sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){ - inFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE; - }else{ - inFlags |= SQLITE_OPEN_READONLY; - } - rc = winOpen(pDbFd->pVfs, pShmNode->zFilename, - (sqlite3_file*)&pShmNode->hFile, - inFlags, &outFlags); - if( rc!=SQLITE_OK ){ - rc = winLogError(rc, osGetLastError(), "winOpenShm", - pShmNode->zFilename); - goto shm_open_err; + /* Open a file-handle to use for mappings, and for the DMS lock. */ + if( rc==SQLITE_OK ){ + HANDLE h = INVALID_HANDLE_VALUE; + pShmNode->isReadonly = p->bReadonly; + rc = winHandleOpen(pNew->zFilename, &pShmNode->isReadonly, &h); + pShmNode->hSharedShm = h; } - if( outFlags==SQLITE_OPEN_READONLY ) pShmNode->isReadonly = 1; - rc = winLockSharedMemory(pShmNode); - if( rc!=SQLITE_OK && rc!=SQLITE_READONLY_CANTINIT ) goto shm_open_err; + /* If successful, link the new winShmNode into the global list. If an + ** error occurred, free the object. */ + if( rc==SQLITE_OK ){ + pShmNode->pNext = winShmNodeList; + winShmNodeList = pShmNode; + pNew = 0; + }else{ + sqlite3_mutex_free(pShmNode->mutex); + if( pShmNode->hSharedShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(pShmNode->hSharedShm); + } + } } - /* Make the new connection a child of the winShmNode */ - p->pShmNode = pShmNode; + /* If no error has occurred, link the winShm object to the winShmNode and + ** the winShm to pDbFd. */ + if( rc==SQLITE_OK ){ + p->pShmNode = pShmNode; + pShmNode->nRef++; #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) - p->id = pShmNode->nextShmId++; + p->id = pShmNode->nextShmId++; #endif - pShmNode->nRef++; - pDbFd->pShm = p; - winShmLeaveMutex(); - - /* The reference count on pShmNode has already been incremented under - ** the cover of the winShmEnterMutex() mutex and the pointer from the - ** new (struct winShm) object to the pShmNode has been set. All that is - ** left to do is to link the new object into the linked list starting - ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex - ** mutex. 
- */ - sqlite3_mutex_enter(pShmNode->mutex); - p->pNext = pShmNode->pFirst; - pShmNode->pFirst = p; - sqlite3_mutex_leave(pShmNode->mutex); - return rc; + pDbFd->pShm = p; + }else if( p ){ + winHandleClose(p->hShm); + sqlite3_free(p); + } - /* Jump here on any error */ -shm_open_err: - winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); - winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ - sqlite3_free(p); - sqlite3_free(pNew); + assert( rc!=SQLITE_OK || pShmNode->isUnlocked==0 || pShmNode->nRegion==0 ); winShmLeaveMutex(); + sqlite3_free(pNew); return rc; } @@ -50910,27 +51612,19 @@ static int winShmUnmap( winFile *pDbFd; /* Database holding shared-memory */ winShm *p; /* The connection to be closed */ winShmNode *pShmNode; /* The underlying shared-memory file */ - winShm **pp; /* For looping over sibling connections */ pDbFd = (winFile*)fd; p = pDbFd->pShm; if( p==0 ) return SQLITE_OK; - pShmNode = p->pShmNode; - - /* Remove connection p from the set of connections associated - ** with pShmNode */ - sqlite3_mutex_enter(pShmNode->mutex); - for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} - *pp = p->pNext; + if( p->hShm!=INVALID_HANDLE_VALUE ){ + osCloseHandle(p->hShm); + } - /* Free the connection p */ - sqlite3_free(p); - pDbFd->pShm = 0; - sqlite3_mutex_leave(pShmNode->mutex); + pShmNode = p->pShmNode; + winShmEnterMutex(); /* If pShmNode->nRef has reached 0, then close the underlying - ** shared-memory file, too */ - winShmEnterMutex(); + ** shared-memory file, too. */ assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ @@ -50938,6 +51632,9 @@ static int winShmUnmap( } winShmLeaveMutex(); + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; return SQLITE_OK; } @@ -50952,10 +51649,9 @@ static int winShmLock( ){ winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ winShm *p = pDbFd->pShm; /* The shared memory being locked */ - winShm *pX; /* For looping over all siblings */ winShmNode *pShmNode; int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ + u16 mask = (u16)((1U<<(ofst+n)) - (1U<pShmNode; @@ -50969,85 +51665,81 @@ static int winShmLock( || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); - mask = (u16)((1U<<(ofst+n)) - (1U<1 || mask==(1<mutex); - if( flags & SQLITE_SHM_UNLOCK ){ - u16 allMask = 0; /* Mask of locks held by siblings */ - - /* See if any siblings hold this same lock */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( pX==p ) continue; - assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); - allMask |= pX->sharedMask; - } + /* Check that, if this to be a blocking lock, no locks that occur later + ** in the following list than the lock being obtained are already held: + ** + ** 1. Recovery lock (ofst==2). + ** 2. Checkpointer lock (ofst==1). + ** 3. Write lock (ofst==0). + ** 4. 
Read locks (ofst>=3 && ofstexclMask|p->sharedMask); + assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( + (ofst!=2 || lockMask==0) + && (ofst!=1 || lockMask==0 || lockMask==2) + && (ofst!=0 || lockMask<3) + && (ofst<3 || lockMask<(1<exclMask & mask) + ); + if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask)) + || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask)) + || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)) + ){ - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - }else if( flags & SQLITE_SHM_SHARED ){ - u16 allShared = 0; /* Union of locks held by connections other than "p" */ + if( flags & SQLITE_SHM_UNLOCK ){ + /* Case (a) - unlock. */ - /* Find out which shared locks are already held by sibling connections. - ** If any sibling already holds an exclusive lock, go ahead and return - ** SQLITE_BUSY. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; - } - allShared |= pX->sharedMask; - } + assert( (p->exclMask & p->sharedMask)==0 ); + assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask ); + assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask ); - /* Get shared locks at the system level, if necessary */ - if( rc==SQLITE_OK ){ - if( (allShared & mask)==0 ){ - rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, ofst+WIN_SHM_BASE, n); - }else{ - rc = SQLITE_OK; - } - } + rc = winHandleUnlock(p->hShm, ofst+WIN_SHM_BASE, n); - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ - rc = SQLITE_BUSY; - break; + /* If successful, also clear the bits in sharedMask/exclMask */ + if( rc==SQLITE_OK ){ + p->exclMask = (p->exclMask & ~mask); + p->sharedMask = (p->sharedMask & ~mask); } - } - - /* Get the exclusive locks at the system level. Then if successful - ** also mark the local connection as being locked. - */ - if( rc==SQLITE_OK ){ - rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, ofst+WIN_SHM_BASE, n); + }else{ + int bExcl = ((flags & SQLITE_SHM_EXCLUSIVE) ? 1 : 0); + DWORD nMs = winFileBusyTimeout(pDbFd); + rc = winHandleLockTimeout(p->hShm, ofst+WIN_SHM_BASE, n, bExcl, nMs); if( rc==SQLITE_OK ){ - assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; + if( bExcl ){ + p->exclMask = (p->exclMask | mask); + }else{ + p->sharedMask = (p->sharedMask | mask); + } } } } - sqlite3_mutex_leave(pShmNode->mutex); - OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n", - osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, - sqlite3ErrName(rc))); + + OSTRACE(( + "SHM-LOCK(%d,%d,%d) pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x," + " rc=%s\n", + ofst, n, flags, + osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, + sqlite3ErrName(rc)) + ); return rc; } @@ -51109,13 +51801,15 @@ static int winShmMap( sqlite3_mutex_enter(pShmNode->mutex); if( pShmNode->isUnlocked ){ - rc = winLockSharedMemory(pShmNode); + /* Take the DMS lock. 
*/ + assert( pShmNode->nRegion==0 ); + rc = winLockSharedMemory(pShmNode, winFileBusyTimeout(pDbFd)); if( rc!=SQLITE_OK ) goto shmpage_out; - pShmNode->isUnlocked = 0; } - assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); + assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); if( pShmNode->nRegion<=iRegion ){ + HANDLE hShared = pShmNode->hSharedShm; struct ShmRegion *apNew; /* New aRegion[] array */ int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ sqlite3_int64 sz; /* Current size of wal-index file */ @@ -51126,10 +51820,9 @@ static int winShmMap( ** Check to see if it has been allocated (i.e. if the wal-index file is ** large enough to contain the requested region). */ - rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz); + rc = winHandleSize(hShared, &sz); if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap1", pDbFd->zPath); + rc = winLogError(rc, osGetLastError(), "winShmMap1", pDbFd->zPath); goto shmpage_out; } @@ -51138,19 +51831,17 @@ static int winShmMap( ** zero, exit early. *pp will be set to NULL and SQLITE_OK returned. ** ** Alternatively, if isWrite is non-zero, use ftruncate() to allocate - ** the requested memory region. - */ + ** the requested memory region. */ if( !isWrite ) goto shmpage_out; - rc = winTruncate((sqlite3_file *)&pShmNode->hFile, nByte); + rc = winHandleTruncate(hShared, nByte); if( rc!=SQLITE_OK ){ - rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), - "winShmMap2", pDbFd->zPath); + rc = winLogError(rc, osGetLastError(), "winShmMap2", pDbFd->zPath); goto shmpage_out; } } /* Map the requested memory region into this processes address space. */ - apNew = (struct ShmRegion *)sqlite3_realloc64( + apNew = (struct ShmRegion*)sqlite3_realloc64( pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) ); if( !apNew ){ @@ -51169,18 +51860,13 @@ static int winShmMap( void *pMap = 0; /* Mapped memory region */ #if SQLITE_OS_WINRT - hMap = osCreateFileMappingFromApp(pShmNode->hFile.h, - NULL, protect, nByte, NULL - ); + hMap = osCreateFileMappingFromApp(hShared, NULL, protect, nByte, NULL); #elif defined(SQLITE_WIN32_HAS_WIDE) - hMap = osCreateFileMappingW(pShmNode->hFile.h, - NULL, protect, 0, nByte, NULL - ); + hMap = osCreateFileMappingW(hShared, NULL, protect, 0, nByte, NULL); #elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA - hMap = osCreateFileMappingA(pShmNode->hFile.h, - NULL, protect, 0, nByte, NULL - ); + hMap = osCreateFileMappingA(hShared, NULL, protect, 0, nByte, NULL); #endif + OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n", osGetCurrentProcessId(), pShmNode->nRegion, nByte, hMap ? "ok" : "failed")); @@ -51223,7 +51909,9 @@ static int winShmMap( }else{ *pp = 0; } - if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY; + if( pShmNode->isReadonly && rc==SQLITE_OK ){ + rc = SQLITE_READONLY; + } sqlite3_mutex_leave(pShmNode->mutex); return rc; } @@ -51543,47 +52231,6 @@ static winVfsAppData winNolockAppData = { ** sqlite3_vfs object. */ -#if defined(__CYGWIN__) -/* -** Convert a filename from whatever the underlying operating system -** supports for filenames into UTF-8. Space to hold the result is -** obtained from malloc and must be freed by the calling function. 
-*/ -static char *winConvertToUtf8Filename(const void *zFilename){ - char *zConverted = 0; - if( osIsNT() ){ - zConverted = winUnicodeToUtf8(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = winMbcsToUtf8(zFilename, osAreFileApisANSI()); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} -#endif - -/* -** Convert a UTF-8 filename into whatever form the underlying -** operating system wants filenames in. Space to hold the result -** is obtained from malloc and must be freed by the calling -** function. -*/ -static void *winConvertFromUtf8Filename(const char *zFilename){ - void *zConverted = 0; - if( osIsNT() ){ - zConverted = winUtf8ToUnicode(zFilename); - } -#ifdef SQLITE_WIN32_HAS_ANSI - else{ - zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); - } -#endif - /* caller will handle out of memory */ - return zConverted; -} - /* ** This function returns non-zero if the specified UTF-8 string buffer ** ends with a directory separator character or one was successfully @@ -51596,7 +52243,14 @@ static int winMakeEndInDirSep(int nBuf, char *zBuf){ if( winIsDirSep(zBuf[nLen-1]) ){ return 1; }else if( nLen+1mxPathname; nBuf = nMax + 2; + nMax = pVfs->mxPathname; + nBuf = 2 + (i64)nMax; zBuf = sqlite3MallocZero( nBuf ); if( !zBuf ){ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n")); @@ -51673,7 +52328,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ } #if defined(__CYGWIN__) - else{ + else if( osGetenv!=NULL ){ static const char *azDirs[] = { 0, /* getenv("SQLITE_TMPDIR") */ 0, /* getenv("TMPDIR") */ @@ -51689,11 +52344,11 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ unsigned int i; const char *zDir = 0; - if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); - if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); - if( !azDirs[2] ) azDirs[2] = getenv("TMP"); - if( !azDirs[3] ) azDirs[3] = getenv("TEMP"); - if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE"); + if( !azDirs[0] ) azDirs[0] = osGetenv("SQLITE_TMPDIR"); + if( !azDirs[1] ) azDirs[1] = osGetenv("TMPDIR"); + if( !azDirs[2] ) azDirs[2] = osGetenv("TMP"); + if( !azDirs[3] ) azDirs[3] = osGetenv("TEMP"); + if( !azDirs[4] ) azDirs[4] = osGetenv("USERPROFILE"); for(i=0; inOut ){ + /* SQLite assumes that xFullPathname() nul-terminates the output buffer + ** even if it returns an error. */ + zOut[iOff] = '\0'; + return SQLITE_CANTOPEN_BKPT; + } + sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath); + return SQLITE_OK; +} +#endif /* __CYGWIN__ */ /* ** Turn a relative pathname into a full pathname. Write the full @@ -52476,8 +53180,8 @@ static int winFullPathnameNoMutex( int nFull, /* Size of output buffer in bytes */ char *zFull /* Output buffer */ ){ -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) - DWORD nByte; +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + int nByte; void *zConverted; char *zOut; #endif @@ -52490,64 +53194,82 @@ static int winFullPathnameNoMutex( zRelative++; } -#if defined(__CYGWIN__) SimulateIOError( return SQLITE_ERROR ); - UNUSED_PARAMETER(nFull); - assert( nFull>=pVfs->mxPathname ); - if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ - /* - ** NOTE: We are dealing with a relative path name and the data - ** directory has been set. Therefore, use it as the basis - ** for converting the relative path name to an absolute - ** one by prepending the data directory and a slash. 
- */ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM_BKPT; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) | - CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname1", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM_BKPT; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", - sqlite3_data_directory, winGetDirSep(), zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); - } - }else{ - char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); - if( !zOut ){ - return SQLITE_IOERR_NOMEM_BKPT; - } - if( cygwin_conv_path( - (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A), - zRelative, zOut, pVfs->mxPathname+1)<0 ){ - sqlite3_free(zOut); - return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, - "winFullPathname2", zRelative); - }else{ - char *zUtf8 = winConvertToUtf8Filename(zOut); - if( !zUtf8 ){ - sqlite3_free(zOut); - return SQLITE_IOERR_NOMEM_BKPT; - } - sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8); - sqlite3_free(zUtf8); - sqlite3_free(zOut); + +#ifdef __CYGWIN__ + if( osGetcwd ){ + zFull[nFull-1] = '\0'; + if( !winIsDriveLetterAndColon(zRelative) || !winIsDirSep(zRelative[2]) ){ + int rc = SQLITE_OK; + int nLink = 1; /* Number of symbolic links followed so far */ + const char *zIn = zRelative; /* Input path for each iteration of loop */ + char *zDel = 0; + struct stat buf; + + UNUSED_PARAMETER(pVfs); + + do { + /* Call lstat() on path zIn. Set bLink to true if the path is a symbolic + ** link, or false otherwise. */ + int bLink = 0; + if( osLstat && osReadlink ) { + if( osLstat(zIn, &buf)!=0 ){ + int myErrno = osErrno; + if( myErrno!=ENOENT ){ + rc = winLogError(SQLITE_CANTOPEN_BKPT, (DWORD)myErrno, "lstat", zIn); + } + }else{ + bLink = ((buf.st_mode & 0170000) == 0120000); + } + + if( bLink ){ + if( zDel==0 ){ + zDel = sqlite3MallocZero(nFull); + if( zDel==0 ) rc = SQLITE_NOMEM; + }else if( ++nLink>SQLITE_MAX_SYMLINKS ){ + rc = SQLITE_CANTOPEN_BKPT; + } + + if( rc==SQLITE_OK ){ + nByte = osReadlink(zIn, zDel, nFull-1); + if( nByte ==(DWORD)-1 ){ + rc = winLogError(SQLITE_CANTOPEN_BKPT, (DWORD)osErrno, "readlink", zIn); + }else{ + if( zDel[0]!='/' ){ + int n; + for(n = sqlite3Strlen30(zIn); n>0 && zIn[n-1]!='/'; n--); + if( nByte+n+1>nFull ){ + rc = SQLITE_CANTOPEN_BKPT; + }else{ + memmove(&zDel[n], zDel, nByte+1); + memcpy(zDel, zIn, n); + nByte += n; + } + } + zDel[nByte] = '\0'; + } + } + + zIn = zDel; + } + } + + assert( rc!=SQLITE_OK || zIn!=zFull || zIn[0]=='/' ); + if( rc==SQLITE_OK && zIn!=zFull ){ + rc = mkFullPathname(zIn, zFull, nFull); + } + if( bLink==0 ) break; + zIn = zFull; + }while( rc==SQLITE_OK ); + + sqlite3_free(zDel); + winSimplifyName(zFull); + return rc; } } - return SQLITE_OK; -#endif +#endif /* __CYGWIN__ */ -#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__) +#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && defined(_WIN32) SimulateIOError( return SQLITE_ERROR ); /* WinCE has no concept of a relative pathname, or so I am told. */ /* WinRT has no way to convert a relative path to an absolute one. 
*/ @@ -52566,7 +53288,8 @@ static int winFullPathnameNoMutex( return SQLITE_OK; #endif -#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT +#if defined(_WIN32) /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. This function could fail if, for example, the @@ -52584,6 +53307,7 @@ static int winFullPathnameNoMutex( sqlite3_data_directory, winGetDirSep(), zRelative); return SQLITE_OK; } +#endif zConverted = winConvertFromUtf8Filename(zRelative); if( zConverted==0 ){ return SQLITE_IOERR_NOMEM_BKPT; @@ -52622,13 +53346,12 @@ static int winFullPathnameNoMutex( return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname3", zRelative); } - nByte += 3; - zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); + zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) + 3*sizeof(zTemp[0]) ); if( zTemp==0 ){ sqlite3_free(zConverted); return SQLITE_IOERR_NOMEM_BKPT; } - nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0); + nByte = osGetFullPathNameA((char*)zConverted, nByte+3, zTemp, 0); if( nByte==0 ){ sqlite3_free(zConverted); sqlite3_free(zTemp); @@ -52641,7 +53364,26 @@ static int winFullPathnameNoMutex( } #endif if( zOut ){ +#ifdef __CYGWIN__ + if( memcmp(zOut, "\\\\?\\", 4) ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); + }else if( memcmp(zOut+4, "UNC\\", 4) ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut+4); + }else{ + char *p = zOut+6; + *p = '\\'; + if( osGetcwd ){ + /* On Cygwin, UNC paths use forward slashes */ + while( *p ){ + if( *p=='\\' ) *p = '/'; + ++p; + } + } + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut+6); + } +#else sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); +#endif /* __CYGWIN__ */ sqlite3_free(zOut); return SQLITE_OK; }else{ @@ -52671,25 +53413,8 @@ static int winFullPathname( */ static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ HANDLE h; -#if defined(__CYGWIN__) - int nFull = pVfs->mxPathname+1; - char *zFull = sqlite3MallocZero( nFull ); - void *zConverted = 0; - if( zFull==0 ){ - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){ - sqlite3_free(zFull); - OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); - return 0; - } - zConverted = winConvertFromUtf8Filename(zFull); - sqlite3_free(zFull); -#else void *zConverted = winConvertFromUtf8Filename(zFilename); UNUSED_PARAMETER(pVfs); -#endif if( zConverted==0 ){ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); return 0; @@ -53038,7 +53763,7 @@ SQLITE_API int sqlite3_os_init(void){ /* Double-check that the aSyscall[] array has been constructed ** correctly. 
See ticket [bb3a86e890c8e96ab] */ - assert( ArraySize(aSyscall)==80 ); + assert( ArraySize(aSyscall)==89 ); /* get memory map allocation granularity */ memset(&winSysInfo, 0, sizeof(SYSTEM_INFO)); @@ -53657,13 +54382,13 @@ static int memdbOpen( } if( p==0 ){ MemStore **apNew; - p = sqlite3Malloc( sizeof(*p) + szName + 3 ); + p = sqlite3Malloc( sizeof(*p) + (i64)szName + 3 ); if( p==0 ){ sqlite3_mutex_leave(pVfsMutex); return SQLITE_NOMEM; } apNew = sqlite3Realloc(memdb_g.apMemStore, - sizeof(apNew[0])*(memdb_g.nMemStore+1) ); + sizeof(apNew[0])*(1+(i64)memdb_g.nMemStore) ); if( apNew==0 ){ sqlite3_free(p); sqlite3_mutex_leave(pVfsMutex); @@ -54096,7 +54821,7 @@ SQLITE_PRIVATE int sqlite3MemdbInit(void){ ** no fewer collisions than the no-op *1. */ #define BITVEC_HASH(X) (((X)*1)%BITVEC_NINT) -#define BITVEC_NPTR (BITVEC_USIZE/sizeof(Bitvec *)) +#define BITVEC_NPTR ((u32)(BITVEC_USIZE/sizeof(Bitvec *))) /* @@ -54245,7 +54970,9 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){ }else{ memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); memset(p->u.apSub, 0, sizeof(p->u.apSub)); - p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR; + p->iDivisor = p->iSize/BITVEC_NPTR; + if( (p->iSize%BITVEC_NPTR)!=0 ) p->iDivisor++; + if( p->iDivisoriDivisor = BITVEC_NBIT; rc = sqlite3BitvecSet(p, i); for(j=0; jiSize<=BITVEC_NBIT ){ - p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1))); + p->u.aBitmap[i/BITVEC_SZELEM] &= ~(BITVEC_TELEM)(1<<(i&(BITVEC_SZELEM-1))); }else{ unsigned int j; u32 *aiValues = pBuf; @@ -54330,7 +55057,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ ** individual bits within V. */ #define SETBIT(V,I) V[I>>3] |= (1<<(I&7)) -#define CLEARBIT(V,I) V[I>>3] &= ~(1<<(I&7)) +#define CLEARBIT(V,I) V[I>>3] &= ~(BITVEC_TELEM)(1<<(I&7)) #define TESTBIT(V,I) (V[I>>3]&(1<<(I&7)))!=0 /* @@ -54373,7 +55100,7 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ /* Allocate the Bitvec to be tested and a linear array of ** bits to act as the reference */ pBitvec = sqlite3BitvecCreate( sz ); - pV = sqlite3MallocZero( (sz+7)/8 + 1 ); + pV = sqlite3MallocZero( (7+(i64)sz)/8 + 1 ); pTmpSpace = sqlite3_malloc64(BITVEC_SZ); if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end; @@ -55614,10 +56341,6 @@ static SQLITE_WSD struct PCacheGlobal { sqlite3_mutex *mutex; /* Mutex for accessing the following: */ PgFreeslot *pFree; /* Free page blocks */ int nFreeSlot; /* Number of unused pcache slots */ - /* The following value requires a mutex to change. We skip the mutex on - ** reading because (1) most platforms read a 32-bit integer atomically and - ** (2) even if an incorrect value is read, no great harm is done since this - ** is really just an optimization. */ int bUnderPressure; /* True if low on PAGECACHE memory */ } pcache1_g; @@ -55665,7 +56388,7 @@ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){ pcache1.nReserve = n>90 ? 
10 : (n/10 + 1); pcache1.pStart = pBuf; pcache1.pFree = 0; - pcache1.bUnderPressure = 0; + AtomicStore(&pcache1.bUnderPressure,0); while( n-- ){ p = (PgFreeslot*)pBuf; p->pNext = pcache1.pFree; @@ -55733,7 +56456,7 @@ static void *pcache1Alloc(int nByte){ if( p ){ pcache1.pFree = pcache1.pFree->pNext; pcache1.nFreeSlot--; - pcache1.bUnderPressure = pcache1.nFreeSlot=0 ); sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte); sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1); @@ -55772,7 +56495,7 @@ static void pcache1Free(void *p){ pSlot->pNext = pcache1.pFree; pcache1.pFree = pSlot; pcache1.nFreeSlot++; - pcache1.bUnderPressure = pcache1.nFreeSlotszPage+pCache->szExtra)<=pcache1.szSlot ){ - return pcache1.bUnderPressure; + return AtomicLoad(&pcache1.bUnderPressure); }else{ return sqlite3HeapNearlyFull(); } @@ -55920,12 +56643,12 @@ static int pcache1UnderMemoryPressure(PCache1 *pCache){ */ static void pcache1ResizeHash(PCache1 *p){ PgHdr1 **apNew; - unsigned int nNew; - unsigned int i; + u64 nNew; + u32 i; assert( sqlite3_mutex_held(p->pGroup->mutex) ); - nNew = p->nHash*2; + nNew = 2*(u64)p->nHash; if( nNew<256 ){ nNew = 256; } @@ -56148,7 +56871,7 @@ static void pcache1Destroy(sqlite3_pcache *p); static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ PCache1 *pCache; /* The newly created page cache */ PGroup *pGroup; /* The group the new page cache will belong to */ - int sz; /* Bytes of memory required to allocate the new cache */ + i64 sz; /* Bytes of memory required to allocate the new cache */ assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 ); assert( szExtra < 300 ); @@ -58036,6 +58759,9 @@ struct Pager { Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ char *zWal; /* File name for write-ahead log */ #endif +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + sqlite3 *dbWal; +#endif }; /* @@ -58627,7 +59353,7 @@ static void checkPage(PgHdr *pPg){ ** If an error occurs while reading from the journal file, an SQLite ** error code is returned. */ -static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u32 nSuper){ +static int readSuperJournal(sqlite3_file *pJrnl, char *zSuper, u64 nSuper){ int rc; /* Return code */ u32 len; /* Length in bytes of super-journal name */ i64 szJ; /* Total size in bytes of journal file pJrnl */ @@ -59182,6 +59908,15 @@ static void pager_unlock(Pager *pPager){ if( pagerUseWal(pPager) ){ assert( !isOpen(pPager->jfd) ); + if( pPager->eState==PAGER_ERROR ){ + /* If an IO error occurs in wal.c while attempting to wrap the wal file, + ** then the Wal object may be holding a write-lock but no read-lock. + ** This call ensures that the write-lock is dropped as well. We cannot + ** have sqlite3WalEndReadTransaction() drop the write-lock, as it once + ** did, because this would break "BEGIN EXCLUSIVE" handling for + ** SQLITE_ENABLE_SETLK_TIMEOUT builds. */ + sqlite3WalEndWriteTransaction(pPager->pWal); + } sqlite3WalEndReadTransaction(pPager->pWal); pPager->eState = PAGER_OPEN; }else if( !pPager->exclusiveMode ){ @@ -59863,12 +60598,12 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ char *zJournal; /* Pointer to one journal within MJ file */ char *zSuperPtr; /* Space to hold super-journal filename */ char *zFree = 0; /* Free this buffer */ - int nSuperPtr; /* Amount of space allocated to zSuperPtr[] */ + i64 nSuperPtr; /* Amount of space allocated to zSuperPtr[] */ /* Allocate space for both the pJournal and pSuper file descriptors. ** If successful, open the super-journal file for reading. 
*/ - pSuper = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2); + pSuper = (sqlite3_file *)sqlite3MallocZero(2 * (i64)pVfs->szOsFile); if( !pSuper ){ rc = SQLITE_NOMEM_BKPT; pJournal = 0; @@ -59886,11 +60621,14 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ */ rc = sqlite3OsFileSize(pSuper, &nSuperJournal); if( rc!=SQLITE_OK ) goto delsuper_out; - nSuperPtr = pVfs->mxPathname+1; + nSuperPtr = 1 + (i64)pVfs->mxPathname; + assert( nSuperJournal>=0 && nSuperPtr>0 ); zFree = sqlite3Malloc(4 + nSuperJournal + nSuperPtr + 2); if( !zFree ){ rc = SQLITE_NOMEM_BKPT; goto delsuper_out; + }else{ + assert( nSuperJournal<=0x7fffffff ); } zFree[0] = zFree[1] = zFree[2] = zFree[3] = 0; zSuperJournal = &zFree[4]; @@ -60151,7 +60889,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** for pageSize. */ zSuper = pPager->pTmpSpace; - rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); + rc = readSuperJournal(pPager->jfd, zSuper, 1+(i64)pPager->pVfs->mxPathname); if( rc==SQLITE_OK && zSuper[0] ){ rc = sqlite3OsAccess(pVfs, zSuper, SQLITE_ACCESS_EXISTS, &res); } @@ -60290,7 +61028,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** which case it requires 4 0x00 bytes in memory immediately before ** the filename. */ zSuper = &pPager->pTmpSpace[4]; - rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); + rc = readSuperJournal(pPager->jfd, zSuper, 1+(i64)pPager->pVfs->mxPathname); testcase( rc!=SQLITE_OK ); } if( rc==SQLITE_OK @@ -62061,6 +62799,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( const char *zUri = 0; /* URI args to copy */ int nUriByte = 1; /* Number of bytes of URI args at *zUri */ + /* Figure out how much space is required for each journal file-handle ** (there are two of them, the main journal and the sub-journal). */ journalFileSize = ROUND8(sqlite3JournalSize(pVfs)); @@ -62086,8 +62825,8 @@ SQLITE_PRIVATE int sqlite3PagerOpen( */ if( zFilename && zFilename[0] ){ const char *z; - nPathname = pVfs->mxPathname+1; - zPathname = sqlite3DbMallocRaw(0, nPathname*2); + nPathname = pVfs->mxPathname + 1; + zPathname = sqlite3DbMallocRaw(0, 2*(i64)nPathname); if( zPathname==0 ){ return SQLITE_NOMEM_BKPT; } @@ -62174,14 +62913,14 @@ SQLITE_PRIVATE int sqlite3PagerOpen( ROUND8(sizeof(*pPager)) + /* Pager structure */ ROUND8(pcacheSize) + /* PCache object */ ROUND8(pVfs->szOsFile) + /* The main db file */ - journalFileSize * 2 + /* The two journal files */ + (u64)journalFileSize * 2 + /* The two journal files */ SQLITE_PTRSIZE + /* Space to hold a pointer */ 4 + /* Database prefix */ - nPathname + 1 + /* database filename */ - nUriByte + /* query parameters */ - nPathname + 8 + 1 + /* Journal filename */ + (u64)nPathname + 1 + /* database filename */ + (u64)nUriByte + /* query parameters */ + (u64)nPathname + 8 + 1 + /* Journal filename */ #ifndef SQLITE_OMIT_WAL - nPathname + 4 + 1 + /* WAL filename */ + (u64)nPathname + 4 + 1 + /* WAL filename */ #endif 3 /* Terminator */ ); @@ -64904,6 +65643,11 @@ static int pagerOpenWal(Pager *pPager){ pPager->fd, pPager->zWal, pPager->exclusiveMode, pPager->journalSizeLimit, &pPager->pWal ); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( rc==SQLITE_OK ){ + sqlite3WalDb(pPager->pWal, pPager->dbWal); + } +#endif } pagerFixMaplimit(pPager); @@ -65023,6 +65767,7 @@ SQLITE_PRIVATE int sqlite3PagerWalWriteLock(Pager *pPager, int bLock){ ** blocking locks are required. 
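** (In SQLITE_ENABLE_SETLK_TIMEOUT builds the handle is now also cached
** in the Pager.dbWal field added above, so that a WAL opened later by
** pagerOpenWal() is re-attached to the same connection through the
** sqlite3WalDb() call this patch adds there.)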
*/ SQLITE_PRIVATE void sqlite3PagerWalDb(Pager *pPager, sqlite3 *db){ + pPager->dbWal = db; if( pagerUseWal(pPager) ){ sqlite3WalDb(pPager->pWal, db); } @@ -65636,6 +66381,11 @@ struct WalCkptInfo { /* ** An open write-ahead log file is represented by an instance of the ** following object. +** +** writeLock: +** This is usually set to 1 whenever the WRITER lock is held. However, +** if it is set to 2, then the WRITER lock is held but must be released +** by walHandleException() if a SEH exception is thrown. */ struct Wal { sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */ @@ -65726,9 +66476,13 @@ struct WalIterator { u32 *aPgno; /* Array of page numbers. */ int nEntry; /* Nr. of entries in aPgno[] and aIndex[] */ int iZero; /* Frame number associated with aPgno[0] */ - } aSegment[1]; /* One for every 32KB page in the wal-index */ + } aSegment[FLEXARRAY]; /* One for every 32KB page in the wal-index */ }; +/* Size (in bytes) of a WalIterator object suitable for N or fewer segments */ +#define SZ_WALITERATOR(N) \ + (offsetof(WalIterator,aSegment)+(N)*sizeof(struct WalSegment)) + /* ** Define the parameters of the hash tables in the wal-index file. There ** is a hash-table following every HASHTABLE_NPAGE page numbers in the @@ -65887,7 +66641,7 @@ static SQLITE_NOINLINE int walIndexPageRealloc( /* Enlarge the pWal->apWiData[] array if required */ if( pWal->nWiData<=iPage ){ - sqlite3_int64 nByte = sizeof(u32*)*(iPage+1); + sqlite3_int64 nByte = sizeof(u32*)*(1+(i64)iPage); volatile u32 **apNew; apNew = (volatile u32 **)sqlite3Realloc((void *)pWal->apWiData, nByte); if( !apNew ){ @@ -65996,10 +66750,8 @@ static void walChecksumBytes( s1 = s2 = 0; } - assert( nByte>=8 ); - assert( (nByte&0x00000007)==0 ); - assert( nByte<=65536 ); - assert( nByte%4==0 ); + /* nByte is a multiple of 8 between 8 and 65536 */ + assert( nByte>=8 && (nByte&7)==0 && nByte<=65536 ); if( !nativeCksum ){ do { @@ -67089,8 +67841,7 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ /* Allocate space for the WalIterator object. */ nSegment = walFramePage(iLast) + 1; - nByte = sizeof(WalIterator) - + (nSegment-1)*sizeof(struct WalSegment) + nByte = SZ_WALITERATOR(nSegment) + iLast*sizeof(ht_slot); p = (WalIterator *)sqlite3_malloc64(nByte + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) @@ -67161,7 +67912,7 @@ static int walEnableBlockingMs(Wal *pWal, int nMs){ static int walEnableBlocking(Wal *pWal){ int res = 0; if( pWal->db ){ - int tmout = pWal->db->busyTimeout; + int tmout = pWal->db->setlkTimeout; if( tmout ){ res = walEnableBlockingMs(pWal, tmout); } @@ -67547,7 +68298,9 @@ static int walHandleException(Wal *pWal){ static const int S = 1; static const int E = (1<<SQLITE_SHM_NLOCK); - u32 mUnlock = pWal->lockMask & ~( + u32 mUnlock; + if( pWal->writeLock==2 ) pWal->writeLock = 0; + mUnlock = pWal->lockMask & ~( (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock))) | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0) | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0) @@ -67819,7 +68572,12 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ if( bWriteLock || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) ){ - pWal->writeLock = 1; + /* If the write-lock was just obtained, set writeLock to 2 instead of + ** the usual 1. This causes walIndexPage() to behave as if the + ** write-lock were held (so that it allocates new pages as required), + ** and walHandleException() to unlock the write-lock if a SEH exception + ** is thrown.
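** As a rough, simplified sketch of the flow this state supports, using
** the SEH_TRY/SEH_EXCEPT wrappers that appear elsewhere in this patch:
**
**   SEH_TRY {
**     rc = walIndexPage(pWal, 0, &page0);   (may fault on mapped I/O)
**   }
**   SEH_EXCEPT( rc = walHandleException(pWal); )
**
** where walHandleException(), shown earlier, drops every lock still
** recorded in pWal->lockMask and resets writeLock from 2 to 0 so that
** the transient WRITER lock is released as well.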
*/ + if( !bWriteLock ) pWal->writeLock = 2; if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ badHdr = walIndexTryHdr(pWal, pChanged); if( badHdr ){ @@ -68183,7 +68941,6 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ rc = walIndexReadHdr(pWal, pChanged); } #ifdef SQLITE_ENABLE_SETLK_TIMEOUT - walDisableBlocking(pWal); if( rc==SQLITE_BUSY_TIMEOUT ){ rc = SQLITE_BUSY; *pCnt |= WAL_RETRY_BLOCKED_MASK; @@ -68198,6 +68955,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ ** WAL_RETRY this routine will be called again and will probably be ** right on the second iteration. */ + (void)walEnableBlocking(pWal); if( pWal->apWiData[0]==0 ){ /* This branch is taken when the xShmMap() method returns SQLITE_BUSY. ** We assume this is a transient condition, so return WAL_RETRY. The @@ -68214,6 +68972,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ rc = SQLITE_BUSY_RECOVERY; } } + walDisableBlocking(pWal); if( rc!=SQLITE_OK ){ return rc; } @@ -68604,8 +69363,11 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ ** read-lock. */ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){ - sqlite3WalEndWriteTransaction(pWal); +#ifndef SQLITE_ENABLE_SETLK_TIMEOUT + assert( pWal->writeLock==0 || pWal->readLock<0 ); +#endif if( pWal->readLock>=0 ){ + sqlite3WalEndWriteTransaction(pWal); walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); pWal->readLock = -1; } @@ -68798,7 +69560,7 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){ ** read-transaction was even opened, making this call a no-op. ** Return early. */ if( pWal->writeLock ){ - assert( !memcmp(&pWal->hdr,(void *)walIndexHdr(pWal),sizeof(WalIndexHdr)) ); + assert( !memcmp(&pWal->hdr,(void*)pWal->apWiData[0],sizeof(WalIndexHdr)) ); return SQLITE_OK; } #endif @@ -68898,6 +69660,7 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); } SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + pWal->iReCksum = 0; } return rc; } @@ -68945,6 +69708,9 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){ walCleanupHash(pWal); } SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + if( pWal->iReCksum>pWal->hdr.mxFrame ){ + pWal->iReCksum = 0; + } } return rc; @@ -70247,6 +71013,12 @@ struct CellInfo { */ #define BTCURSOR_MAX_DEPTH 20 +/* +** Maximum amount of storage local to a database page, regardless of +** page size. +*/ +#define BT_MAX_LOCAL 65501 /* 65536 - 35 */ + /* ** A cursor is a pointer to a particular entry within a particular ** b-tree within a database file. @@ -70655,7 +71427,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){ */ static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){ int i; - int skipOk = 1; + u8 skipOk = 1; Btree *p; assert( sqlite3_mutex_held(db->mutex) ); for(i=0; i<db->nDb; i++){ @@ -71511,7 +72283,7 @@ static int saveCursorKey(BtCursor *pCur){ ** below.
*/ void *pKey; pCur->nKey = sqlite3BtreePayloadSize(pCur); - pKey = sqlite3Malloc( pCur->nKey + 9 + 8 ); + pKey = sqlite3Malloc( ((i64)pCur->nKey) + 9 + 8 ); if( pKey ){ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ @@ -71801,7 +72573,7 @@ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){ */ SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){ assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 ); - pCur->hints = x; + pCur->hints = (u8)x; } @@ -71995,14 +72767,15 @@ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( static int btreePayloadToLocal(MemPage *pPage, i64 nPayload){ int maxLocal; /* Maximum amount of payload held locally */ maxLocal = pPage->maxLocal; + assert( nPayload>=0 ); if( nPayload<=maxLocal ){ - return nPayload; + return (int)nPayload; }else{ int minLocal; /* Minimum amount of payload held locally */ int surplus; /* Overflow payload available for local storage */ minLocal = pPage->minLocal; - surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize-4); - return ( surplus <= maxLocal ) ? surplus : minLocal; + surplus = (int)(minLocal +(nPayload - minLocal)%(pPage->pBt->usableSize-4)); + return (surplus <= maxLocal) ? surplus : minLocal; } } @@ -72112,11 +72885,13 @@ static void btreeParseCellPtr( pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==(u32)pPage->maxLocal+1 ); + assert( nPayload>=0 ); + assert( pPage->maxLocal <= BT_MAX_LOCAL ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ - pInfo->nSize = nPayload + (u16)(pIter - pCell); + pInfo->nSize = (u16)nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ @@ -72149,11 +72924,13 @@ static void btreeParseCellPtrIndex( pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==(u32)pPage->maxLocal+1 ); + assert( nPayload>=0 ); + assert( pPage->maxLocal <= BT_MAX_LOCAL ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ - pInfo->nSize = nPayload + (u16)(pIter - pCell); + pInfo->nSize = (u16)nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ @@ -72692,14 +73469,14 @@ static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */ -static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ - u16 iPtr; /* Address of ptr to next freeblock */ - u16 iFreeBlk; /* Address of the next freeblock */ +static int freeSpace(MemPage *pPage, int iStart, int iSize){ + int iPtr; /* Address of ptr to next freeblock */ + int iFreeBlk; /* Address of the next freeblock */ u8 hdr; /* Page header size. 
0 or 100 */ - u8 nFrag = 0; /* Reduction in fragmentation */ - u16 iOrigSize = iSize; /* Original value of iSize */ - u16 x; /* Offset to cell content area */ - u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */ + int nFrag = 0; /* Reduction in fragmentation */ + int iOrigSize = iSize; /* Original value of iSize */ + int x; /* Offset to cell content area */ + int iEnd = iStart + iSize; /* First byte past the iStart buffer */ unsigned char *data = pPage->aData; /* Page content */ u8 *pTmp; /* Temporary ptr into data[] */ @@ -72726,7 +73503,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ } iPtr = iFreeBlk; } - if( iFreeBlk>pPage->pBt->usableSize-4 ){ /* TH3: corrupt081.100 */ + if( iFreeBlk>(int)pPage->pBt->usableSize-4 ){ /* TH3: corrupt081.100 */ return SQLITE_CORRUPT_PAGE(pPage); } assert( iFreeBlk>iPtr || iFreeBlk==0 || CORRUPT_DB ); @@ -72741,7 +73518,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ nFrag = iFreeBlk - iEnd; if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_PAGE(pPage); iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]); - if( iEnd > pPage->pBt->usableSize ){ + if( iEnd > (int)pPage->pBt->usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } iSize = iEnd - iStart; @@ -72762,7 +73539,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ } } if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_PAGE(pPage); - data[hdr+7] -= nFrag; + data[hdr+7] -= (u8)nFrag; } pTmp = &data[hdr+5]; x = get2byte(pTmp); @@ -72783,7 +73560,8 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); put2byte(&data[iStart], iFreeBlk); - put2byte(&data[iStart+2], iSize); + assert( iSize>=0 && iSize<=0xffff ); + put2byte(&data[iStart+2], (u16)iSize); } pPage->nFree += iOrigSize; return SQLITE_OK; @@ -73009,7 +73787,7 @@ static int btreeInitPage(MemPage *pPage){ assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); pPage->maskPage = (u16)(pBt->pageSize - 1); pPage->nOverflow = 0; - pPage->cellOffset = pPage->hdrOffset + 8 + pPage->childPtrSize; + pPage->cellOffset = (u16)(pPage->hdrOffset + 8 + pPage->childPtrSize); pPage->aCellIdx = data + pPage->childPtrSize + 8; pPage->aDataEnd = pPage->aData + pBt->pageSize; pPage->aDataOfst = pPage->aData + pPage->childPtrSize; @@ -73043,8 +73821,8 @@ static int btreeInitPage(MemPage *pPage){ static void zeroPage(MemPage *pPage, int flags){ unsigned char *data = pPage->aData; BtShared *pBt = pPage->pBt; - u8 hdr = pPage->hdrOffset; - u16 first; + int hdr = pPage->hdrOffset; + int first; assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno || CORRUPT_DB ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); @@ -73061,7 +73839,7 @@ static void zeroPage(MemPage *pPage, int flags){ put2byte(&data[hdr+5], pBt->usableSize); pPage->nFree = (u16)(pBt->usableSize - first); decodeFlags(pPage, flags); - pPage->cellOffset = first; + pPage->cellOffset = (u16)first; pPage->aDataEnd = &data[pBt->pageSize]; pPage->aCellIdx = &data[first]; pPage->aDataOfst = &data[pPage->childPtrSize]; @@ -73847,7 +74625,7 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, BtShared *pBt = p->pBt; assert( nReserve>=0 && nReserve<=255 ); sqlite3BtreeEnter(p); - pBt->nReserveWanted = nReserve; + pBt->nReserveWanted = (u8)nReserve; x = pBt->pageSize - pBt->usableSize; if( nReserve<x ) nReserve = x; if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){ @@ -73953,7 +74731,7 @@ SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){ assert(
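/* An aside on the freeSpace() retyping above, plausibly its motivation:
** the old u16 locals could truncate values derived from corrupt page
** content. For example iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2])
** can reach 65536 or more, and a u16 destination wraps (65536 becomes
** 0) instead of tripping the SQLITE_CORRUPT_PAGE() tests. With int
** locals the out-of-range value stays visible to those tests, and the
** narrowing happens only at the final put2byte(), guarded by the new
** assert( iSize>=0 && iSize<=0xffff ). */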
BTS_FAST_SECURE==(BTS_OVERWRITE|BTS_SECURE_DELETE) ); if( newFlag>=0 ){ p->pBt->btsFlags &= ~BTS_FAST_SECURE; - p->pBt->btsFlags |= BTS_SECURE_DELETE*newFlag; + p->pBt->btsFlags |= (u16)(BTS_SECURE_DELETE*newFlag); } b = (p->pBt->btsFlags & BTS_FAST_SECURE)/BTS_SECURE_DELETE; sqlite3BtreeLeave(p); @@ -74473,6 +75251,13 @@ static SQLITE_NOINLINE int btreeBeginTrans( (void)sqlite3PagerWalWriteLock(pPager, 0); unlockBtreeIfUnused(pBt); } +#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) + if( rc==SQLITE_BUSY_TIMEOUT ){ + /* If a blocking lock timed out, break out of the loop here so that + ** the busy-handler is not invoked. */ + break; + } +#endif }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && btreeInvokeBusyHandler(pBt) ); sqlite3PagerWalDb(pPager, 0); @@ -76882,7 +77667,7 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto( rc = SQLITE_CORRUPT_PAGE(pPage); goto moveto_index_finish; } - pCellKey = sqlite3Malloc( nCell+nOverrun ); + pCellKey = sqlite3Malloc( (u64)nCell+(u64)nOverrun ); if( pCellKey==0 ){ rc = SQLITE_NOMEM_BKPT; goto moveto_index_finish; @@ -78401,7 +79186,8 @@ static int rebuildPage( } /* The pPg->nFree field is now set incorrectly. The caller will fix it. */ - pPg->nCell = nCell; + assert( nCell < 10922 ); + pPg->nCell = (u16)nCell; pPg->nOverflow = 0; put2byte(&aData[hdr+1], 0); @@ -78648,9 +79434,13 @@ static int editPage( if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iNew+nCell, nNew-nCell, pCArray - ) ) goto editpage_fail; + ) + ){ + goto editpage_fail; + } - pPg->nCell = nNew; + assert( nNew < 10922 ); + pPg->nCell = (u16)nNew; pPg->nOverflow = 0; put2byte(&aData[hdr+3], pPg->nCell); @@ -78959,7 +79749,7 @@ static int balance_nonroot( int pageFlags; /* Value of pPage->aData[0] */ int iSpace1 = 0; /* First unused byte of aSpace1[] */ int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */ - int szScratch; /* Size of scratch memory requested */ + u64 szScratch; /* Size of scratch memory requested */ MemPage *apOld[NB]; /* pPage and up to two siblings */ MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ u8 *pRight; /* Location in parent of right-sibling pointer */ @@ -80244,7 +81034,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( pCur->info.nKey==pX->nKey ){ BtreePayload x2; x2.pData = pX->pKey; - x2.nData = pX->nKey; + x2.nData = (int)pX->nKey; assert( pX->nKey<=0x7fffffff ); x2.nZero = 0; return btreeOverwriteCell(pCur, &x2); } @@ -80425,7 +81215,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 getCellInfo(pSrc); if( pSrc->info.nPayload<0x80 ){ - *(aOut++) = pSrc->info.nPayload; + *(aOut++) = (u8)pSrc->info.nPayload; }else{ aOut += sqlite3PutVarint(aOut, pSrc->info.nPayload); } @@ -80438,7 +81228,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 nRem = pSrc->info.nPayload; if( nIn==nRem && nIn<pDest->pPage->maxLocal ){ memcpy(aOut, aIn, nIn); - pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace); + pBt->nPreformatSize = nIn + (int)(aOut - pBt->pTmpSpace); return SQLITE_OK; }else{ int rc = SQLITE_OK; @@ -80450,7 +81240,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 u32 nOut; /* Size of output buffer aOut[] */ nOut = btreePayloadToLocal(pDest->pPage, pSrc->info.nPayload); - pBt->nPreformatSize = nOut + (aOut - pBt->pTmpSpace); + pBt->nPreformatSize = (int)nOut + (int)(aOut - pBt->pTmpSpace); if( nOut<pSrc->info.nPayload ){ pPgnoOut = &aOut[nOut]; pBt->nPreformatSize += 4; @@ -82071,6 +82861,7 @@ SQLITE_PRIVATE int
sqlite3BtreeIsInBackup(Btree *p){ */ SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ BtShared *pBt = p->pBt; + assert( nBytes==0 || nBytes==sizeof(Schema) ); sqlite3BtreeEnter(p); if( !pBt->pSchema && nBytes ){ pBt->pSchema = sqlite3DbMallocZero(0, nBytes); @@ -83187,7 +83978,7 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){ ** corresponding string value, then it is important that the string be ** derived from the numeric value, not the other way around, to ensure ** that the index and table are consistent. See ticket -** https://www.sqlite.org/src/info/343634942dd54ab (2018-01-31) for +** https://sqlite.org/src/info/343634942dd54ab (2018-01-31) for an example. ** ** This routine looks at pMem to verify that if it has both a numeric @@ -83373,7 +84164,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){ return; } if( pMem->enc!=SQLITE_UTF8 ) return; - if( NEVER(pMem->z==0) ) return; + assert( pMem->z!=0 ); if( pMem->flags & MEM_Dyn ){ if( pMem->xDel==sqlite3_free && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1) @@ -84486,7 +85277,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){ if( pRec==0 ){ Index *pIdx = p->pIdx; /* Index being probed */ - int nByte; /* Bytes of space to allocate */ + i64 nByte; /* Bytes of space to allocate */ int i; /* Counter variable */ int nCol = pIdx->nColumn; /* Number of index columns including rowid */ @@ -84552,7 +85343,7 @@ static int valueFromFunction( ){ sqlite3_context ctx; /* Context object for function invocation */ sqlite3_value **apVal = 0; /* Function arguments */ - int nVal = 0; /* Size of apVal[] array */ + int nVal = 0; /* Number of function arguments */ FuncDef *pFunc = 0; /* Function definition */ sqlite3_value *pVal = 0; /* New value */ int rc = SQLITE_OK; /* Return code */ @@ -85550,12 +86341,10 @@ SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall( int eCallCtx /* Calling context */ ){ Vdbe *v = pParse->pVdbe; - int nByte; int addr; sqlite3_context *pCtx; assert( v ); - nByte = sizeof(*pCtx) + (nArg-1)*sizeof(sqlite3_value*); - pCtx = sqlite3DbMallocRawNN(pParse->db, nByte); + pCtx = sqlite3DbMallocRawNN(pParse->db, SZ_CONTEXT(nArg)); if( pCtx==0 ){ assert( pParse->db->mallocFailed ); freeEphemeralFunction(pParse->db, (FuncDef*)pFunc); @@ -85831,7 +86620,7 @@ static Op *opIterNext(VdbeOpIter *p){ } if( pRet->p4type==P4_SUBPROGRAM ){ - int nByte = (p->nSub+1)*sizeof(SubProgram*); + i64 nByte = (1+(u64)p->nSub)*sizeof(SubProgram*); int j; for(j=0; j<p->nSub; j++){ if( p->apSub[j]==pRet->p4.pProgram ) break; @@ -85961,8 +86750,8 @@ SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){ ** (1) For each jump instruction with a negative P2 value (a label) ** resolve the P2 value to an actual address. ** -** (2) Compute the maximum number of arguments used by any SQL function -** and store that value in *pMaxFuncArgs. +** (2) Compute the maximum number of arguments used by the xUpdate/xFilter +** methods of any virtual table and store that value in *pMaxVtabArgs. ** ** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately ** indicate what the prepared statement actually does. @@ -85975,8 +86764,8 @@ SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){ ** script numbers the opcodes correctly. Changes to this routine must be ** coordinated with changes to mkopcodeh.tcl.
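** (On the rename below: *pMaxFuncArgs becomes *pMaxVtabArgs because, per
** the revised item (2), only the OP_VUpdate and OP_VFilter argument
** counts feed it. The value ultimately sizes the Vdbe.apArg array, and
** the SQLITE_DEBUG-only field p->napArg introduced in
** sqlite3VdbeMakeReady() lets the asserts added to OP_VFilter and
** OP_VUpdate later in this patch cross-check that sizing at runtime.)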
*/ -static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ - int nMaxArgs = *pMaxFuncArgs; +static void resolveP2Values(Vdbe *p, int *pMaxVtabArgs){ + int nMaxVtabArgs = *pMaxVtabArgs; Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; @@ -86021,15 +86810,19 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ } #ifndef SQLITE_OMIT_VIRTUALTABLE case OP_VUpdate: { - if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; + if( pOp->p2>nMaxVtabArgs ) nMaxVtabArgs = pOp->p2; break; } case OP_VFilter: { int n; + /* The instruction immediately prior to VFilter will be an + ** OP_Integer that sets the "argc" value for the VFilter. See + ** the code where OP_VFilter is generated at tag-20250207a. */ assert( (pOp - p->aOp) >= 3 ); assert( pOp[-1].opcode==OP_Integer ); + assert( pOp[-1].p2==pOp->p3+1 ); n = pOp[-1].p1; - if( n>nMaxArgs ) nMaxArgs = n; + if( n>nMaxVtabArgs ) nMaxVtabArgs = n; /* Fall through into the default case */ /* no break */ deliberate_fall_through } @@ -86070,7 +86863,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ pParse->aLabel = 0; } pParse->nLabel = 0; - *pMaxFuncArgs = nMaxArgs; + *pMaxVtabArgs = nMaxVtabArgs; assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) ); } @@ -86299,7 +87092,7 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus( const char *zName /* Name of table or index being scanned */ ){ if( IS_STMT_SCANSTATUS(p->db) ){ - sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); + i64 nByte = (1+(i64)p->nScan) * sizeof(ScanStatus); ScanStatus *aNew; aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); if( aNew ){ @@ -86409,6 +87202,9 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ */ SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe *p, int iDest){ VdbeOp *pOp = sqlite3VdbeGetLastOp(p); +#ifdef SQLITE_DEBUG + while( pOp->opcode==OP_ReleaseReg ) pOp--; +#endif if( pOp->p3==iDest && pOp->opcode==OP_Column ){ pOp->p5 |= OPFLAG_TYPEOFARG; } @@ -87748,7 +88544,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( int nVar; /* Number of parameters */ int nMem; /* Number of VM memory registers */ int nCursor; /* Number of cursors required */ - int nArg; /* Number of arguments in subprograms */ + int nArg; /* Max number args to xFilter or xUpdate */ int n; /* Loop counter */ struct ReusableSpace x; /* Reusable bulk memory */ @@ -87820,6 +88616,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*)); } } +#ifdef SQLITE_DEBUG + p->napArg = nArg; +#endif if( db->mallocFailed ){ p->nVar = 0; @@ -89317,6 +90116,7 @@ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( ){ UnpackedRecord *p; /* Unpacked record to return */ int nByte; /* Number of bytes required for *p */ + assert( sizeof(UnpackedRecord) + sizeof(Mem)*65536 < 0x7fffffff ); nByte = ROUND8P(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1); p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); if( !p ) return 0; @@ -90623,10 +91423,11 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( preupdate.pCsr = pCsr; preupdate.op = op; preupdate.iNewReg = iReg; - preupdate.keyinfo.db = db; - preupdate.keyinfo.enc = ENC(db); - preupdate.keyinfo.nKeyField = pTab->nCol; - preupdate.keyinfo.aSortFlags = (u8*)&fakeSortOrder; + preupdate.pKeyinfo = (KeyInfo*)&preupdate.keyinfoSpace; + preupdate.pKeyinfo->db = db; + preupdate.pKeyinfo->enc = ENC(db); + preupdate.pKeyinfo->nKeyField = pTab->nCol; + preupdate.pKeyinfo->aSortFlags = (u8*)&fakeSortOrder; preupdate.iKey1 = iKey1; preupdate.iKey2 = iKey2; 
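/* A sketch of why the preupdate hook now goes through a KeyInfo
** pointer: KeyInfo ends in a flexible array member in this revision, so
** it can no longer be embedded by value inside the PreUpdate object.
** Assuming a layout consistent with the assignments above (the struct
** itself is outside this hunk), PreUpdate now carries roughly
**
**   KeyInfo *pKeyinfo;        used by the vdbeUnpackRecord() calls below
**   u64 keyinfoSpace[N];      raw space for a KeyInfo with no collations
**
** and pKeyinfo is pointed at keyinfoSpace, as the first added line
** above shows. */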
preupdate.pTab = pTab; @@ -90636,8 +91437,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2); db->pPreUpdate = 0; sqlite3DbFree(db, preupdate.aRecord); - vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pUnpacked); - vdbeFreeUnpacked(db, preupdate.keyinfo.nKeyField+1, preupdate.pNewUnpacked); + vdbeFreeUnpacked(db, preupdate.pKeyinfo->nKeyField+1,preupdate.pUnpacked); + vdbeFreeUnpacked(db, preupdate.pKeyinfo->nKeyField+1,preupdate.pNewUnpacked); sqlite3VdbeMemRelease(&preupdate.oldipk); if( preupdate.aNew ){ int i; @@ -92468,7 +93269,7 @@ SQLITE_API int sqlite3_bind_text64( assert( xDel!=SQLITE_DYNAMIC ); if( enc!=SQLITE_UTF8 ){ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; - nData &= ~(u16)1; + nData &= ~(u64)1; } return bindText(pStmt, i, zData, nData, xDel, enc); } @@ -92876,7 +93677,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa if( !aRec ) goto preupdate_old_out; rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); if( rc==SQLITE_OK ){ - p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); + p->pUnpacked = vdbeUnpackRecord(p->pKeyinfo, nRec, aRec); if( !p->pUnpacked ) rc = SQLITE_NOMEM; } if( rc!=SQLITE_OK ){ @@ -92893,7 +93694,9 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa Column *pCol = &p->pTab->aCol[iIdx]; if( pCol->iDflt>0 ){ if( p->apDflt==0 ){ - int nByte = sizeof(sqlite3_value*)*p->pTab->nCol; + int nByte; + assert( sizeof(sqlite3_value*)*UMXV(p->pTab->nCol) < 0x7fffffff ); + nByte = sizeof(sqlite3_value*)*p->pTab->nCol; p->apDflt = (sqlite3_value**)sqlite3DbMallocZero(db, nByte); if( p->apDflt==0 ) goto preupdate_old_out; } @@ -92939,7 +93742,7 @@ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){ #else p = db->pPreUpdate; #endif - return (p ? p->keyinfo.nKeyField : 0); + return (p ? p->pKeyinfo->nKeyField : 0); } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -93022,7 +93825,7 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa Mem *pData = &p->v->aMem[p->iNewReg]; rc = ExpandBlob(pData); if( rc!=SQLITE_OK ) goto preupdate_new_out; - pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z); + pUnpack = vdbeUnpackRecord(p->pKeyinfo, pData->n, pData->z); if( !pUnpack ){ rc = SQLITE_NOMEM; goto preupdate_new_out; @@ -93043,7 +93846,8 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa */ assert( p->op==SQLITE_UPDATE ); if( !p->aNew ){ - p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField); + assert( sizeof(Mem)*UMXV(p->pCsr->nField) < 0x7fffffff ); + p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem)*p->pCsr->nField); if( !p->aNew ){ rc = SQLITE_NOMEM; goto preupdate_new_out; @@ -93813,11 +94617,11 @@ static VdbeCursor *allocateCursor( */ Mem *pMem = iCur>0 ? 
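/* On the sqlite3_bind_text64() change above: ~(u16)1 and ~(u64)1 clear
** the same bit in practice, because (u16)1 promotes to int before the
** ~, and the resulting -2 sign-extends to 0xfffffffffffffffe when
** converted for the & with the u64 nData. The (u64) cast therefore
** changes no behavior; it states the intended 64-bit mask directly and
** avoids the implicit narrow-then-widen round trip. */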
&p->aMem[p->nMem-iCur] : p->aMem; - int nByte; + i64 nByte; VdbeCursor *pCx = 0; - nByte = - ROUND8P(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField + - (eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0); + nByte = SZ_VDBECURSOR(nField); + assert( ROUND8(nByte)==nByte ); + if( eCurType==CURTYPE_BTREE ) nByte += sqlite3BtreeCursorSize(); assert( iCur>=0 && iCur<p->nCursor ); if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/ @@ -93841,7 +94645,7 @@ static VdbeCursor *allocateCursor( pMem->szMalloc = 0; return 0; } - pMem->szMalloc = nByte; + pMem->szMalloc = (int)nByte; } p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->zMalloc; @@ -93850,8 +94654,8 @@ static VdbeCursor *allocateCursor( pCx->nField = nField; pCx->aOffset = &pCx->aType[nField]; if( eCurType==CURTYPE_BTREE ){ - pCx->uc.pCursor = (BtCursor*) - &pMem->z[ROUND8P(sizeof(VdbeCursor))+2*sizeof(u32)*nField]; + assert( ROUND8(SZ_VDBECURSOR(nField))==SZ_VDBECURSOR(nField) ); + pCx->uc.pCursor = (BtCursor*)&pMem->z[SZ_VDBECURSOR(nField)]; sqlite3BtreeCursorZero(pCx->uc.pCursor); } return pCx; @@ -94855,7 +95659,7 @@ case OP_Halt: { sqlite3VdbeError(p, "%s", pOp->p4.z); } pcx = (int)(pOp - aOp); - sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg); + sqlite3_log(pOp->p1, "abort at %d: %s; [%s]", pcx, p->zErrMsg, p->zSql); } rc = sqlite3VdbeHalt(p); assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); @@ -96181,7 +96985,7 @@ case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ break; } -/* Opcode: Once P1 P2 * * * +/* Opcode: Once P1 P2 P3 * * ** ** Fall through to the next instruction the first time this opcode is ** encountered on each invocation of the byte-code program. Jump to P2 @@ -96197,6 +97001,12 @@ case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ ** whether or not the jump should be taken. The bitmask is necessary ** because the self-altering code trick does not work for recursive ** triggers. +** +** The P3 operand is not used directly by this opcode. However P3 is +** used by the code generator as follows: If this opcode is the start +** of a subroutine and that subroutine uses a Bloom filter, then P3 will +** be the register that holds that Bloom filter. See tag-202407032019 +** in the source code for implementation details. */ case OP_Once: { /* jump */ u32 iAddr; /* Address of this instruction */ @@ -97242,6 +98052,7 @@ case OP_MakeRecord: { zHdr += sqlite3PutVarint(zHdr, serial_type); if( pRec->n ){ assert( pRec->z!=0 ); + assert( pRec->z!=(const char*)sqlite3CtypeMap ); memcpy(zPayload, pRec->z, pRec->n); zPayload += pRec->n; } @@ -99593,7 +100404,7 @@ case OP_RowData: { /* The OP_RowData opcodes always follow OP_NotExists or ** OP_SeekRowid or OP_Rewind/Op_Next with no intervening instructions ** that might invalidate the cursor. - ** If this where not the case, on of the following assert()s + ** If this were not the case, one of the following assert()s ** would fail. Should this ever change (because of changes in the code ** generator) then the fix would be to insert a call to ** sqlite3VdbeCursorMoveto().
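** (A concrete sketch of the OP_Once P3 convention documented above,
** with addresses and register numbers invented for illustration: the
** code generated for "x IN (SELECT ...)" with a Bloom filter is shaped
** like
**
**   10  Once    0   15  r5        r5 recorded in P3, tag-202407032019
**   11   ...    build the RHS index, OP_FilterAdd into r5 ...
**   15  Filter  r5  ifNone  rLhs  n
**
** matching the OP_Filter that sqlite3ExprCodeIN() emits from that P3
** value later in this patch.)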
@@ -100862,7 +101673,7 @@ case OP_RowSetTest: { /* jump, in1, in3 */ */ case OP_Program: { /* jump0 */ int nMem; /* Number of memory registers for sub-program */ - int nByte; /* Bytes of runtime space required for sub-program */ + i64 nByte; /* Bytes of runtime space required for sub-program */ Mem *pRt; /* Register to allocate runtime space */ Mem *pMem; /* Used to iterate through memory cells */ Mem *pEnd; /* Last memory cell in new array */ @@ -100913,7 +101724,7 @@ case OP_Program: { /* jump0 */ nByte = ROUND8(sizeof(VdbeFrame)) + nMem * sizeof(Mem) + pProgram->nCsr * sizeof(VdbeCursor*) - + (pProgram->nOp + 7)/8; + + (7 + (i64)pProgram->nOp)/8; pFrame = sqlite3DbMallocZero(db, nByte); if( !pFrame ){ goto no_mem; @@ -100921,7 +101732,7 @@ case OP_Program: { /* jump0 */ sqlite3VdbeMemRelease(pRt); pRt->flags = MEM_Blob|MEM_Dyn; pRt->z = (char*)pFrame; - pRt->n = nByte; + pRt->n = (int)nByte; pRt->xDel = sqlite3VdbeFrameMemDel; pFrame->v = p; @@ -101020,12 +101831,14 @@ case OP_Param: { /* out2 */ ** statement counter is incremented (immediate foreign key constraints). */ case OP_FkCounter: { - if( db->flags & SQLITE_DeferFKs ){ - db->nDeferredImmCons += pOp->p2; - }else if( pOp->p1 ){ + if( pOp->p1 ){ db->nDeferredCons += pOp->p2; }else{ - p->nFkConstraint += pOp->p2; + if( db->flags & SQLITE_DeferFKs ){ + db->nDeferredImmCons += pOp->p2; + }else{ + p->nFkConstraint += pOp->p2; + } } break; } @@ -101240,7 +102053,7 @@ case OP_AggStep: { ** ** Note: We could avoid this by using a regular memory cell from aMem[] for ** the accumulator, instead of allocating one here. */ - nAlloc = ROUND8P( sizeof(pCtx[0]) + (n-1)*sizeof(sqlite3_value*) ); + nAlloc = ROUND8P( SZ_CONTEXT(n) ); pCtx = sqlite3DbMallocRawNN(db, nAlloc + sizeof(Mem)); if( pCtx==0 ) goto no_mem; pCtx->pOut = (Mem*)((u8*)pCtx + nAlloc); @@ -101900,6 +102713,7 @@ case OP_VFilter: { /* jump, ncycle */ /* Invoke the xFilter method */ apArg = p->apArg; + assert( nArg<=p->napArg ); for(i = 0; i<nArg; i++){ apArg[i] = &pArgc[i+1]; } @@ ... @@ u8 vtabOnConflict = db->vtabOnConflict; apArg = p->apArg; pX = &aMem[pOp->p3]; + assert( nArg<=p->napArg ); for(i=0; i<nArg; i++){ assert( memIsValid(pX) ); memAboutToChange(p, pX); @@ ... @@ p->rc = rc; sqlite3SystemError(db, rc); testcase( sqlite3GlobalConfig.xLog!=0 ); - sqlite3_log(rc, "statement aborts at %d: [%s] %s", - (int)(pOp - aOp), p->zSql, p->zErrMsg); + sqlite3_log(rc, "statement aborts at %d: %s; [%s]", + (int)(pOp - aOp), p->zErrMsg, p->zSql); if( p->eVdbeState==VDBE_RUN_STATE ) sqlite3VdbeHalt(p); if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db); if( rc==SQLITE_CORRUPT && db->autoCommit==0 ){ @@ -102896,6 +103711,7 @@ SQLITE_API int sqlite3_blob_open( char *zErr = 0; Table *pTab; Incrblob *pBlob = 0; + int iDb; Parse sParse; #ifdef SQLITE_ENABLE_API_ARMOR @@ -102941,7 +103757,10 @@ SQLITE_API int sqlite3_blob_open( sqlite3ErrorMsg(&sParse, "cannot open view: %s", zTable); } #endif - if( !pTab ){ + if( pTab==0 + || ((iDb = sqlite3SchemaToIndex(db, pTab->pSchema))==1 && + sqlite3OpenTempDatabase(&sParse)) + ){ if( sParse.zErrMsg ){ sqlite3DbFree(db, zErr); zErr = sParse.zErrMsg; @@ -102952,15 +103771,11 @@ SQLITE_API int sqlite3_blob_open( goto blob_open_out; } pBlob->pTab = pTab; - pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName; + pBlob->zDb = db->aDb[iDb].zDbSName; /* Now search pTab for the exact column.
*/ - for(iCol=0; iCol<pTab->nCol; iCol++) { - if( sqlite3StrICmp(pTab->aCol[iCol].zCnName, zColumn)==0 ){ - break; - } - } - if( iCol==pTab->nCol ){ + iCol = sqlite3ColumnIndex(pTab, zColumn); + if( iCol<0 ){ sqlite3DbFree(db, zErr); zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn); rc = SQLITE_ERROR; @@ -103040,7 +103855,6 @@ SQLITE_API int sqlite3_blob_open( {OP_Halt, 0, 0, 0}, /* 5 */ }; Vdbe *v = (Vdbe *)pBlob->pStmt; - int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); VdbeOp *aOp; sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag, @@ -103618,9 +104432,12 @@ struct VdbeSorter { u8 iPrev; /* Previous thread used to flush PMA */ u8 nTask; /* Size of aTask[] array */ u8 typeMask; - SortSubtask aTask[1]; /* One or more subtasks */ + SortSubtask aTask[FLEXARRAY]; /* One or more subtasks */ }; +/* Size (in bytes) of a VdbeSorter object that works with N or fewer subtasks */ +#define SZ_VDBESORTER(N) (offsetof(VdbeSorter,aTask)+(N)*sizeof(SortSubtask)) + #define SORTER_TYPE_INTEGER 0x01 #define SORTER_TYPE_TEXT 0x02 @@ -104222,7 +105039,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( VdbeSorter *pSorter; /* The new sorter */ KeyInfo *pKeyInfo; /* Copy of pCsr->pKeyInfo with db==0 */ int szKeyInfo; /* Size of pCsr->pKeyInfo in bytes */ - int sz; /* Size of pSorter in bytes */ + i64 sz; /* Size of pSorter in bytes */ int rc = SQLITE_OK; #if SQLITE_MAX_WORKER_THREADS==0 # define nWorker 0 @@ -104250,8 +105067,10 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( assert( pCsr->pKeyInfo ); assert( !pCsr->isEphemeral ); assert( pCsr->eCurType==CURTYPE_SORTER ); - szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nKeyField-1)*sizeof(CollSeq*); - sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask); + assert( sizeof(KeyInfo) + UMXV(pCsr->pKeyInfo->nKeyField)*sizeof(CollSeq*) + < 0x7fffffff ); + szKeyInfo = SZ_KEYINFO(pCsr->pKeyInfo->nKeyField); + sz = SZ_VDBESORTER(nWorker+1); pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo); pCsr->uc.pSorter = pSorter; @@ -104463,7 +105282,7 @@ static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){ */ static MergeEngine *vdbeMergeEngineNew(int nReader){ int N = 2; /* Smallest power of two >= nReader */ - int nByte; /* Total bytes of space to allocate */ + i64 nByte; /* Total bytes of space to allocate */ MergeEngine *pNew; /* Pointer to allocated object to return */ assert( nReader<=SORTER_MAX_MERGE_COUNT ); @@ -104715,6 +105534,10 @@ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){ p->u.pNext = 0; for(i=0; aSlot[i]; i++){ p = vdbeSorterMerge(pTask, p, aSlot[i]); + /* ,--Each aSlot[] holds twice as much as the previous. So we cannot use + ** | up all 64 aSlots[] with only a 64-bit address space. + ** v */ + assert( i<ArraySize(aSlot)-1 ); aSlot[i] = 0; } aSlot[i] = p; @@ ... @@ static int lookupName( Schema *pSchema = 0; /* Schema of the expression */ int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */ Table *pTab = 0; /* Table holding the row */ - Column *pCol; /* A column of pTab */ ExprList *pFJMatch = 0; /* Matches for FULL JOIN ..
USING */ const char *zCol = pRight->u.zToken; @@ -107557,7 +108379,6 @@ static int lookupName( if( pSrcList ){ for(i=0, pItem=pSrcList->a; i<pSrcList->nSrc; i++, pItem++){ - u8 hCol; pTab = pItem->pSTab; assert( pTab!=0 && pTab->zName!=0 ); assert( pTab->nCol>0 || pParse->nErr ); @@ -107645,43 +108466,38 @@ static int lookupName( sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->y.pTab); } } - hCol = sqlite3StrIHash(zCol); - for(j=0, pCol=pTab->aCol; j<pTab->nCol; j++, pCol++){ - if( pCol->hName==hCol - && sqlite3StrICmp(pCol->zCnName, zCol)==0 - ){ - if( cnt>0 ){ - if( pItem->fg.isUsing==0 - || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 - ){ - /* Two or more tables have the same column name which is - ** not joined by USING. This is an error. Signal as much - ** by clearing pFJMatch and letting cnt go above 1. */ - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else - if( (pItem->fg.jointype & JT_RIGHT)==0 ){ - /* An INNER or LEFT JOIN. Use the left-most table */ - continue; - }else - if( (pItem->fg.jointype & JT_LEFT)==0 ){ - /* A RIGHT JOIN. Use the right-most table */ - cnt = 0; - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else{ - /* For a FULL JOIN, we must construct a coalesce() func */ - extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); - } - } - cnt++; - pMatch = pItem; - /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ - pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; - if( pItem->fg.isNestedFrom ){ - sqlite3SrcItemColumnUsed(pItem, j); + j = sqlite3ColumnIndex(pTab, zCol); + if( j>=0 ){ + if( cnt>0 ){ + if( pItem->fg.isUsing==0 + || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 + ){ + /* Two or more tables have the same column name which is + ** not joined by USING. This is an error. Signal as much + ** by clearing pFJMatch and letting cnt go above 1. */ + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else + if( (pItem->fg.jointype & JT_RIGHT)==0 ){ + /* An INNER or LEFT JOIN. Use the left-most table */ + continue; + }else + if( (pItem->fg.jointype & JT_LEFT)==0 ){ + /* A RIGHT JOIN. Use the right-most table */ + cnt = 0; + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else{ + /* For a FULL JOIN, we must construct a coalesce() func */ + extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); } - break; + } + cnt++; + pMatch = pItem; + /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ + pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; + if( pItem->fg.isNestedFrom ){ + sqlite3SrcItemColumnUsed(pItem, j); } } if( 0==cnt && VisibleRowid(pTab) ){ @@ -107771,23 +108587,18 @@ static int lookupName( if( pTab ){ int iCol; - u8 hCol = sqlite3StrIHash(zCol); pSchema = pTab->pSchema; cntTab++; - for(iCol=0, pCol=pTab->aCol; iCol<pTab->nCol; iCol++, pCol++){ - if( pCol->hName==hCol - && sqlite3StrICmp(pCol->zCnName, zCol)==0 - ){ - if( iCol==pTab->iPKey ){ - iCol = -1; - } - break; + iCol = sqlite3ColumnIndex(pTab, zCol); + if( iCol>=0 ){ + if( pTab->iPKey==iCol ) iCol = -1; + }else{ + if( sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){ + iCol = -1; + }else{ + iCol = pTab->nCol; } } - if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){ - /* IMP: R-51414-32910 */ - iCol = -1; - } if( iCol<pTab->nCol ){ cnt++; pMatch = 0; @@ -108426,13 +109237,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ ** sqlite_version() that might change over time cannot be used ** in an index or generated column. Curiously, they can be used ** in a CHECK constraint. SQLServer, MySQL, and PostgreSQL all - ** all this. */ + ** allow this.
*/ sqlite3ResolveNotValid(pParse, pNC, "non-deterministic functions", NC_IdxExpr|NC_PartIdx|NC_GenCol, 0, pExpr); }else{ assert( (NC_SelfRef & 0xff)==NC_SelfRef ); /* Must fit in 8 bits */ pExpr->op2 = pNC->ncFlags & NC_SelfRef; - if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL); } if( (pDef->funcFlags & SQLITE_FUNC_INTERNAL)!=0 && pParse->nested==0 @@ -108448,6 +109258,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ if( (pDef->funcFlags & (SQLITE_FUNC_DIRECT|SQLITE_FUNC_UNSAFE))!=0 && !IN_RENAME_OBJECT ){ + if( pNC->ncFlags & NC_FromDDL ) ExprSetProperty(pExpr, EP_FromDDL); sqlite3ExprFunctionUsable(pParse, pExpr, pDef); } } @@ -109501,20 +110312,22 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( Expr *pExpr, /* Expression to resolve. May be NULL. */ ExprList *pList /* Expression list to resolve. May be NULL. */ ){ - SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ + SrcList *pSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ int rc; + u8 srcSpace[SZ_SRCLIST_1]; /* Memory space for the fake SrcList */ assert( type==0 || pTab!=0 ); assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr || type==NC_GenCol || pTab==0 ); memset(&sNC, 0, sizeof(sNC)); - memset(&sSrc, 0, sizeof(sSrc)); + pSrc = (SrcList*)srcSpace; + memset(pSrc, 0, SZ_SRCLIST_1); if( pTab ){ - sSrc.nSrc = 1; - sSrc.a[0].zName = pTab->zName; - sSrc.a[0].pSTab = pTab; - sSrc.a[0].iCursor = -1; + pSrc->nSrc = 1; + pSrc->a[0].zName = pTab->zName; + pSrc->a[0].pSTab = pTab; + pSrc->a[0].iCursor = -1; if( pTab->pSchema!=pParse->db->aDb[1].pSchema ){ /* Cause EP_FromDDL to be set on TK_FUNCTION nodes of non-TEMP ** schema elements */ @@ -109522,7 +110335,7 @@ SQLITE_PRIVATE int sqlite3ResolveSelfReference( } } sNC.pParse = pParse; - sNC.pSrcList = &sSrc; + sNC.pSrcList = pSrc; sNC.ncFlags = type | NC_IsDDL; if( (rc = sqlite3ResolveExprNames(&sNC, pExpr))!=SQLITE_OK ) return rc; if( pList ) rc = sqlite3ResolveExprListNames(&sNC, pList); @@ -109606,7 +110419,9 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr ); } - if( op==TK_VECTOR ){ + if( op==TK_VECTOR + || (op==TK_FUNCTION && pExpr->affExpr==SQLITE_AFF_DEFER) + ){ assert( ExprUseXList(pExpr) ); return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); } @@ -109799,7 +110614,9 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ p = p->pLeft; continue; } - if( op==TK_VECTOR ){ + if( op==TK_VECTOR + || (op==TK_FUNCTION && p->affExpr==SQLITE_AFF_DEFER) + ){ assert( ExprUseXList(p) ); p = p->x.pList->a[0].pExpr; continue; @@ -110673,7 +111490,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ return pLeft; }else{ u32 f = pLeft->flags | pRight->flags; - if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse))==EP_IsFalse + if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse|EP_HasFunc))==EP_IsFalse && !IN_RENAME_OBJECT ){ sqlite3ExprDeferredDelete(pParse, pLeft); @@ -111271,7 +112088,7 @@ static Expr *exprDup( SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p){ With *pRet = 0; if( p ){ - sqlite3_int64 nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1); + sqlite3_int64 nByte = SZ_WITH(p->nCte); pRet = sqlite3DbMallocZero(db, nByte); if( pRet ){ int i; @@ -111382,7 +112199,6 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int } pItem->zEName = sqlite3DbStrDup(db, pOldItem->zEName); pItem->fg = pOldItem->fg; - pItem->fg.done = 0; pItem->u = 
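/* The SZ_WITH()/SZ_SRCLIST()/SZ_IDLIST()/SZ_EXPRLIST() sizes used by
** the Dup routines in this region come from the same FLEXARRAY
** conversion seen earlier: each object now ends in a true flexible
** array member, and its allocation size is computed by a macro shaped
** like SZ_VDBESORTER() above, e.g. (a sketch, not the verbatim
** definition):
**
**   #define SZ_SRCLIST(N)  (offsetof(SrcList,a) + (N)*sizeof(SrcItem))
**
** That conversion is also why sqlite3ResolveSelfReference() above
** swapped its stack-declared SrcList for a u8 srcSpace[SZ_SRCLIST_1]
** buffer: sizeof() no longer accounts for any entries, so a plain local
** variable would reserve no room for even a single SrcItem. */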
pOldItem->u; } return pNew; @@ -111399,11 +112215,9 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int flags){ SrcList *pNew; int i; - int nByte; assert( db!=0 ); if( p==0 ) return 0; - nByte = sizeof(*p) + (p->nSrc>0 ? sizeof(p->a[0]) * (p->nSrc-1) : 0); - pNew = sqlite3DbMallocRawNN(db, nByte ); + pNew = sqlite3DbMallocRawNN(db, SZ_SRCLIST(p->nSrc) ); if( pNew==0 ) return 0; pNew->nSrc = pNew->nAlloc = p->nSrc; for(i=0; i<p->nSrc; i++){ @@ -111465,7 +112279,7 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, const IdList *p){ int i; assert( db!=0 ); if( p==0 ) return 0; - pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew)+(p->nId-1)*sizeof(p->a[0]) ); + pNew = sqlite3DbMallocRawNN(db, SZ_IDLIST(p->nId)); if( pNew==0 ) return 0; pNew->nId = p->nId; for(i=0; i<p->nId; i++){ @@ -111497,7 +112311,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *pDup, int fla pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); pNew->iLimit = 0; pNew->iOffset = 0; - pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; + pNew->selFlags = p->selFlags & ~(u32)SF_UsesEphemeral; pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; pNew->nSelectRow = p->nSelectRow; @@ -111549,7 +112363,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendNew( struct ExprList_item *pItem; ExprList *pList; - pList = sqlite3DbMallocRawNN(db, sizeof(ExprList)+sizeof(pList->a[0])*4 ); + pList = sqlite3DbMallocRawNN(db, SZ_EXPRLIST(4)); if( pList==0 ){ sqlite3ExprDelete(db, pExpr); return 0; } @@ -111569,8 +112383,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendGrow( struct ExprList_item *pItem; ExprList *pNew; pList->nAlloc *= 2; - pNew = sqlite3DbRealloc(db, pList, - sizeof(*pList)+(pList->nAlloc-1)*sizeof(pList->a[0])); + pNew = sqlite3DbRealloc(db, pList, SZ_EXPRLIST(pList->nAlloc)); if( pNew==0 ){ sqlite3ExprListDelete(db, pList); sqlite3ExprDelete(db, pExpr); @@ -112499,13 +113312,7 @@ SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){ int ii; assert( VisibleRowid(pTab) ); for(ii=0; ii<ArraySize(azOpt); ii++){ - int iCol; - for(iCol=0; iCol<pTab->nCol; iCol++){ - if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break; - } - if( iCol==pTab->nCol ){ - return azOpt[ii]; - } + if( sqlite3ColumnIndex(pTab, azOpt[ii])<0 ) return azOpt[ii]; } return 0; } @@ -112909,7 +113716,7 @@ static char *exprINAffinity(Parse *pParse, const Expr *pExpr){ char *zRet; assert( pExpr->op==TK_IN ); - zRet = sqlite3DbMallocRaw(pParse->db, nVal+1); + zRet = sqlite3DbMallocRaw(pParse->db, 1+(i64)nVal); if( zRet ){ int i; for(i=0; i<nVal; i++){ @@ ... @@ sqlite3SelectDelete(pParse->db, pCopy); sqlite3DbFree(pParse->db, dest.zAffSdst); if( addrBloom ){ + /* Remember that location of the Bloom filter in the P3 operand ** of the OP_Once that began this subroutine.
tag-202407032019 */ sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; if( dest.iSDParm2==0 ){ - sqlite3VdbeChangeToNoop(v, addrBloom); - }else{ - sqlite3VdbeGetOp(v, addrOnce)->p3 = dest.iSDParm2; + /* If the Bloom filter won't actually be used, keep it small */ + sqlite3VdbeGetOp(v, addrBloom)->p1 = 10; } } if( rc ){ @@ -113620,7 +114428,7 @@ static void sqlite3ExprCodeIN( if( ExprHasProperty(pExpr, EP_Subrtn) ){ const VdbeOp *pOp = sqlite3VdbeGetOp(v, pExpr->y.sub.iAddr); assert( pOp->opcode==OP_Once || pParse->nErr ); - if( pOp->opcode==OP_Once && pOp->p3>0 ){ + if( pOp->opcode==OP_Once && pOp->p3>0 ){ /* tag-202407032019 */ assert( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ); sqlite3VdbeAddOp4Int(v, OP_Filter, pOp->p3, destIfFalse, rLhs, nVector); VdbeCoverage(v); @@ -114212,7 +115020,7 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup( /* -** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This +** Expression pExpr is guaranteed to be a TK_COLUMN or equivalent. This ** function checks the Parse.pIdxPartExpr list to see if this column ** can be replaced with a constant value. If so, it generates code to ** put the constant value in a register (ideally, but not necessarily, @@ -114436,6 +115244,12 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3VdbeLoadString(v, target, pExpr->u.zToken); return target; } + case TK_NULLS: { + /* Set a range of registers to NULL. pExpr->y.nReg registers starting + ** with target */ + sqlite3VdbeAddOp3(v, OP_Null, 0, target, target + pExpr->y.nReg - 1); + return target; + } default: { /* Make NULL the default case so that if a bug causes an illegal ** Expr node to be passed into this function, it will be handled @@ -115120,6 +115934,25 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce( return regDest; } +/* +** Make arrangements to invoke OP_Null on a range of registers +** during initialization. +*/ +SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3ExprNullRegisterRange( + Parse *pParse, /* Parsing context */ + int iReg, /* First register to set to NULL */ + int nReg /* Number of sequential registers to NULL out */ +){ + u8 okConstFactor = pParse->okConstFactor; + Expr t; + memset(&t, 0, sizeof(t)); + t.op = TK_NULLS; + t.y.nReg = nReg; + pParse->okConstFactor = 1; + sqlite3ExprCodeRunJustOnce(pParse, &t, iReg); + pParse->okConstFactor = okConstFactor; +} + /* ** Generate code to evaluate an expression and store the results ** into a register. 
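** (Related: the TK_NULLS node added above is a small code-generation
** trick. sqlite3ExprNullRegisterRange() fills in a stack-local Expr
** with op==TK_NULLS and y.nReg==N, forces okConstFactor on, and hands
** it to sqlite3ExprCodeRunJustOnce(), so a single
** "OP_Null 0 target target+N-1" lands in the one-time initialization
** section of the program and the N registers are cleared once per run
** rather than once per row.)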
Return the register number where the results @@ -115469,11 +116302,11 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1); - sqlite3VdbeTypeofColumn(v, r1); + assert( regFree1==0 || regFree1==r1 ); + if( regFree1 ) sqlite3VdbeTypeofColumn(v, r1); sqlite3VdbeAddOp2(v, op, r1, dest); VdbeCoverageIf(v, op==TK_ISNULL); VdbeCoverageIf(v, op==TK_NOTNULL); - testcase( regFree1==0 ); break; } case TK_BETWEEN: { @@ -115644,11 +116477,11 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int case TK_ISNULL: case TK_NOTNULL: { r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1); - sqlite3VdbeTypeofColumn(v, r1); + assert( regFree1==0 || regFree1==r1 ); + if( regFree1 ) sqlite3VdbeTypeofColumn(v, r1); sqlite3VdbeAddOp2(v, op, r1, dest); testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL); testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL); - testcase( regFree1==0 ); break; } case TK_BETWEEN: { @@ -116548,7 +117381,9 @@ static void findOrCreateAggInfoColumn( ){ struct AggInfo_col *pCol; int k; + int mxTerm = pParse->db->aLimit[SQLITE_LIMIT_COLUMN]; + assert( mxTerm <= SMXV(i16) ); assert( pAggInfo->iFirstReg==0 ); pCol = pAggInfo->aCol; for(k=0; k<pAggInfo->nColumn; k++, pCol++){ if( pCol->hName==hCol && sqlite3StrICmp(pCol->zCnName, zCol)==0 ){ if( cnt>0 ){ ... }else{ ... } } ... } assert( pParse->db->mallocFailed ); return; } + if( k>mxTerm ){ + sqlite3ErrorMsg(pParse, "more than %d aggregate terms", mxTerm); + k = mxTerm; + } pCol = &pAggInfo->aCol[k]; assert( ExprUseYTab(pExpr) ); pCol->pTab = pExpr->y.pTab; @@ -116599,6 +117438,7 @@ static void findOrCreateAggInfoColumn( if( pExpr->op==TK_COLUMN ){ pExpr->op = TK_AGG_COLUMN; } + assert( k <= SMXV(pExpr->iAgg) ); pExpr->iAgg = (i16)k; } @@ -116683,13 +117523,19 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ ** function that is already in the pAggInfo structure */ struct AggInfo_func *pItem = pAggInfo->aFunc; + int mxTerm = pParse->db->aLimit[SQLITE_LIMIT_COLUMN]; + assert( mxTerm <= SMXV(i16) ); for(i=0; i<pAggInfo->nFunc; i++, pItem++){ if( NEVER(pItem->pFExpr==pExpr) ) break; if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){ break; } } - if( i>=pAggInfo->nFunc ){ + if( i>mxTerm ){ + sqlite3ErrorMsg(pParse, "more than %d aggregate terms", mxTerm); + i = mxTerm; + assert( i<pAggInfo->nFunc ); + }else if( i>=pAggInfo->nFunc ){ /* pExpr is original.
Make a new entry in pAggInfo->aFunc[] */ u8 enc = ENC(pParse->db); @@ -116743,6 +117589,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ */ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(pExpr, EP_NoReduce); + assert( i <= SMXV(pExpr->iAgg) ); pExpr->iAgg = (i16)i; pExpr->pAggInfo = pAggInfo; return WRC_Prune; @@ -117453,13 +118300,13 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ assert( pNew->nCol>0 ); nAlloc = (((pNew->nCol-1)/8)*8)+8; assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 ); - pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc); + pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*(u32)nAlloc); pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName); if( !pNew->aCol || !pNew->zName ){ assert( db->mallocFailed ); goto exit_begin_add_column; } - memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol); + memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*(size_t)pNew->nCol); for(i=0; i<pNew->nCol; i++){ Column *pCol = &pNew->aCol[i]; pCol->zCnName = sqlite3DbStrDup(db, pCol->zCnName); @@ -117554,10 +118401,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( ** altered. Set iCol to be the index of the column being renamed */ zOld = sqlite3NameFromToken(db, pOld); if( !zOld ) goto exit_rename_column; - for(iCol=0; iCol<pTab->nCol; iCol++){ - if( 0==sqlite3StrICmp(pTab->aCol[iCol].zCnName, zOld) ) break; - } - if( iCol==pTab->nCol ){ + iCol = sqlite3ColumnIndex(pTab, zOld); + if( iCol<0 ){ sqlite3ErrorMsg(pParse, "no such column: \"%T\"", pOld); goto exit_rename_column; } @@ -118060,6 +118905,7 @@ static int renameParseSql( int bTemp /* True if SQL is from temp schema */ ){ int rc; + u64 flags; sqlite3ParseObjectInit(p, db); if( zSql==0 ){ return SQLITE_NOMEM; } if( sqlite3StrNICmp(zSql,"CREATE ",7)!=0 ){ return SQLITE_CORRUPT_BKPT; } - db->init.iDb = bTemp ? 1 : sqlite3FindDbName(db, zDb); + if( bTemp ){ + db->init.iDb = 1; + }else{ + int iDb = sqlite3FindDbName(db, zDb); + assert( iDb>=0 && iDb<=0xff ); + db->init.iDb = (u8)iDb; + } p->eParseMode = PARSE_MODE_RENAME; p->db = db; p->nQueryLoop = 1; + flags = db->flags; + testcase( (db->flags & SQLITE_Comments)==0 && strstr(zSql," /* ")!=0 ); + db->flags |= SQLITE_Comments; rc = sqlite3RunParser(p, zSql); + db->flags = flags; if( db->mallocFailed ) rc = SQLITE_NOMEM; if( rc==SQLITE_OK && NEVER(p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0) @@ -118135,10 +118991,11 @@ static int renameEditSql( nQuot = sqlite3Strlen30(zQuot)-1; } - assert( nQuot>=nNew ); - zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1); + assert( nQuot>=nNew && nSql>=0 && nNew>=0 ); + zOut = sqlite3DbMallocZero(db, (u64)nSql + pRename->nList*(u64)nQuot + 1); }else{ - zOut = (char*)sqlite3DbMallocZero(db, (nSql*2+1) * 3); + assert( nSql>0 ); + zOut = (char*)sqlite3DbMallocZero(db, (2*(u64)nSql + 1) * 3); if( zOut ){ zBuf1 = &zOut[nSql*2+1]; zBuf2 = &zOut[nSql*4+2]; } } /* At this point pRename->pList contains a list of RenameToken objects ** corresponding to all tokens in the input SQL that must be replaced ** with the new column name, or with single-quoted versions of themselves. ** All that remains is to construct and return the edited SQL string.
*/ if( zOut ){ - int nOut = nSql; - memcpy(zOut, zSql, nSql); + i64 nOut = nSql; + assert( nSql>0 ); + memcpy(zOut, zSql, (size_t)nSql); while( pRename->pList ){ int iOff; /* Offset of token to replace in zOut */ - u32 nReplace; + i64 nReplace; const char *zReplace; RenameToken *pBest = renameColumnTokenNext(pRename); if( zNew ){ - if( bQuote==0 && sqlite3IsIdChar(*pBest->t.z) ){ + if( bQuote==0 && sqlite3IsIdChar(*(u8*)pBest->t.z) ){ nReplace = nNew; zReplace = zNew; }else{ @@ -118177,14 +119035,15 @@ static int renameEditSql( memcpy(zBuf1, pBest->t.z, pBest->t.n); zBuf1[pBest->t.n] = 0; sqlite3Dequote(zBuf1); - sqlite3_snprintf(nSql*2, zBuf2, "%Q%s", zBuf1, + assert( nSql < 0x15555554 /* otherwise malloc would have failed */ ); + sqlite3_snprintf((int)(nSql*2), zBuf2, "%Q%s", zBuf1, pBest->t.z[pBest->t.n]=='\'' ? " " : "" ); zReplace = zBuf2; nReplace = sqlite3Strlen30(zReplace); } - iOff = pBest->t.z - zSql; + iOff = (int)(pBest->t.z - zSql); if( pBest->t.n!=nReplace ){ memmove(&zOut[iOff + nReplace], &zOut[iOff + pBest->t.n], nOut - (iOff + pBest->t.n) @@ -118210,11 +119069,12 @@ static int renameEditSql( ** Set all pEList->a[].fg.eEName fields in the expression-list to val. */ static void renameSetENames(ExprList *pEList, int val){ + assert( val==ENAME_NAME || val==ENAME_TAB || val==ENAME_SPAN ); if( pEList ){ int i; for(i=0; i<pEList->nExpr; i++){ assert( val==ENAME_NAME || pEList->a[i].fg.eEName==ENAME_NAME ); - pEList->a[i].fg.eEName = val; + pEList->a[i].fg.eEName = val&0x3; } } } @@ -118471,7 +119331,7 @@ static void renameColumnFunc( if( sParse.pNewTable ){ if( IsView(sParse.pNewTable) ){ Select *pSelect = sParse.pNewTable->u.view.pSelect; - pSelect->selFlags &= ~SF_View; + pSelect->selFlags &= ~(u32)SF_View; sParse.rc = SQLITE_OK; sqlite3SelectPrep(&sParse, pSelect, 0); rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc); @@ -118689,7 +119549,7 @@ static void renameTableFunc( sNC.pParse = &sParse; assert( pSelect->selFlags & SF_View ); - pSelect->selFlags &= ~SF_View; + pSelect->selFlags &= ~(u32)SF_View; sqlite3SelectPrep(&sParse, pTab->u.view.pSelect, &sNC); if( sParse.nErr ){ rc = sParse.rc; @@ -118862,7 +119722,7 @@ static void renameQuotefixFunc( if( sParse.pNewTable ){ if( IsView(sParse.pNewTable) ){ Select *pSelect = sParse.pNewTable->u.view.pSelect; - pSelect->selFlags &= ~SF_View; + pSelect->selFlags &= ~(u32)SF_View; sParse.rc = SQLITE_OK; sqlite3SelectPrep(&sParse, pSelect, 0); rc = (db->mallocFailed ?
SQLITE_NOMEM : sParse.rc); @@ -118961,10 +119821,10 @@ static void renameTableTest( if( zDb && zInput ){ int rc; Parse sParse; - int flags = db->flags; + u64 flags = db->flags; if( bNoDQS ) db->flags &= ~(SQLITE_DqsDML|SQLITE_DqsDDL); rc = renameParseSql(&sParse, zDb, db, zInput, bTemp); - db->flags |= (flags & (SQLITE_DqsDML|SQLITE_DqsDDL)); + db->flags = flags; if( rc==SQLITE_OK ){ if( isLegacy==0 && sParse.pNewTable && IsView(sParse.pNewTable) ){ NameContext sNC; @@ -119456,7 +120316,8 @@ static void openStatTable( sqlite3NestedParse(pParse, "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols ); - aRoot[i] = (u32)pParse->regRoot; + assert( pParse->isCreate || pParse->nErr ); + aRoot[i] = (u32)pParse->u1.cr.regRoot; aCreateTbl[i] = OPFLAG_P2ISREG; } }else{ @@ -119647,7 +120508,7 @@ static void statInit( int nCol; /* Number of columns in index being sampled */ int nKeyCol; /* Number of key columns */ int nColUp; /* nCol rounded up for alignment */ - int n; /* Bytes of space to allocate */ + i64 n; /* Bytes of space to allocate */ sqlite3 *db = sqlite3_context_db_handle(context); /* Database connection */ #ifdef SQLITE_ENABLE_STAT4 /* Maximum number of samples. 0 if STAT4 data is not collected */ @@ -119683,7 +120544,7 @@ static void statInit( p->db = db; p->nEst = sqlite3_value_int64(argv[2]); p->nRow = 0; - p->nLimit = sqlite3_value_int64(argv[3]); + p->nLimit = sqlite3_value_int(argv[3]); p->nCol = nCol; p->nKeyCol = nKeyCol; p->nSkipAhead = 0; @@ -120816,16 +121677,6 @@ static void decodeIntArray( while( z[0]!=0 && z[0]!=' ' ) z++; while( z[0]==' ' ) z++; } - - /* Set the bLowQual flag if the peak number of rows obtained - ** from a full equality match is so large that a full table scan - ** seems likely to be faster than using the index. 
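
The renameTableTest() hunk above, like renameParseSql() earlier, now saves the entire 64-bit db->flags word and restores it wholesale instead of OR-ing selected bits back. A sketch of that save/restore idiom under illustrative names (parse_stub stands in for sqlite3RunParser; the flag bit value is invented):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Illustrative flag bit; the real SQLITE_* assignments differ. */
    #define SQLITE_Comments ((u64)1 << 50)

    typedef struct Db { u64 flags; } Db;

    static int parse_stub(Db *db, const char *zSql){
      (void)zSql;
      return (db->flags & SQLITE_Comments) ? 0 : 1; /* pretend comments matter */
    }

    /* Save the whole 64-bit flag word, force a bit on, restore afterwards. */
    static int run_parser_with_comments(Db *db, const char *zSql){
      u64 saved = db->flags;   /* must be u64: an int copy would drop bits */
      int rc;
      db->flags |= SQLITE_Comments;
      rc = parse_stub(db, zSql);
      db->flags = saved;       /* restores set AND cleared bits alike */
      return rc;
    }

    int main(void){
      Db db = { 0 };
      int rc = run_parser_with_comments(&db, "CREATE TABLE t(x)");
      printf("rc=%d flags=%llu\n", rc, (unsigned long long)db.flags);
      return 0;
    }

Restoring the whole word is what the old `int flags` copy could not do once the flag set outgrew 32 bits.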
- */ - if( aLog[0] > 66 /* Index has more than 100 rows */ - && aLog[0] <= aLog[nOut-1] /* And only a single value seen */ - ){ - pIndex->bLowQual = 1; - } } } @@ -121421,7 +122272,7 @@ static void attachFunc( if( aNew==0 ) return; memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); }else{ - aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); + aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(1+(i64)db->nDb)); if( aNew==0 ) return; } db->aDb = aNew; @@ -121492,6 +122343,13 @@ static void attachFunc( sqlite3BtreeEnterAll(db); db->init.iDb = 0; db->mDbFlags &= ~(DBFLAG_SchemaKnownOk); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( db->setlkFlags & SQLITE_SETLK_BLOCK_ON_CONNECT ){ + int val = 1; + sqlite3_file *fd = sqlite3PagerFile(sqlite3BtreePager(pNew->pBt)); + sqlite3OsFileControlHint(fd, SQLITE_FCNTL_BLOCK_ON_CONNECT, &val); + } +#endif if( !REOPEN_AS_MEMDB(db) ){ rc = sqlite3Init(db, &zErrDyn); } @@ -122214,6 +123072,7 @@ static SQLITE_NOINLINE void lockTable( } } + assert( pToplevel->nTableLock < 0x7fff0000 ); nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1); pToplevel->aTableLock = sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes); @@ -122314,10 +123173,12 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort)); if( v ){ if( pParse->bReturning ){ - Returning *pReturning = pParse->u1.pReturning; + Returning *pReturning; int addrRewind; int reg; + assert( !pParse->isCreate ); + pReturning = pParse->u1.d.pReturning; if( pReturning->nRetCol ){ sqlite3VdbeAddOp0(v, OP_FkCheck); addrRewind = @@ -122393,7 +123254,9 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } if( pParse->bReturning ){ - Returning *pRet = pParse->u1.pReturning; + Returning *pRet; + assert( !pParse->isCreate ); + pRet = pParse->u1.d.pReturning; if( pRet->nRetCol ){ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); } @@ -123208,10 +124071,16 @@ SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){ ** find the (first) offset of that column in index pIdx. Or return -1 ** if column iCol is not used in index pIdx. */ -SQLITE_PRIVATE i16 sqlite3TableColumnToIndex(Index *pIdx, i16 iCol){ +SQLITE_PRIVATE int sqlite3TableColumnToIndex(Index *pIdx, int iCol){ int i; + i16 iCol16; + assert( iCol>=(-1) && iCol<=SQLITE_MAX_COLUMN ); + assert( pIdx->nColumn<=SQLITE_MAX_COLUMN+1 ); + iCol16 = iCol; for(i=0; i<pIdx->nColumn; i++){ - if( iCol==pIdx->aiColumn[i] ) return i; + if( iCol16==pIdx->aiColumn[i] ){ + return i; + } } return -1; } @@ -123465,8 +124334,9 @@ SQLITE_PRIVATE void sqlite3StartTable( /* If the file format and encoding in the database have not been set, ** set them now. */ - reg1 = pParse->regRowid = ++pParse->nMem; - reg2 = pParse->regRoot = ++pParse->nMem; + assert( pParse->isCreate ); + reg1 = pParse->u1.cr.regRowid = ++pParse->nMem; + reg2 = pParse->u1.cr.regRoot = ++pParse->nMem; reg3 = ++pParse->nMem; sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT); sqlite3VdbeUsesBtree(v, iDb); @@ -123481,8 +124351,8 @@ ** The record created does not contain anything yet. It will be replaced ** by the real entry in code generated at sqlite3EndTable(). ** - ** The rowid for the new entry is left in register pParse->regRowid. - ** The root page number of the new table is left in reg pParse->regRoot. + ** The rowid for the new entry is left in register pParse->u1.cr.regRowid. + ** The root page of the new table is left in reg pParse->u1.cr.regRoot.
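
Several hunks above widen allocation arithmetic — for example sizeof(db->aDb[0])*(1+(i64)db->nDb) — so the promotion to 64 bits happens before the multiply rather than after it overflows. A small illustrative helper showing the same discipline (hypothetical names, not SQLite's allocator):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef int64_t i64;

    /* Promote before multiplying: (uint64_t)n*szItem cannot wrap 32-bit int. */
    static void *allocArray(i64 n, size_t szItem){
      uint64_t nByte = (uint64_t)n * szItem;
      if( n < 0 || nByte > 0x7fffffff ) return 0; /* mimic SQLite's size cap */
      return calloc(1, (size_t)nByte);
    }

    int main(void){
      /* 40001*80000 bytes would wrap if computed in 32-bit arithmetic */
      void *p = allocArray(1 + (i64)40000, 80000);
      printf("%s\n", p ? "allocated" : "refused");
      free(p);
      return 0;
    }

The refusal path is the important part: a wrapped size would have allocated a too-small buffer and corrupted memory later, which is exactly what the (i64)/(u64) casts in the patch prevent.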
** The rowid and root page number values are needed by the code that ** sqlite3EndTable will generate. */ @@ -123493,7 +124363,7 @@ SQLITE_PRIVATE void sqlite3StartTable( #endif { assert( !pParse->bReturning ); - pParse->u1.addrCrTab = + pParse->u1.cr.addrCrTab = sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, reg2, BTREE_INTKEY); } sqlite3OpenSchemaTable(pParse, iDb); @@ -123571,7 +124441,8 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ sqlite3ExprListDelete(db, pList); return; } - pParse->u1.pReturning = pRet; + assert( !pParse->isCreate ); + pParse->u1.d.pReturning = pRet; pRet->pParse = pParse; pRet->pReturnEL = pList; sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet); @@ -123613,7 +124484,6 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){ char *zType; Column *pCol; sqlite3 *db = pParse->db; - u8 hName; Column *aNew; u8 eType = COLTYPE_CUSTOM; u8 szEst = 1; @@ -123667,13 +124537,10 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){ memcpy(z, sName.z, sName.n); z[sName.n] = 0; sqlite3Dequote(z); - hName = sqlite3StrIHash(z); - for(i=0; i<p->nCol; i++){ - if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zCnName)==0 ){ - sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); - sqlite3DbFree(db, z); - return; - } + if( p->nCol && sqlite3ColumnIndex(p, z)>=0 ){ + sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); + sqlite3DbFree(db, z); + return; } aNew = sqlite3DbRealloc(db,p->aCol,((i64)p->nCol+1)*sizeof(p->aCol[0])); if( aNew==0 ){ @@ -123684,7 +124551,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){ pCol = &p->aCol[p->nCol]; memset(pCol, 0, sizeof(p->aCol[0])); pCol->zCnName = z; - pCol->hName = hName; + pCol->hName = sqlite3StrIHash(z); sqlite3ColumnPropertiesFromName(p, pCol); if( sType.n==0 ){ @@ -123708,9 +124575,14 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){ pCol->affinity = sqlite3AffinityType(zType, pCol); pCol->colFlags |= COLFLAG_HASTYPE; } + if( p->nCol<=0xff ){ + u8 h = pCol->hName % sizeof(p->aHx); + p->aHx[h] = p->nCol; + } p->nCol++; p->nNVCol++; - pParse->constraintName.n = 0; + assert( pParse->isCreate ); + pParse->u1.cr.constraintName.n = 0; } /* @@ -123974,15 +124846,11 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey( assert( pCExpr!=0 ); sqlite3StringToId(pCExpr); if( pCExpr->op==TK_ID ){ - const char *zCName; assert( !ExprHasProperty(pCExpr, EP_IntValue) ); - zCName = pCExpr->u.zToken; - for(iCol=0; iCol<pTab->nCol; iCol++){ - if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zCnName)==0 ){ - pCol = &pTab->aCol[iCol]; - makeColumnPartOfPrimaryKey(pParse, pCol); - break; - } + iCol = sqlite3ColumnIndex(pTab, pCExpr->u.zToken); + if( iCol>=0 ){ + pCol = &pTab->aCol[iCol]; + makeColumnPartOfPrimaryKey(pParse, pCol); } } } @@ -124034,8 +124902,10 @@ SQLITE_PRIVATE void sqlite3AddCheckConstraint( && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt) ){ pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr); - if( pParse->constraintName.n ){ - sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1); + assert( pParse->isCreate ); + if( pParse->u1.cr.constraintName.n ){ + sqlite3ExprListSetName(pParse, pTab->pCheck, + &pParse->u1.cr.constraintName, 1); }else{ Token t; for(zStart++; sqlite3Isspace(zStart[0]); zStart++){} @@ -124230,7 +125100,8 @@ static void identPut(char *z, int *pIdx, char *zSignedIdent){ ** from sqliteMalloc() and must be freed by the calling function.
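
The pParse->u1 accesses in these hunks (u1.cr.regRoot, u1.cr.constraintName, u1.d.pReturning) follow a discriminated-union pattern: pParse->isCreate records which arm is live, and every access asserts the discriminant first. A toy model of the idiom — the field names mirror the patch, but the struct is heavily simplified:

    #include <assert.h>
    #include <stdio.h>

    typedef struct Parse {
      int isCreate;                /* discriminant: which arm of u1 is live */
      union {
        struct { int addrCrTab; int regRowid; int regRoot; } cr; /* CREATE */
        struct { void *pReturning; } d;                          /* others */
      } u1;
    } Parse;

    static int createRootReg(Parse *p){
      assert( p->isCreate );       /* debug builds catch wrong-arm reads */
      return p->u1.cr.regRoot;
    }

    int main(void){
      Parse p = { 1, { .cr = { 0, 7, 8 } } };
      printf("regRoot=%d\n", createRootReg(&p));  /* prints regRoot=8 */
      return 0;
    }

Overlaying the CREATE-only registers with the RETURNING pointer shrinks the Parse object, and the asserts keep the overlay honest.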
*/ static char *createTableStmt(sqlite3 *db, Table *p){ - int i, k, n; + int i, k, len; + i64 n; char *zStmt; char *zSep, *zSep2, *zEnd; Column *pCol; @@ -124254,8 +125125,9 @@ static char *createTableStmt(sqlite3 *db, Table *p){ sqlite3OomFault(db); return 0; } - sqlite3_snprintf(n, zStmt, "CREATE TABLE "); - k = sqlite3Strlen30(zStmt); + assert( n>14 && n<=0x7fffffff ); + memcpy(zStmt, "CREATE TABLE ", 13); + k = 13; identPut(zStmt, &k, p->zName); zStmt[k++] = '('; for(pCol=p->aCol, i=0; i<p->nCol; i++, pCol++){ @@ -124267,13 +125139,15 @@ static char *createTableStmt(sqlite3 *db, Table *p){ /* SQLITE_AFF_REAL */ " REAL", /* SQLITE_AFF_FLEXNUM */ " NUM", }; - int len; const char *zType; - sqlite3_snprintf(n-k, &zStmt[k], zSep); - k += sqlite3Strlen30(&zStmt[k]); + len = sqlite3Strlen30(zSep); + assert( k+len<n ); + memcpy(&zStmt[k], zSep, len); + k += len; zSep = zSep2; identPut(zStmt, &k, pCol->zCnName); + assert( k<n ); assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 ); assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) ); testcase( pCol->affinity==SQLITE_AFF_BLOB ); @@ -124288,11 +125162,14 @@ static char *createTableStmt(sqlite3 *db, Table *p){ assert( pCol->affinity==SQLITE_AFF_BLOB || pCol->affinity==SQLITE_AFF_FLEXNUM || pCol->affinity==sqlite3AffinityType(zType, 0) ); + assert( k+lennColumn>=N ) return SQLITE_OK; + db = pParse->db; + assert( N>0 ); + assert( N <= SQLITE_MAX_COLUMN*2 /* tag-20250221-1 */ ); + testcase( N==2*pParse->db->aLimit[SQLITE_LIMIT_COLUMN] ); assert( pIdx->isResized==0 ); - nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*N; + nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*(u64)N; zExtra = sqlite3DbMallocZero(db, nByte); if( zExtra==0 ) return SQLITE_NOMEM_BKPT; memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn); @@ -124319,7 +125201,7 @@ static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){ zExtra += sizeof(i16)*N; memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn); pIdx->aSortOrder = (u8*)zExtra; - pIdx->nColumn = N; + pIdx->nColumn = (u16)N; /* See tag-20250221-1 above for proof of safety */ pIdx->isResized = 1; return SQLITE_OK; } @@ -124485,9 +125367,9 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ ** into BTREE_BLOBKEY. */ assert( !pParse->bReturning ); - if( pParse->u1.addrCrTab ){ + if( pParse->u1.cr.addrCrTab ){ assert( v ); - sqlite3VdbeChangeP3(v, pParse->u1.addrCrTab, BTREE_BLOBKEY); + sqlite3VdbeChangeP3(v, pParse->u1.cr.addrCrTab, BTREE_BLOBKEY); } /* Locate the PRIMARY KEY index.
Or, if this table was originally @@ -124573,14 +125455,14 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ pIdx->nColumn = pIdx->nKeyCol; continue; } - if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return; + if( resizeIndexObject(pParse, pIdx, pIdx->nKeyCol+n) ) return; for(i=0, j=pIdx->nKeyCol; i<nPk; i++){ if( !isDupColumn(pIdx, pIdx->nKeyCol, pPk, i) ){ testcase( hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ); pIdx->aiColumn[j] = pPk->aiColumn[i]; pIdx->azColl[j] = pPk->azColl[i]; if( pPk->aSortOrder[i] ){ - /* See ticket https://www.sqlite.org/src/info/bba7b69f9849b5bf */ + /* See ticket https://sqlite.org/src/info/bba7b69f9849b5bf */ pIdx->bAscKeyBug = 1; } j++; @@ -124597,7 +125479,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ if( !hasColumn(pPk->aiColumn, nPk, i) && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ) nExtra++; } - if( resizeIndexObject(db, pPk, nPk+nExtra) ) return; + if( resizeIndexObject(pParse, pPk, nPk+nExtra) ) return; for(i=0, j=nPk; i<pTab->nCol; i++){ if( !hasColumn(pPk->aiColumn, j, i) && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 @@ -124927,7 +125809,7 @@ SQLITE_PRIVATE void sqlite3EndTable( /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT ** statement to populate the new table. The root-page number for the - ** new table is in register pParse->regRoot. + ** new table is in register pParse->u1.cr.regRoot. ** ** Once the SELECT has been coded by sqlite3Select(), it is in a ** suitable state to query for the column names and types to be used @@ -124958,7 +125840,8 @@ SQLITE_PRIVATE void sqlite3EndTable( regRec = ++pParse->nMem; regRowid = ++pParse->nMem; sqlite3MayAbort(pParse); - sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->regRoot, iDb); + assert( pParse->isCreate ); + sqlite3VdbeAddOp3(v, OP_OpenWrite, iCsr, pParse->u1.cr.regRoot, iDb); sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG); addrTop = sqlite3VdbeCurrentAddr(v) + 1; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); @@ -125003,6 +125886,7 @@ SQLITE_PRIVATE void sqlite3EndTable( ** schema table. We just need to update that slot with all ** the information we've collected. */ + assert( pParse->isCreate ); sqlite3NestedParse(pParse, "UPDATE %Q." LEGACY_SCHEMA_TABLE " SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q" @@ -125011,9 +125895,9 @@ zType, p->zName, p->zName, - pParse->regRoot, + pParse->u1.cr.regRoot, zStmt, - pParse->regRowid + pParse->u1.cr.regRowid ); sqlite3DbFree(db, zStmt); sqlite3ChangeCookie(pParse, iDb); @@ -125753,7 +126637,7 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey( }else{ nCol = pFromCol->nExpr; } - nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1; + nByte = SZ_FKEY(nCol) + pTo->n + 1; if( pToCol ){ for(i=0; i<pToCol->nExpr; i++){ nByte += sqlite3Strlen30(pToCol->a[i].zEName) + 1; } } @@ -125955,7 +126839,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ ** not work for UNIQUE constraint indexes on WITHOUT ROWID tables ** with DESC primary keys, since those indexes have there keys in ** a different order from the main table.
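
SZ_FKEY(nCol) above — like the SZ_IDLIST, SZ_SRCLIST and SZ_WITH macros in the hunks that follow — computes the byte size of a struct whose last member is a flexible array. A sketch of the offsetof-based idiom with a toy struct (the real SQLite layouts differ):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct IdEntry { char *zName; int idx; } IdEntry;
    typedef struct IdList {
      int nId;
      IdEntry a[];              /* C99 flexible array member (FLEXARRAY) */
    } IdList;

    /* Bytes needed for an IdList holding N entries. offsetof() starts the
    ** count at the array itself, so trailing padding is never counted twice,
    ** matching the SZ_* style used throughout this patch. */
    #define SZ_IDLIST(N) (offsetof(IdList, a) + (size_t)(N)*sizeof(IdEntry))

    int main(void){
      IdList *p = (IdList*)calloc(1, SZ_IDLIST(4));
      if( p ){
        p->nId = 4;
        printf("%zu bytes for 4 entries\n", SZ_IDLIST(4));
        free(p);
      }
      return 0;
    }

The old `a[1]` + `(nCol-1)*sizeof(...)` arithmetic worked but relied on undefined behavior when the array was indexed past its declared bound; the FLEXARRAY form makes the intent explicit to compilers and sanitizers.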
- ** See ticket: https://www.sqlite.org/src/info/bba7b69f9849b5bf + ** See ticket: https://sqlite.org/src/info/bba7b69f9849b5bf */ sqlite3VdbeAddOp1(v, OP_SeekEnd, iIdx); } @@ -125979,13 +126863,14 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ */ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( sqlite3 *db, /* Database connection */ - i16 nCol, /* Total number of columns in the index */ + int nCol, /* Total number of columns in the index */ int nExtra, /* Number of bytes of extra space to alloc */ char **ppExtra /* Pointer to the "extra" space */ ){ Index *p; /* Allocated index object */ - int nByte; /* Bytes of space for Index object + arrays */ + i64 nByte; /* Bytes of space for Index object + arrays */ + assert( nCol <= 2*db->aLimit[SQLITE_LIMIT_COLUMN] ); nByte = ROUND8(sizeof(Index)) + /* Index structure */ ROUND8(sizeof(char*)*nCol) + /* Index.azColl */ ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */ @@ -125998,8 +126883,9 @@ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1); p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol; p->aSortOrder = (u8*)pExtra; - p->nColumn = nCol; - p->nKeyCol = nCol - 1; + assert( nCol>0 ); + p->nColumn = (u16)nCol; + p->nKeyCol = (u16)(nCol - 1); *ppExtra = ((char*)p) + nByte; } return p; @@ -126810,12 +127696,11 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token * sqlite3 *db = pParse->db; int i; if( pList==0 ){ - pList = sqlite3DbMallocZero(db, sizeof(IdList) ); + pList = sqlite3DbMallocZero(db, SZ_IDLIST(1)); if( pList==0 ) return 0; }else{ IdList *pNew; - pNew = sqlite3DbRealloc(db, pList, - sizeof(IdList) + pList->nId*sizeof(pList->a)); + pNew = sqlite3DbRealloc(db, pList, SZ_IDLIST(pList->nId+1)); if( pNew==0 ){ sqlite3IdListDelete(db, pList); return 0; @@ -126914,8 +127799,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge( return 0; } if( nAlloc>SQLITE_MAX_SRCLIST ) nAlloc = SQLITE_MAX_SRCLIST; - pNew = sqlite3DbRealloc(db, pSrc, - sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) ); + pNew = sqlite3DbRealloc(db, pSrc, SZ_SRCLIST(nAlloc)); if( pNew==0 ){ assert( db->mallocFailed ); return 0; @@ -126990,7 +127874,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( assert( pParse->db!=0 ); db = pParse->db; if( pList==0 ){ - pList = sqlite3DbMallocRawNN(pParse->db, sizeof(SrcList) ); + pList = sqlite3DbMallocRawNN(pParse->db, SZ_SRCLIST(1)); if( pList==0 ) return 0; pList->nAlloc = 1; pList->nSrc = 1; @@ -127876,10 +128760,9 @@ SQLITE_PRIVATE With *sqlite3WithAdd( } if( pWith ){ - sqlite3_int64 nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte); - pNew = sqlite3DbRealloc(db, pWith, nByte); + pNew = sqlite3DbRealloc(db, pWith, SZ_WITH(pWith->nCte+1)); }else{ - pNew = sqlite3DbMallocZero(db, sizeof(*pWith)); + pNew = sqlite3DbMallocZero(db, SZ_WITH(1)); } assert( (pNew!=0 && zName!=0) || db->mallocFailed ); @@ -129853,11 +130736,6 @@ static void substrFunc( i64 p1, p2; assert( argc==3 || argc==2 ); - if( sqlite3_value_type(argv[1])==SQLITE_NULL - || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) - ){ - return; - } p0type = sqlite3_value_type(argv[0]); p1 = sqlite3_value_int64(argv[1]); if( p0type==SQLITE_BLOB ){ @@ -129875,19 +130753,23 @@ static void substrFunc( } } } -#ifdef SQLITE_SUBSTR_COMPATIBILITY - /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as - ** as substr(X,1,N) - it returns the first N characters of X. 
This - ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] - ** from 2009-02-02 for compatibility of applications that exploited the - ** old buggy behavior. */ - if( p1==0 ) p1 = 1; /* */ -#endif if( argc==3 ){ p2 = sqlite3_value_int64(argv[2]); + if( p2==0 && sqlite3_value_type(argv[2])==SQLITE_NULL ) return; }else{ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; } + if( p1==0 ){ +#ifdef SQLITE_SUBSTR_COMPATIBILITY + /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as + ** as substr(X,1,N) - it returns the first N characters of X. This + ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] + ** from 2009-02-02 for compatibility of applications that exploited the + ** old buggy behavior. */ + p1 = 1; /* */ +#endif + if( sqlite3_value_type(argv[1])==SQLITE_NULL ) return; + } if( p1<0 ){ p1 += len; if( p1<0 ){ @@ -130588,7 +131470,7 @@ static const char hexdigits[] = { ** Append to pStr text that is the SQL literal representation of the ** value contained in pValue. */ -SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ +SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue, int bEscape){ /* As currently implemented, the string must be initially empty. ** we might relax this requirement in the future, but that will ** require enhancements to the implementation. */ @@ -130636,7 +131518,7 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } case SQLITE_TEXT: { const unsigned char *zArg = sqlite3_value_text(pValue); - sqlite3_str_appendf(pStr, "%Q", zArg); + sqlite3_str_appendf(pStr, bEscape ? "%#Q" : "%Q", zArg); break; } default: { @@ -130647,6 +131529,105 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } } +/* +** Return true if z[] begins with N hexadecimal digits, and write +** a decoding of those digits into *pVal. Or return false if any +** one of the first N characters in z[] is not a hexadecimal digit. +*/ +static int isNHex(const char *z, int N, u32 *pVal){ + int i; + int v = 0; + for(i=0; i0 ){ + memmove(&zOut[j], &zIn[i], n); + j += n; + i += n; + } + if( zIn[i+1]=='\\' ){ + i += 2; + zOut[j++] = '\\'; + }else if( sqlite3Isxdigit(zIn[i+1]) ){ + if( !isNHex(&zIn[i+1], 4, &v) ) goto unistr_error; + i += 5; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='+' ){ + if( !isNHex(&zIn[i+2], 6, &v) ) goto unistr_error; + i += 8; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='u' ){ + if( !isNHex(&zIn[i+2], 4, &v) ) goto unistr_error; + i += 6; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else if( zIn[i+1]=='U' ){ + if( !isNHex(&zIn[i+2], 8, &v) ) goto unistr_error; + i += 10; + j += sqlite3AppendOneUtf8Character(&zOut[j], v); + }else{ + goto unistr_error; + } + } + zOut[j] = 0; + sqlite3_result_text64(context, zOut, j, sqlite3_free, SQLITE_UTF8); + return; + +unistr_error: + sqlite3_free(zOut); + sqlite3_result_error(context, "invalid Unicode escape", -1); + return; +} + + /* ** Implementation of the QUOTE() function. ** @@ -130656,6 +131637,10 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ ** as needed. BLOBs are encoded as hexadecimal literals. Strings with ** embedded NUL characters cannot be represented as string literals in SQL ** and hence the returned string literal is truncated prior to the first NUL. 
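
The unistr() implementation above turns \uXXXX, \+XXXXXX and \UXXXXXXXX escapes into UTF-8, with sqlite3AppendOneUtf8Character() emitting each decoded code point. A standalone sketch of that final encoding step (standard UTF-8 rules; surrogate and range validation omitted for brevity):

    #include <stdio.h>

    /* Encode one Unicode code point v as UTF-8 into z; return byte count. */
    static int appendUtf8(char *z, unsigned int v){
      if( v<0x80 ){ z[0]=(char)v; return 1; }
      if( v<0x800 ){
        z[0]=(char)(0xC0|(v>>6)); z[1]=(char)(0x80|(v&0x3F)); return 2;
      }
      if( v<0x10000 ){
        z[0]=(char)(0xE0|(v>>12));      z[1]=(char)(0x80|((v>>6)&0x3F));
        z[2]=(char)(0x80|(v&0x3F));     return 3;
      }
      z[0]=(char)(0xF0|(v>>18));        z[1]=(char)(0x80|((v>>12)&0x3F));
      z[2]=(char)(0x80|((v>>6)&0x3F));  z[3]=(char)(0x80|(v&0x3F)); return 4;
    }

    int main(void){
      char buf[5];
      int n = appendUtf8(buf, 0x2603);  /* U+2603, as unistr('\u2603') yields */
      buf[n] = 0;
      printf("%s (%d bytes)\n", buf, n);
      return 0;
    }

In the patched library the hex digits come from isNHex() and the bytes land in the zOut buffer; this sketch isolates only the encoder.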
+** +** If sqlite3_user_data() is non-zero, then the UNISTR_QUOTE() function is +** implemented instead. The difference is that UNISTR_QUOTE() uses the +** UNISTR() function to escape control characters. */ static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ sqlite3_str str; sqlite3 *db = sqlite3_context_db_handle(context); assert( argc==1 ); UNUSED_PARAMETER(argc); sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); - sqlite3QuoteValue(&str,argv[0]); + sqlite3QuoteValue(&str,argv[0],SQLITE_PTR_TO_INT(sqlite3_user_data(context))); sqlite3_result_text(context, sqlite3StrAccumFinish(&str), str.nChar, SQLITE_DYNAMIC); if( str.accError!=SQLITE_OK ){ @@ -130918,7 +131903,7 @@ static void replaceFunc( assert( zRep==sqlite3_value_text(argv[2]) ); nOut = nStr + 1; assert( nOut0 ){ + if( sqlite3_value_type(argv[i])!=SQLITE_NULL ){ + int k = sqlite3_value_bytes(argv[i]); const char *v = (const char*)sqlite3_value_text(argv[i]); if( v!=0 ){ if( j>0 && nSep>0 ){ @@ -131314,7 +132299,7 @@ static void kahanBabuskaNeumaierInit( ** that it returns NULL if it sums over no inputs. TOTAL returns ** 0.0 in that case. In addition, TOTAL always returns a float where ** SUM might return an integer if it never encounters a floating point -** value. TOTAL never fails, but SUM might through an exception if +** value. TOTAL never fails, but SUM might throw an exception if ** it overflows an integer. */ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){ @@ -132234,7 +133219,9 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ DFUNCTION(sqlite_version, 0, 0, 0, versionFunc ), DFUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ), FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ), + FUNCTION(unistr, 1, 0, 0, unistrFunc ), FUNCTION(quote, 1, 0, 0, quoteFunc ), + FUNCTION(unistr_quote, 1, 1, 0, quoteFunc ), VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid), VFUNCTION(changes, 0, 0, 0, changes ), VFUNCTION(total_changes, 0, 0, 0, total_changes ), @@ -134521,7 +135508,7 @@ SQLITE_PRIVATE Select *sqlite3MultiValues(Parse *pParse, Select *pLeft, ExprList f = (f & pLeft->selFlags); } pSelect = sqlite3SelectNew(pParse, pRow, 0, 0, 0, 0, 0, f, 0); - pLeft->selFlags &= ~SF_MultiValue; + pLeft->selFlags &= ~(u32)SF_MultiValue; if( pSelect ){ pSelect->op = TK_ALL; pSelect->pPrior = pLeft; @@ -134903,28 +135890,22 @@ SQLITE_PRIVATE void sqlite3Insert( aTabColMap = sqlite3DbMallocZero(db, pTab->nCol*sizeof(int)); if( aTabColMap==0 ) goto insert_cleanup; for(i=0; i<pColumn->nId; i++){ - const char *zCName = pColumn->a[i].zName; - u8 hName = sqlite3StrIHash(zCName); - for(j=0; j<pTab->nCol; j++){ - if( pTab->aCol[j].hName!=hName ) continue; - if( sqlite3StrICmp(zCName, pTab->aCol[j].zCnName)==0 ){ - if( aTabColMap[j]==0 ) aTabColMap[j] = i+1; - if( i!=j ) bIdListInOrder = 0; - if( j==pTab->iPKey ){ - ipkColumn = i; assert( !withoutRowid ); - } + j = sqlite3ColumnIndex(pTab, pColumn->a[i].zName); + if( j>=0 ){ + if( aTabColMap[j]==0 ) aTabColMap[j] = i+1; + if( i!=j ) bIdListInOrder = 0; + if( j==pTab->iPKey ){ + ipkColumn = i; assert( !withoutRowid ); + } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){ - sqlite3ErrorMsg(pParse, - "cannot INSERT into generated column \"%s\"", - pTab->aCol[j].zCnName); - goto insert_cleanup; - } -#endif - break; + if( pTab->aCol[j].colFlags & (COLFLAG_STORED|COLFLAG_VIRTUAL) ){ + sqlite3ErrorMsg(pParse, + "cannot INSERT into
generated column \"%s\"", + pTab->aCol[j].zCnName); + goto insert_cleanup; } - } - if( j>=pTab->nCol ){ +#endif + }else{ if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){ ipkColumn = i; bIdListInOrder = 0; @@ -135222,7 +136203,7 @@ SQLITE_PRIVATE void sqlite3Insert( continue; }else if( pColumn==0 ){ /* Hidden columns that are not explicitly named in the INSERT - ** get there default value */ + ** get their default value */ sqlite3ExprCodeFactorable(pParse, sqlite3ColumnExpr(pTab, &pTab->aCol[i]), iRegStore); @@ -135947,7 +136928,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** could happen in any order, but they are grouped up front for ** convenience. ** - ** 2018-08-14: Ticket https://www.sqlite.org/src/info/908f001483982c43 + ** 2018-08-14: Ticket https://sqlite.org/src/info/908f001483982c43 ** The order of constraints used to have OE_Update as (2) and OE_Abort ** and so forth as (1). But apparently PostgreSQL checks the OE_Update ** constraint before any others, so it had to be moved. @@ -137757,6 +138738,8 @@ struct sqlite3_api_routines { /* Version 3.44.0 and later */ void *(*get_clientdata)(sqlite3*,const char*); int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); + /* Version 3.50.0 and later */ + int (*setlk_timeout)(sqlite3*,int,int); }; /* @@ -138090,6 +139073,8 @@ typedef int (*sqlite3_loadext_entry)( /* Version 3.44.0 and later */ #define sqlite3_get_clientdata sqlite3_api->get_clientdata #define sqlite3_set_clientdata sqlite3_api->set_clientdata +/* Version 3.50.0 and later */ +#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -138611,7 +139596,9 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_stmt_explain, /* Version 3.44.0 and later */ sqlite3_get_clientdata, - sqlite3_set_clientdata + sqlite3_set_clientdata, + /* Version 3.50.0 and later */ + sqlite3_setlk_timeout }; /* True if x is the directory separator character @@ -139133,48 +140120,48 @@ static const char *const pragCName[] = { /* 13 */ "pk", /* 14 */ "hidden", /* table_info reuses 8 */ - /* 15 */ "schema", /* Used by: table_list */ - /* 16 */ "name", + /* 15 */ "name", /* Used by: function_list */ + /* 16 */ "builtin", /* 17 */ "type", - /* 18 */ "ncol", - /* 19 */ "wr", - /* 20 */ "strict", - /* 21 */ "seqno", /* Used by: index_xinfo */ - /* 22 */ "cid", - /* 23 */ "name", - /* 24 */ "desc", - /* 25 */ "coll", - /* 26 */ "key", - /* 27 */ "name", /* Used by: function_list */ - /* 28 */ "builtin", - /* 29 */ "type", - /* 30 */ "enc", - /* 31 */ "narg", - /* 32 */ "flags", - /* 33 */ "tbl", /* Used by: stats */ - /* 34 */ "idx", - /* 35 */ "wdth", - /* 36 */ "hght", - /* 37 */ "flgs", - /* 38 */ "seq", /* Used by: index_list */ - /* 39 */ "name", - /* 40 */ "unique", - /* 41 */ "origin", - /* 42 */ "partial", + /* 18 */ "enc", + /* 19 */ "narg", + /* 20 */ "flags", + /* 21 */ "schema", /* Used by: table_list */ + /* 22 */ "name", + /* 23 */ "type", + /* 24 */ "ncol", + /* 25 */ "wr", + /* 26 */ "strict", + /* 27 */ "seqno", /* Used by: index_xinfo */ + /* 28 */ "cid", + /* 29 */ "name", + /* 30 */ "desc", + /* 31 */ "coll", + /* 32 */ "key", + /* 33 */ "seq", /* Used by: index_list */ + /* 34 */ "name", + /* 35 */ "unique", + /* 36 */ "origin", + /* 37 */ "partial", + /* 38 */ "tbl", /* Used by: stats */ + /* 39 */ "idx", + /* 40 */ "wdth", + /* 41 */ "hght", + /* 42 */ "flgs", /* 43 */ "table", /* Used by: 
foreign_key_check */ /* 44 */ "rowid", /* 45 */ "parent", /* 46 */ "fkid", - /* index_info reuses 21 */ - /* 47 */ "seq", /* Used by: database_list */ - /* 48 */ "name", - /* 49 */ "file", - /* 50 */ "busy", /* Used by: wal_checkpoint */ - /* 51 */ "log", - /* 52 */ "checkpointed", - /* collation_list reuses 38 */ + /* 47 */ "busy", /* Used by: wal_checkpoint */ + /* 48 */ "log", + /* 49 */ "checkpointed", + /* 50 */ "seq", /* Used by: database_list */ + /* 51 */ "name", + /* 52 */ "file", + /* index_info reuses 27 */ /* 53 */ "database", /* Used by: lock_status */ /* 54 */ "status", + /* collation_list reuses 33 */ /* 55 */ "cache_size", /* Used by: default_cache_size */ /* module_list pragma_list reuses 9 */ /* 56 */ "timeout", /* Used by: busy_timeout */ @@ -139267,7 +140254,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "collation_list", /* ePragTyp: */ PragTyp_COLLATION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 38, 2, + /* ColNames: */ 33, 2, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) @@ -139302,7 +140289,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "database_list", /* ePragTyp: */ PragTyp_DATABASE_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 47, 3, + /* ColNames: */ 50, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) @@ -139382,7 +140369,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "function_list", /* ePragTyp: */ PragTyp_FUNCTION_LIST, /* ePragFlg: */ PragFlg_Result0, - /* ColNames: */ 27, 6, + /* ColNames: */ 15, 6, /* iArg: */ 0 }, #endif #endif @@ -139411,17 +140398,17 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "index_info", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 21, 3, + /* ColNames: */ 27, 3, /* iArg: */ 0 }, {/* zName: */ "index_list", /* ePragTyp: */ PragTyp_INDEX_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 38, 5, + /* ColNames: */ 33, 5, /* iArg: */ 0 }, {/* zName: */ "index_xinfo", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, - /* ColNames: */ 21, 6, + /* ColNames: */ 27, 6, /* iArg: */ 1 }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) @@ -139600,7 +140587,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "stats", /* ePragTyp: */ PragTyp_STATS, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, - /* ColNames: */ 33, 5, + /* ColNames: */ 38, 5, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) @@ -139619,7 +140606,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "table_list", /* ePragTyp: */ PragTyp_TABLE_LIST, /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1, - /* ColNames: */ 15, 6, + /* ColNames: */ 21, 6, /* iArg: */ 0 }, {/* zName: */ "table_xinfo", /* ePragTyp: */ PragTyp_TABLE_INFO, @@ -139696,7 +140683,7 @@ static const PragmaName aPragmaName[] = { {/* zName: */ "wal_checkpoint", /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, /* ePragFlg: */ PragFlg_NeedSchema, - /* ColNames: */ 50, 3, + /* ColNames: */ 47, 3, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) @@ -139718,7 +140705,7 @@ static const PragmaName aPragmaName[] = { ** the following macro or to the actual analysis_limit if it is non-zero, ** in order to prevent PRAGMA optimize from running for too long. 
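
Returning to the function-table hunk a few sections back: quote() and unistr_quote() are registered against the same quoteFunc(), distinguished by the user-data argument that later surfaces through sqlite3_user_data(). Application code can use the identical trick through the public API; a hedged sketch with hypothetical function names (assumes linking against SQLite 3.x):

    #include <sqlite3.h>
    #include <stdint.h>

    static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      int escape = (int)(intptr_t)sqlite3_user_data(ctx); /* 0 or 1 per name */
      const unsigned char *z = sqlite3_value_text(argv[0]);
      (void)argc;
      if( z==0 ){ sqlite3_result_null(ctx); return; }
      /* A real implementation would escape the text here when escape!=0. */
      sqlite3_result_text(ctx, escape ? "escaped" : "plain", -1, SQLITE_STATIC);
    }

    int register_tag_functions(sqlite3 *db){
      int rc = sqlite3_create_function(db, "tag_plain", 1, SQLITE_UTF8,
                                       (void*)(intptr_t)0, tagFunc, 0, 0);
      if( rc==SQLITE_OK ){
        rc = sqlite3_create_function(db, "tag_escaped", 1, SQLITE_UTF8,
                                     (void*)(intptr_t)1, tagFunc, 0, 0);
      }
      return rc;
    }

Sharing one xFunc keeps the two SQL names from drifting apart over time; the pointer-sized user data is the cheapest possible per-name configuration.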
** -** The value of 2000 is chosen emperically so that the worst-case run-time +** The value of 2000 is chosen empirically so that the worst-case run-time ** for PRAGMA optimize does not exceed 100 milliseconds against a variety ** of test databases on a RaspberryPI-4 compiled using -Os and without ** -DSQLITE_DEBUG. Of course, your mileage may vary. For the purpose of @@ -140835,7 +141822,10 @@ SQLITE_PRIVATE void sqlite3Pragma( } }else{ db->flags &= ~mask; - if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; + if( mask==SQLITE_DeferFKs ){ + db->nDeferredImmCons = 0; + db->nDeferredCons = 0; + } if( (mask & SQLITE_WriteSchema)!=0 && sqlite3_stricmp(zRight, "reset")==0 ){ @@ -144004,7 +144994,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew( pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; pNew->nSelectRow = 0; - if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*pSrc)); + if( pSrc==0 ) pSrc = sqlite3DbMallocZero(pParse->db, SZ_SRCLIST_1); pNew->pSrc = pSrc; pNew->pWhere = pWhere; pNew->pGroupBy = pGroupBy; @@ -144169,10 +145159,33 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p */ SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){ int i; - u8 h = sqlite3StrIHash(zCol); - Column *pCol; - for(pCol=pTab->aCol, i=0; i<pTab->nCol; pCol++, i++){ - if( pCol->hName==h && sqlite3StrICmp(pCol->zCnName, zCol)==0 ) return i; + u8 h; + const Column *aCol; + int nCol; + + h = sqlite3StrIHash(zCol); + aCol = pTab->aCol; + nCol = pTab->nCol; + + /* See if the aHx gives us a lucky match */ + i = pTab->aHx[h % sizeof(pTab->aHx)]; + assert( i<nCol ); + if( aCol[i].hName==h + && sqlite3StrICmp(aCol[i].zCnName, zCol)==0 + ){ + return i; + } + + /* No lucky match from the hash table. Do a full search. */ + i = 0; + while( 1 /*exit-by-break*/ ){ + if( aCol[i].hName==h + && sqlite3StrICmp(aCol[i].zCnName, zCol)==0 + ){ + return i; + } + i++; + if( i>=nCol ) break; } return -1; } @@ -144423,7 +145436,7 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ } pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iLeftCol); sqlite3SrcItemColumnUsed(&pSrc->a[iLeft], iLeftCol); - if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){ + if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 && pParse->nErr==0 ){ /* This branch runs if the query contains one or more RIGHT or FULL ** JOINs. If only a single table on the left side of this join ** contains the zName column, then this branch is a no-op. */ ExprList *pFuncArgs = 0; /* Arguments to the coalesce() */ static const Token tkCoalesce = { "coalesce", 8 }; + assert( pE1!=0 ); + ExprSetProperty(pE1, EP_CanBeNull); while( tableAndColumnIndex(pSrc, iLeft+1, i, zName, &iLeft, &iLeftCol, pRight->fg.isSynthUsing)!=0 ){ if( pSrc->a[iLeft].fg.isUsing==0 @@ -144455,7 +145470,13 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ if( pFuncArgs ){ pFuncArgs = sqlite3ExprListAppend(pParse, pFuncArgs, pE1); pE1 = sqlite3ExprFunction(pParse, pFuncArgs, &tkCoalesce, 0); + if( pE1 ){ + pE1->affExpr = SQLITE_AFF_DEFER; + } } + }else if( (pSrc->a[i+1].fg.jointype & JT_LEFT)!=0 && pParse->nErr==0 ){ + assert( pE1!=0 ); + ExprSetProperty(pE1, EP_CanBeNull); } pE2 = sqlite3CreateColumnExpr(db, pSrc, i+1, iRightCol); sqlite3SrcItemColumnUsed(pRight, iRightCol); @@ -145364,8 +146385,8 @@ static void selectInnerLoop( ** X extra columns.
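
sqlite3ColumnIndex() above now consults a small per-table hash hint (the aHx table that sqlite3AddColumn fills in for the first 256 columns) before falling back to a linear scan. A compact, self-contained model of the two-tier lookup — toy hash and a case-sensitive compare, whereas the real code first compares stored 8-bit name hashes:

    #include <stdio.h>
    #include <string.h>

    #define NCOL 4
    static const char *azCol[NCOL] = { "id", "name", "price", "qty" };
    static unsigned char aHx[256];   /* hash -> most recently added column */

    static unsigned char strIHash(const char *z){
      unsigned char h = 0;
      while( *z ){ h += (unsigned char)(*z | 0x20); z++; } /* fold case-ish */
      return h;
    }

    static void buildHints(void){
      int i;
      for(i=0; i<NCOL; i++) aHx[strIHash(azCol[i]) % sizeof(aHx)] = (unsigned char)i;
    }

    static int columnIndex(const char *zCol){
      int i = aHx[strIHash(zCol) % sizeof(aHx)];  /* the "lucky match" probe */
      if( strcmp(azCol[i], zCol)==0 ) return i;
      for(i=0; i<NCOL; i++){                      /* full search on a miss */
        if( strcmp(azCol[i], zCol)==0 ) return i;
      }
      return -1;
    }

    int main(void){
      buildHints();
      printf("%d %d\n", columnIndex("price"), columnIndex("nope")); /* 2 -1 */
      return 0;
    }

The hint table is allowed to be stale or colliding; a wrong hint only costs the scan that would have happened anyway, which is why a 256-byte array suffices.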
*/ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ - int nExtra = (N+X)*(sizeof(CollSeq*)+1) - sizeof(CollSeq*); - KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra); + int nExtra = (N+X)*(sizeof(CollSeq*)+1); + KeyInfo *p = sqlite3DbMallocRawNN(db, SZ_KEYINFO(0) + nExtra); if( p ){ p->aSortFlags = (u8*)&p->aColl[N+X]; p->nKeyField = (u16)N; p->enc = ENC(db); p->db = db; p->nRef = 1; - memset(&p[1], 0, nExtra); + memset(p->aColl, 0, nExtra); }else{ return (KeyInfo*)sqlite3OomFault(db); } @@ -147074,6 +148095,1 @@ static int multiSelect( multi_select_end: pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; + pDest->iSDParm2 = dest.iSDParm2; if( pDelete ){ sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete); } @@ -148062,9 +149084,9 @@ static int compoundHasDifferentAffinities(Select *p){ ** from 2015-02-09.) ** ** (3) If the subquery is the right operand of a LEFT JOIN then -** (3a) the subquery may not be a join and -** (3b) the FROM clause of the subquery may not contain a virtual -** table and +** (3a) the subquery may not be a join +** (**) Was (3b): "the FROM clause of the subquery may not contain +** a virtual table" ** (**) Was: "The outer query may not have a GROUP BY." This case ** is now managed correctly ** (3d) the outer query may not be DISTINCT. @@ -148280,7 +149302,7 @@ static int flattenSubquery( */ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){ if( pSubSrc->nSrc>1 /* (3a) */ - || IsVirtual(pSubSrc->a[0].pSTab) /* (3b) */ + /**** || IsVirtual(pSubSrc->a[0].pSTab) (3b)-omitted */ || (p->selFlags & SF_Distinct)!=0 /* (3d) */ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */ ){ @@ -148684,7 +149706,8 @@ static void constInsert( return; /* Already present. Return without doing anything. */ } } - if( sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + assert( SQLITE_AFF_NONE<SQLITE_AFF_BLOB ); + if( sqlite3ExprAffinity(pColumn)<=SQLITE_AFF_BLOB ){ pConst->bHasAffBlob = 1; } @@ -148759,7 +149782,8 @@ static int propagateConstantExprRewriteOne( if( pColumn==pExpr ) continue; if( pColumn->iTable!=pExpr->iTable ) continue; if( pColumn->iColumn!=pExpr->iColumn ) continue; - if( bIgnoreAffBlob && sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + assert( SQLITE_AFF_NONEpWinDefn = 0; #endif - p->selFlags &= ~SF_Compound; + p->selFlags &= ~(u32)SF_Compound; assert( (p->selFlags & SF_Converted)==0 ); p->selFlags |= SF_Converted; assert( pNew->pPrior!=0 ); @@ -149889,7 +150913,7 @@ static int selectExpander(Walker *pWalker, Select *p){ pEList = p->pEList; if( pParse->pWith && (p->selFlags & SF_View) ){ if( p->pWith==0 ){ - p->pWith = (With*)sqlite3DbMallocZero(db, sizeof(With)); + p->pWith = (With*)sqlite3DbMallocZero(db, SZ_WITH(1) ); if( p->pWith==0 ){ return WRC_Abort; } @@ -151028,6 +152052,1 @@ static void agginfoFree(sqlite3 *db, void *pArg){ ** * There is no WHERE or GROUP BY or HAVING clauses on the subqueries ** * The outer query is a simple count(*) with no WHERE clause or other ** extraneous syntax. +** * None of the subqueries are DISTINCT (forumpost/a860f5fb2e 2025-03-10) ** ** Return TRUE if the optimization is undertaken.
*/ @@ -151060,7 +152085,11 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */ if( pSub->pWhere ) return 0; /* No WHERE clause */ if( pSub->pLimit ) return 0; /* No LIMIT clause */ - if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ + if( pSub->selFlags & (SF_Aggregate|SF_Distinct) ){ + testcase( pSub->selFlags & SF_Aggregate ); + testcase( pSub->selFlags & SF_Distinct ); + return 0; /* Not an aggregate nor DISTINCT */ + } assert( pSub->pHaving==0 ); /* Due to the previous */ pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); @@ -151072,14 +152101,14 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ pExpr = 0; pSub = sqlite3SubqueryDetach(db, pFrom); sqlite3SrcListDelete(db, p->pSrc); - p->pSrc = sqlite3DbMallocZero(pParse->db, sizeof(*p->pSrc)); + p->pSrc = sqlite3DbMallocZero(pParse->db, SZ_SRCLIST_1); while( pSub ){ Expr *pTerm; pPrior = pSub->pPrior; pSub->pPrior = 0; pSub->pNext = 0; pSub->selFlags |= SF_Aggregate; - pSub->selFlags &= ~SF_Compound; + pSub->selFlags &= ~(u32)SF_Compound; pSub->nSelectRow = 0; sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList); pTerm = pPrior ? sqlite3ExprDup(db, pCount, 0) : pCount; @@ -151094,7 +152123,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ pSub = pPrior; } p->pEList->a[0].pExpr = pExpr; - p->selFlags &= ~SF_Aggregate; + p->selFlags &= ~(u32)SF_Aggregate; #if TREETRACE_ENABLED if( sqlite3TreeTrace & 0x200 ){ @@ -151301,7 +152330,7 @@ SQLITE_PRIVATE int sqlite3Select( testcase( pParse->earlyCleanup ); p->pOrderBy = 0; } - p->selFlags &= ~SF_Distinct; + p->selFlags &= ~(u32)SF_Distinct; p->selFlags |= SF_NoopOrderBy; } sqlite3SelectPrep(pParse, p, 0); @@ -151340,7 +152369,7 @@ SQLITE_PRIVATE int sqlite3Select( ** and leaving this flag set can cause errors if a compound sub-query ** in p->pSrc is flattened into this query and this function called ** again as part of compound SELECT processing. */ - p->selFlags &= ~SF_UFSrcCheck; + p->selFlags &= ~(u32)SF_UFSrcCheck; } if( pDest->eDest==SRT_Output ){ @@ -151829,7 +152858,7 @@ SQLITE_PRIVATE int sqlite3Select( && p->pWin==0 #endif ){ - p->selFlags &= ~SF_Distinct; + p->selFlags &= ~(u32)SF_Distinct; pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0); if( pGroupBy ){ for(i=0; i<pGroupBy->nExpr; i++){ @@ -151938,6 +152967,12 @@ SQLITE_PRIVATE int sqlite3Select( if( pWInfo==0 ) goto select_end; if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){ p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo); + if( pDest->eDest<=SRT_DistQueue && pDest->eDest>=SRT_DistFifo ){ + /* TUNING: For a UNION CTE, because UNION implies DISTINCT, + ** reduce the estimated output row count by 8 (LogEst 30). + ** Search for tag-20250414a to see other cases */ + p->nSelectRow -= 30; + } } if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){ sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo); @@ -152168,6 +153203,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag); VdbeComment((v, "clear abort flag")); sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1); + sqlite3ExprNullRegisterRange(pParse, iAMem, pGroupBy->nExpr); /* Begin a loop that will extract all source rows in GROUP BY order.
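
The TUNING hunk above subtracts 30 from a LogEst row estimate to model UNION's implicit DISTINCT: LogEst is roughly 10*log2(x), so an 8x reduction is 10*log2(8) = 30. A sketch of the conversion (sqlite3LogEst proper uses integer approximation tables, not libm; compile with -lm):

    #include <math.h>
    #include <stdio.h>

    typedef short LogEst;

    static LogEst toLogEst(double x){ return (LogEst)(10.0*log2(x) + 0.5); }
    static double fromLogEst(LogEst e){ return pow(2.0, e/10.0); }

    int main(void){
      LogEst rows = toLogEst(25.0);   /* 46, matching the 46==sqlite3LogEst(25)
                                      ** assertion seen later in this patch */
      printf("25 rows -> LogEst %d\n", rows);
      printf("after -30: ~%.1f rows\n", fromLogEst((LogEst)(rows-30))); /* ~3 */
      return 0;
    }

Working in LogEst lets the planner replace multiplications and divisions of row counts with additions and subtractions of small integers, which is why "divide by 8" appears as "-= 30".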
** This might involve two separate loops with an OP_Sort in between, or @@ -152311,6 +153347,10 @@ SQLITE_PRIVATE int sqlite3Select( if( iOrderByCol ){ Expr *pX = p->pEList->a[iOrderByCol-1].pExpr; Expr *pBase = sqlite3ExprSkipCollateAndLikely(pX); + while( ALWAYS(pBase!=0) && pBase->op==TK_IF_NULL_ROW ){ + pX = pBase->pLeft; + pBase = sqlite3ExprSkipCollateAndLikely(pX); + } if( ALWAYS(pBase!=0) && pBase->op!=TK_AGG_COLUMN && pBase->op!=TK_REGISTER @@ -152894,7 +153934,8 @@ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ assert( pParse->db->pVtabCtx==0 ); #endif assert( pParse->bReturning ); - assert( &(pParse->u1.pReturning->retTrig) == pTrig ); + assert( !pParse->isCreate ); + assert( &(pParse->u1.d.pReturning->retTrig) == pTrig ); pTrig->table = pTab->zName; pTrig->pTabSchema = pTab->pSchema; pTrig->pNext = pList; @@ -153862,7 +154903,8 @@ static void codeReturningTrigger( ExprList *pNew; Returning *pReturning; Select sSelect; - SrcList sFrom; + SrcList *pFrom; + u8 fromSpace[SZ_SRCLIST_1]; assert( v!=0 ); if( !pParse->bReturning ){ return; } assert( db->pParse==pParse ); - pReturning = pParse->u1.pReturning; + assert( !pParse->isCreate ); + pReturning = pParse->u1.d.pReturning; if( pTrigger != &(pReturning->retTrig) ){ /* This RETURNING trigger is for a different statement */ return; } memset(&sSelect, 0, sizeof(sSelect)); - memset(&sFrom, 0, sizeof(sFrom)); + pFrom = (SrcList*)fromSpace; + memset(pFrom, 0, SZ_SRCLIST_1); sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); - sSelect.pSrc = &sFrom; - sFrom.nSrc = 1; - sFrom.a[0].pSTab = pTab; - sFrom.a[0].zName = pTab->zName; /* tag-20240424-1 */ - sFrom.a[0].iCursor = -1; + sSelect.pSrc = pFrom; + pFrom->nSrc = 1; + pFrom->a[0].pSTab = pTab; + pFrom->a[0].zName = pTab->zName; /* tag-20240424-1 */ + pFrom->a[0].iCursor = -1; sqlite3SelectPrep(pParse, &sSelect, 0); if( pParse->nErr==0 ){ assert( db->mallocFailed==0 ); @@ -154101,6 +155145,8 @@ static TriggerPrg *codeRowTrigger( sSubParse.eTriggerOp = pTrigger->op; sSubParse.nQueryLoop = pParse->nQueryLoop; sSubParse.prepFlags = pParse->prepFlags; + sSubParse.oldmask = 0; + sSubParse.newmask = 0; v = sqlite3GetVdbe(&sSubParse); if( v ){ @@ -154855,38 +155901,32 @@ SQLITE_PRIVATE void sqlite3Update( */ chngRowid = chngPk = 0; for(i=0; i<pChanges->nExpr; i++){ - u8 hCol = sqlite3StrIHash(pChanges->a[i].zEName); /* If this is an UPDATE with a FROM clause, do not resolve expressions ** here. The call to sqlite3Select() below will do that.
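
codeReturningTrigger() above swaps a stack `SrcList sFrom` for a raw u8 buffer of SZ_SRCLIST_1 bytes: once SrcList ends in a FLEXARRAY member, a plain local no longer reserves any space for a[0]. A toy model of the stack-buffer cast, with an explicit alignment request that the patch's u8 buffer relies on getting implicitly in practice:

    #include <stdalign.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Item { int iCursor; } Item;
    typedef struct List {
      int nSrc;
      Item a[];                 /* flexible array: sizeof(List) excludes a[] */
    } List;

    /* Bytes for a List holding exactly one Item, as SZ_SRCLIST_1 does. */
    #define SZ_LIST_1 (sizeof(List) + sizeof(Item))

    int main(void){
      alignas(List) unsigned char space[SZ_LIST_1]; /* stack, no malloc */
      List *p = (List*)space;
      memset(p, 0, SZ_LIST_1);
      p->nSrc = 1;
      p->a[0].iCursor = -1;
      printf("nSrc=%d iCursor=%d\n", p->nSrc, p->a[0].iCursor);
      return 0;
    }

The buffer-plus-cast keeps the allocation on the stack (no OOM path to handle) while remaining legal for the flexible-array layout.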
*/ if( nChangeFrom==0 && sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; } - for(j=0; j<pTab->nCol; j++){ - if( pTab->aCol[j].hName==hCol - && sqlite3StrICmp(pTab->aCol[j].zCnName, pChanges->a[i].zEName)==0 - ){ - if( j==pTab->iPKey ){ - chngRowid = 1; - pRowidExpr = pChanges->a[i].pExpr; - iRowidExpr = i; - }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ - chngPk = 1; - } + j = sqlite3ColumnIndex(pTab, pChanges->a[i].zEName); + if( j>=0 ){ + if( j==pTab->iPKey ){ + chngRowid = 1; + pRowidExpr = pChanges->a[i].pExpr; + iRowidExpr = i; + }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ + chngPk = 1; + } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){ - testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ); - testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); - sqlite3ErrorMsg(pParse, - "cannot UPDATE generated column \"%s\"", - pTab->aCol[j].zCnName); - goto update_cleanup; - } -#endif - aXRef[j] = i; - break; + else if( pTab->aCol[j].colFlags & COLFLAG_GENERATED ){ + testcase( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ); + testcase( pTab->aCol[j].colFlags & COLFLAG_STORED ); + sqlite3ErrorMsg(pParse, + "cannot UPDATE generated column \"%s\"", + pTab->aCol[j].zCnName); + goto update_cleanup; } - } - if( j>=pTab->nCol ){ +#endif + aXRef[j] = i; + }else{ if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zEName) ){ j = -1; chngRowid = 1; @@ -156209,7 +157249,7 @@ SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse, Token *pNm, Expr *pInto){ #else /* When SQLITE_BUG_COMPATIBLE_20160819 is defined, unrecognized arguments ** to VACUUM are silently ignored. This is a back-out of a bug fix that - ** occurred on 2016-08-19 (https://www.sqlite.org/src/info/083f9e6270). + ** occurred on 2016-08-19 (https://sqlite.org/src/info/083f9e6270). ** The buggy behavior is required for binary compatibility with some ** legacy applications. */ iDb = sqlite3FindDb(pParse->db, pNm); @@ -156288,7 +157328,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; saved_mTrace = db->mTrace; - db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks; + db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_Comments; db->mDbFlags |= DBFLAG_PreferBuiltin | DBFLAG_Vacuum; db->flags &= ~(u64)(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_Defensive | SQLITE_CountRows); @@ -156993,11 +158033,12 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ ** schema table. We just need to update that slot with all ** the information we've collected. ** - ** The VM register number pParse->regRowid holds the rowid of an + ** The VM register number pParse->u1.cr.regRowid holds the rowid of an ** entry in the sqlite_schema table that was created for this vtab ** by sqlite3StartTable(). */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( pParse->isCreate ); sqlite3NestedParse(pParse, "UPDATE %Q."
LEGACY_SCHEMA_TABLE " " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " @@ -157006,7 +158047,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ pTab->zName, pTab->zName, zStmt, - pParse->regRowid + pParse->u1.cr.regRowid ); v = sqlite3GetVdbe(pParse); sqlite3ChangeCookie(pParse, iDb); @@ -158416,9 +159457,14 @@ struct WhereInfo { Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ WhereClause sWC; /* Decomposition of the WHERE clause */ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */ - WhereLevel a[1]; /* Information about each nest loop in WHERE */ + WhereLevel a[FLEXARRAY]; /* Information about each nest loop in WHERE */ }; +/* +** The size (in bytes) of a WhereInfo object that holds N WhereLevels. +*/ +#define SZ_WHEREINFO(N) ROUND8(offsetof(WhereInfo,a)+(N)*sizeof(WhereLevel)) + /* ** Private interfaces - callable only by other where.c routines. ** @@ -159098,7 +160144,7 @@ static void adjustOrderByCol(ExprList *pOrderBy, ExprList *pEList){ /* ** pX is an expression of the form: (vector) IN (SELECT ...) ** In other words, it is a vector IN operator with a SELECT clause on the -** LHS. But not all terms in the vector are indexable and the terms might +** RHS. But not all terms in the vector are indexable and the terms might ** not be in the correct order for indexing. ** ** This routine makes a copy of the input pX expression and then adjusts @@ -159154,7 +160200,9 @@ static Expr *removeUnindexableInClauseTerms( int iField; assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); iField = pLoop->aLTerm[i]->u.x.iField - 1; - if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ + if( NEVER(pOrigRhs->a[iField].pExpr==0) ){ + continue; /* Duplicate PK column */ + } pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); pOrigRhs->a[iField].pExpr = 0; if( pRhs ) pRhs->a[pRhs->nExpr-1].u.x.iOrderByCol = iField+1; @@ -159251,7 +160299,7 @@ static SQLITE_NOINLINE void codeINTerm( return; } } - for(i=iEq;i<pLoop->nLTerm; i++){ + for(i=iEq; i<pLoop->nLTerm; i++){ assert( pLoop->aLTerm[i]!=0 ); if( pLoop->aLTerm[i]->pExpr==pX ) nEq++; } @@ -159260,22 +160308,13 @@ static SQLITE_NOINLINE void codeINTerm( if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab); }else{ - Expr *pExpr = pTerm->pExpr; - if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){ - sqlite3 *db = pParse->db; - pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); - if( !db->mallocFailed ){ - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab); - pExpr->iTable = iTab; - } - sqlite3ExprDelete(db, pX); - }else{ - int n = sqlite3ExprVectorSize(pX->pLeft); - aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n)); - eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); + sqlite3 *db = pParse->db; + Expr *pXMod = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX); + if( !db->mallocFailed ){ + aiMap = (int*)sqlite3DbMallocZero(db, sizeof(int)*nEq); + eType = sqlite3FindInIndex(pParse, pXMod, IN_INDEX_LOOP, 0, aiMap, &iTab); } - pX = pExpr; + sqlite3ExprDelete(db, pXMod); } if( eType==IN_INDEX_INDEX_DESC ){ @@ -159305,7 +160344,7 @@ static SQLITE_NOINLINE void codeINTerm( if( pIn ){ int iMap = 0; /* Index in aiMap[] */ pIn += i; - for(i=iEq;i<pLoop->nLTerm; i++){ + for(i=iEq; i<pLoop->nLTerm; i++){ if( pLoop->aLTerm[i]->pExpr==pX ){ int iOut = iTarget + i - iEq;
if( eType==IN_INDEX_ROWID ){ @@ -160164,6 +161203,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1); + /* The instruction immediately prior to OP_VFilter must be an OP_Integer + ** that sets the "argc" value for xVFilter. This is necessary for + ** resolveP2() to work correctly. See tag-20250207a. */ sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg, pLoop->u.vtab.idxStr, pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC); @@ -160754,12 +161796,13 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pLevel->iLeftJoin==0 ){ /* If a partial index is driving the loop, try to eliminate WHERE clause ** terms from the query that must be true due to the WHERE clause of - ** the partial index. + ** the partial index. This optimization does not work on an outer join, + ** as shown by: ** - ** 2019-11-02 ticket 623eff57e76d45f6: This optimization does not work - ** for a LEFT JOIN. + ** 2019-11-02 ticket 623eff57e76d45f6 (LEFT JOIN) + ** 2025-05-29 forum post 7dee41d32506c4ae (RIGHT JOIN) */ - if( pIdx->pPartIdxWhere ){ + if( pIdx->pPartIdxWhere && pLevel->pRJ==0 ){ whereApplyPartialIndexConstraints(pIdx->pPartIdxWhere, iCur, pWC); } }else{ @@ -160866,8 +161909,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int nNotReady; /* The number of notReady tables */ SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; - pOrTab = sqlite3DbMallocRawNN(db, - sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); + pOrTab = sqlite3DbMallocRawNN(db, SZ_SRCLIST(nNotReady+1)); if( pOrTab==0 ) return notReady; pOrTab->nAlloc = (u8)(nNotReady + 1); pOrTab->nSrc = pOrTab->nAlloc; @@ -160918,7 +161960,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** ** This optimization also only applies if the (x1 OR x2 OR ...) term ** is not contained in the ON clause of a LEFT JOIN. - ** See ticket http://www.sqlite.org/src/info/f2369304e4 + ** See ticket http://sqlite.org/src/info/f2369304e4 ** ** 2022-02-04: Do not push down slices of a row-value comparison. ** In other words, "w" or "y" may not be a slice of a vector. Otherwise, @@ -161410,7 +162452,8 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( WhereInfo *pSubWInfo; WhereLoop *pLoop = pLevel->pWLoop; SrcItem *pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; - SrcList sFrom; + SrcList *pFrom; + u8 fromSpace[SZ_SRCLIST_1]; Bitmask mAll = 0; int k; @@ -161454,13 +162497,14 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( sqlite3ExprDup(pParse->db, pTerm->pExpr, 0)); } } - sFrom.nSrc = 1; - sFrom.nAlloc = 1; - memcpy(&sFrom.a[0], pTabItem, sizeof(SrcItem)); - sFrom.a[0].fg.jointype = 0; + pFrom = (SrcList*)fromSpace; + pFrom->nSrc = 1; + pFrom->nAlloc = 1; + memcpy(&pFrom->a[0], pTabItem, sizeof(SrcItem)); + pFrom->a[0].fg.jointype = 0; assert( pParse->withinRJSubrtn < 100 ); pParse->withinRJSubrtn++; - pSubWInfo = sqlite3WhereBegin(pParse, &sFrom, pSubWhere, 0, 0, 0, + pSubWInfo = sqlite3WhereBegin(pParse, pFrom, pSubWhere, 0, 0, 0, WHERE_RIGHT_JOIN, 0); if( pSubWInfo ){ int iCur = pLevel->iTabCur; @@ -162431,30 +163475,42 @@ static void exprAnalyzeOrTerm( ** 1. The SQLITE_Transitive optimization must be enabled ** 2. Must be either an == or an IS operator ** 3. Not originating in the ON clause of an OUTER JOIN -** 4. The affinities of A and B must be compatible -** 5a. Both operands use the same collating sequence OR -** 5b. 
The overall collating sequence is BINARY +** 4. The operator is not IS or else the query does not contain RIGHT JOIN +** 5. The affinities of A and B must be compatible +** 6a. Both operands use the same collating sequence OR +** 6b. The overall collating sequence is BINARY ** If this routine returns TRUE, that means that the RHS can be substituted ** for the LHS anyplace else in the WHERE clause where the LHS column occurs. ** This is an optimization. No harm comes from returning 0. But if 1 is ** returned when it should not be, then incorrect answers might result. */ -static int termIsEquivalence(Parse *pParse, Expr *pExpr){ +static int termIsEquivalence(Parse *pParse, Expr *pExpr, SrcList *pSrc){ char aff1, aff2; CollSeq *pColl; - if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; - if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; - if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; + if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; /* (1) */ + if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; /* (2) */ + if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* (3) */ + assert( pSrc!=0 ); + if( pExpr->op==TK_IS + && pSrc->nSrc + && (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 + ){ + return 0; /* (4) */ + } aff1 = sqlite3ExprAffinity(pExpr->pLeft); aff2 = sqlite3ExprAffinity(pExpr->pRight); if( aff1!=aff2 && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2)) ){ - return 0; + return 0; /* (5) */ } pColl = sqlite3ExprCompareCollSeq(pParse, pExpr); - if( sqlite3IsBinary(pColl) ) return 1; - return sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight); + if( !sqlite3IsBinary(pColl) + && !sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight) + ){ + return 0; /* (6) */ + } + return 1; } /* @@ -162719,8 +163775,8 @@ static void exprAnalyze( if( op==TK_IS ) pNew->wtFlags |= TERM_IS; pTerm = &pWC->a[idxTerm]; pTerm->wtFlags |= TERM_COPIED; - - if( termIsEquivalence(pParse, pDup) ){ + assert( pWInfo->pTabList!=0 ); + if( termIsEquivalence(pParse, pDup, pWInfo->pTabList) ){ pTerm->eOperator |= WO_EQUIV; eExtraOp = WO_EQUIV; } @@ -163448,11 +164504,16 @@ struct HiddenIndexInfo { int eDistinct; /* Value to return from sqlite3_vtab_distinct() */ u32 mIn; /* Mask of terms that are IN (...) */ u32 mHandleIn; /* Terms that vtab will handle as IN (...) */ - sqlite3_value *aRhs[1]; /* RHS values for constraints. MUST BE LAST - ** because extra space is allocated to hold up - ** to nTerm such values */ + sqlite3_value *aRhs[FLEXARRAY]; /* RHS values for constraints. 
MUST BE LAST
+                                 ** Extra space is allocated to hold up
+                                 ** to nTerm such values */
 };
 
+/* Size (in bytes) of a HiddenIndexInfo object sufficient to hold as
+** many as N constraints */
+#define SZ_HIDDENINDEXINFO(N) \
+  (offsetof(HiddenIndexInfo,aRhs) + (N)*sizeof(sqlite3_value*))
+
 /* Forward declaration of methods */
 static int whereLoopResize(sqlite3*, WhereLoop*, int);
 
@@ -164517,6 +165578,8 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
   }
 
   /* Construct the Index object to describe this index */
+  assert( nKeyCol <= pTable->nCol + MAX(0, pTable->nCol - BMS + 1) );
+  /* ^-- This guarantees that the number of index columns will fit in the u16 */
   pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+HasRowid(pTable),
                                     0, &zNotUsed);
   if( pIdx==0 ) goto end_auto_index_create;
@@ -164928,8 +165991,8 @@ static sqlite3_index_info *allocateIndexInfo(
   */
   pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo)
                           + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm
-                          + sizeof(*pIdxOrderBy)*nOrderBy + sizeof(*pHidden)
-                          + sizeof(sqlite3_value*)*nTerm );
+                          + sizeof(*pIdxOrderBy)*nOrderBy
+                          + SZ_HIDDENINDEXINFO(nTerm) );
   if( pIdxInfo==0 ){
     sqlite3ErrorMsg(pParse, "out of memory");
     return 0;
@@ -166565,11 +167628,8 @@ static int whereLoopAddBtreeIndex(
     assert( pNew->u.btree.nBtm==0 );
     opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
   }
-  if( pProbe->bUnordered || pProbe->bLowQual ){
-    if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
-    if( pProbe->bLowQual && pSrc->fg.isIndexedBy==0 ){
-      opMask &= ~(WO_EQ|WO_IN|WO_IS);
-    }
+  if( pProbe->bUnordered ){
+    opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
   }
 
   assert( pNew->u.btree.nEq<pProbe->nColumn );
@@ -166642,6 +167702,7 @@ static int whereLoopAddBtreeIndex(
       if( ExprUseXSelect(pExpr) ){
         /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */
         int i;
+        int bRedundant = 0;
         nIn = 46;  assert( 46==sqlite3LogEst(25) );
 
         /* The expression may actually be of the form (x, y) IN (SELECT...).
@@ -166650,7 +167711,20 @@ static int whereLoopAddBtreeIndex(
         ** for each such term. The following loop checks that pTerm is the
         ** first such term in use, and sets nIn back to 0 if it is not. */
         for(i=0; i<pNew->nLTerm-1; i++){
-          if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ) nIn = 0;
+          if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ){
+            nIn = 0;
+            if( pNew->aLTerm[i]->u.x.iField == pTerm->u.x.iField ){
+              /* Detect when two or more columns of an index match the same
+              ** column of a vector IN operator, and avoid adding the column
+              ** to the WhereLoop more than once. 
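+              ** A hypothetical illustration (editor's note, not from the
+              ** original sources): if the same table column appears twice
+              ** in an index, say CREATE INDEX i1 ON t1(a,b,a), both
+              ** occurrences of column a can match the same field of a
+              ** constraint such as (a,b) IN (SELECT x,y FROM t2), and the
+              ** duplicate match must not be counted twice.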
See tag-20250707-01
+              ** in test/rowvalue.test */
+              bRedundant = 1;
+            }
+          }
+        }
+        if( bRedundant ){
+          pNew->nLTerm--;
+          continue;
+        }
       }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){
         /* "x IN (value, value, ...)" */
@@ -166882,7 +167956,7 @@ static int whereLoopAddBtreeIndex(
   if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0
    && pNew->u.btree.nEq<pProbe->nColumn
    && (pNew->u.btree.nEq<pProbe->nKeyCol ||
-       pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY)
+       pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY)
   ){
     if( pNew->u.btree.nEq>3 ){
       sqlite3ProgressCheck(pParse);
@@ -167011,6 +168085,7 @@ static int whereUsablePartialIndex(
       if( (!ExprHasProperty(pExpr, EP_OuterON) || pExpr->w.iJoin==iTab)
        && ((jointype & JT_OUTER)==0 || ExprHasProperty(pExpr, EP_OuterON))
        && sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab)
+       && !sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, -1)
        && (pTerm->wtFlags & TERM_VNULL)==0
       ){
         return 1;
@@ -167424,6 +168499,7 @@ static int whereLoopAddBtree(
   pNew->u.btree.nEq = 0;
   pNew->u.btree.nBtm = 0;
   pNew->u.btree.nTop = 0;
+  pNew->u.btree.nDistinctCol = 0;
   pNew->nSkip = 0;
   pNew->nLTerm = 0;
   pNew->iSortIdx = 0;
@@ -167506,7 +168582,7 @@
      && (HasRowid(pTab) || pWInfo->pSelect!=0 || sqlite3FaultSim(700)) ){
       WHERETRACE(0x200,
-         ("-> %s a covering index according to bitmasks\n",
+         ("-> %s %s a covering index according to bitmasks\n",
          pProbe->zName, m==0 ? "is" : "is not"));
       pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
     }
@@ -168492,8 +169568,6 @@ static i8 wherePathSatisfiesOrderBy(
           obSat = obDone;
         }
         break;
-      }else if( wctrlFlags & WHERE_DISTINCTBY ){
-        pLoop->u.btree.nDistinctCol = 0;
       }
 
       iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor;
@@ -170123,10 +171197,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   ** field (type Bitmask) it must be aligned on an 8-byte boundary on
   ** some architectures. Hence the ROUND8() below.
   */
-  nByteWInfo = ROUND8P(sizeof(WhereInfo));
-  if( nTabList>1 ){
-    nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel));
-  }
+  nByteWInfo = SZ_WHEREINFO(nTabList);
   pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
   if( db->mallocFailed ){
     sqlite3DbFree(db, pWInfo);
@@ -170343,7 +171414,8 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
   }
 
   /* TUNING: Assume that a DISTINCT clause on a subquery reduces
-  ** the output size by a factor of 8 (LogEst -30).
+  ** the output size by a factor of 8 (LogEst -30).  Search for
+  ** tag-20250414a to see other cases.
   */
   if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){
     WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n",
@@ -172078,7 +173150,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
     p->pWhere = 0;
     p->pGroupBy = 0;
     p->pHaving = 0;
-    p->selFlags &= ~SF_Aggregate;
+    p->selFlags &= ~(u32)SF_Aggregate;
     p->selFlags |= SF_WinRewrite;
 
     /* Create the ORDER BY clause for the sub-select. This is the concatenation
@@ -174218,6 +175290,11 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
 
 /* #include "sqliteInt.h" */
 
+/*
+** Verify that the pParse->isCreate field is set
+*/
+#define ASSERT_IS_CREATE assert(pParse->isCreate)
+
 /*
 ** Disable all error recovery processing in the parser push-down
 ** automaton. 
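[Editor's note -- illustration only, not part of the patch. The hunk above
introduces ASSERT_IS_CREATE, which documents the invariant that the
CREATE-statement arm of the parser's state union (pParse->u1.cr) may only be
touched while pParse->isCreate is set. The following standalone sketch shows
the same debug-assert pattern; every name in it (MiniParse, zConstraintName,
miniBeginCreate) is a hypothetical stand-in, not SQLite code.]

    #include <assert.h>
    #include <string.h>

    typedef struct MiniParse MiniParse;
    struct MiniParse {
      int isCreate;              /* True while parsing a CREATE statement */
      union {
        struct {                 /* Valid only while isCreate is set */
          const char *zConstraintName;
        } cr;
        struct {                 /* Valid for all other statement kinds */
          int nQueryLoop;
        } d;
      } u1;
    };

    /* Debug-only guard, analogous to ASSERT_IS_CREATE above */
    #define MINI_ASSERT_IS_CREATE(P) assert((P)->isCreate)

    static void miniBeginCreate(MiniParse *p){
      p->isCreate = 1;
      memset(&p->u1.cr, 0, sizeof(p->u1.cr));  /* reset the CREATE arm */
    }

    static void miniSetConstraintName(MiniParse *p, const char *zName){
      MINI_ASSERT_IS_CREATE(p);  /* fires in debug builds on misuse */
      p->u1.cr.zConstraintName = zName;
    }

    int main(void){
      MiniParse sParse = {0};
      miniBeginCreate(&sParse);
      miniSetConstraintName(&sParse, "pk1");
      return 0;
    }

[End editor's note]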
@@ -174281,6 +175358,10 @@ static void parserSyntaxError(Parse *pParse, Token *p){ static void disableLookaside(Parse *pParse){ sqlite3 *db = pParse->db; pParse->disableLookaside++; +#ifdef SQLITE_DEBUG + pParse->isCreate = 1; +#endif + memset(&pParse->u1.cr, 0, sizeof(pParse->u1.cr)); DisableLookaside; } @@ -177917,7 +178998,9 @@ static YYACTIONTYPE yy_reduce( } break; case 14: /* createkw ::= CREATE */ -{disableLookaside(pParse);} +{ + disableLookaside(pParse); +} break; case 15: /* ifnotexists ::= */ case 18: /* temp ::= */ yytestcase(yyruleno==18); @@ -178009,7 +179092,7 @@ static YYACTIONTYPE yy_reduce( break; case 32: /* ccons ::= CONSTRAINT nm */ case 67: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==67); -{pParse->constraintName = yymsp[0].minor.yy0;} +{ASSERT_IS_CREATE; pParse->u1.cr.constraintName = yymsp[0].minor.yy0;} break; case 33: /* ccons ::= DEFAULT scantok term */ {sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy590,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} @@ -178119,7 +179202,7 @@ static YYACTIONTYPE yy_reduce( {yymsp[-1].minor.yy502 = 0;} break; case 66: /* tconscomma ::= COMMA */ -{pParse->constraintName.n = 0;} +{ASSERT_IS_CREATE; pParse->u1.cr.constraintName.n = 0;} break; case 68: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ {sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy402,yymsp[0].minor.yy502,yymsp[-2].minor.yy502,0);} @@ -178206,8 +179289,8 @@ static YYACTIONTYPE yy_reduce( if( pRhs ){ pRhs->op = (u8)yymsp[-1].minor.yy502; pRhs->pPrior = pLhs; - if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; - pRhs->selFlags &= ~SF_MultiValue; + if( ALWAYS(pLhs) ) pLhs->selFlags &= ~(u32)SF_MultiValue; + pRhs->selFlags &= ~(u32)SF_MultiValue; if( yymsp[-1].minor.yy502!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); @@ -178847,12 +179930,21 @@ static YYACTIONTYPE yy_reduce( ** expr1 IN () ** expr1 NOT IN () ** - ** simplify to constants 0 (false) and 1 (true), respectively, - ** regardless of the value of expr1. + ** simplify to constants 0 (false) and 1 (true), respectively. + ** + ** Except, do not apply this optimization if expr1 contains a function + ** because that function might be an aggregate (we don't know yet whether + ** it is or not) and if it is an aggregate, that could change the meaning + ** of the whole query. */ - sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); - yymsp[-4].minor.yy590 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); - if( yymsp[-4].minor.yy590 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy590); + Expr *pB = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy502 ? "true" : "false"); + if( pB ) sqlite3ExprIdToTrueFalse(pB); + if( !ExprHasProperty(yymsp[-4].minor.yy590, EP_HasFunc) ){ + sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy590); + yymsp[-4].minor.yy590 = pB; + }else{ + yymsp[-4].minor.yy590 = sqlite3PExpr(pParse, yymsp[-3].minor.yy502 ? 
TK_OR : TK_AND, pB, yymsp[-4].minor.yy590); + } }else{ Expr *pRHS = yymsp[-1].minor.yy402->a[0].pExpr; if( yymsp[-1].minor.yy402->nExpr==1 && sqlite3ExprIsConstant(pParse,pRHS) && yymsp[-4].minor.yy590->op!=TK_VECTOR ){ @@ -179012,6 +180104,10 @@ static YYACTIONTYPE yy_reduce( { sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy502, yymsp[-4].minor.yy28.a, yymsp[-4].minor.yy28.b, yymsp[-2].minor.yy563, yymsp[0].minor.yy590, yymsp[-10].minor.yy502, yymsp[-8].minor.yy502); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ +#ifdef SQLITE_DEBUG + assert( pParse->isCreate ); /* Set by createkw reduce action */ + pParse->isCreate = 0; /* But, should not be set for CREATE TRIGGER */ +#endif } break; case 262: /* trigger_time ::= BEFORE|AFTER */ @@ -180454,7 +181550,7 @@ static int getToken(const unsigned char **pz){ int t; /* Token type to return */ do { z += sqlite3GetToken(z, &t); - }while( t==TK_SPACE ); + }while( t==TK_SPACE || t==TK_COMMENT ); if( t==TK_ID || t==TK_STRING || t==TK_JOIN_KW @@ -180947,7 +182043,11 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ assert( n==6 ); tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed); #endif /* SQLITE_OMIT_WINDOWFUNC */ - }else if( tokenType==TK_COMMENT && (db->flags & SQLITE_Comments)!=0 ){ + }else if( tokenType==TK_COMMENT + && (db->init.busy || (db->flags & SQLITE_Comments)!=0) + ){ + /* Ignore SQL comments if either (1) we are reparsing the schema or + ** (2) SQLITE_DBCONFIG_ENABLE_COMMENTS is turned on (the default). */ zSql += n; continue; }else if( tokenType!=TK_QNUMBER ){ @@ -181842,6 +182942,14 @@ SQLITE_API int sqlite3_initialize(void){ if( rc==SQLITE_OK ){ sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); +#ifdef SQLITE_EXTRA_INIT_MUTEXED + { + int SQLITE_EXTRA_INIT_MUTEXED(const char*); + rc = SQLITE_EXTRA_INIT_MUTEXED(0); + } +#endif + } + if( rc==SQLITE_OK ){ sqlite3MemoryBarrier(); sqlite3GlobalConfig.isInit = 1; #ifdef SQLITE_EXTRA_INIT @@ -182298,17 +183406,22 @@ SQLITE_API int sqlite3_config(int op, ...){ ** If lookaside is already active, return SQLITE_BUSY. ** ** The sz parameter is the number of bytes in each lookaside slot. -** The cnt parameter is the number of slots. If pStart is NULL the -** space for the lookaside memory is obtained from sqlite3_malloc(). -** If pStart is not NULL then it is sz*cnt bytes of memory to use for -** the lookaside memory. +** The cnt parameter is the number of slots. If pBuf is NULL the +** space for the lookaside memory is obtained from sqlite3_malloc() +** or similar. If pBuf is not NULL then it is sz*cnt bytes of memory +** to use for the lookaside memory. */ -static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ +static int setupLookaside( + sqlite3 *db, /* Database connection being configured */ + void *pBuf, /* Memory to use for lookaside. 
May be NULL */
+  int sz,          /* Desired size of each lookaside memory slot */
+  int cnt          /* Number of slots to allocate */
+){
 #ifndef SQLITE_OMIT_LOOKASIDE
-  void *pStart;
-  sqlite3_int64 szAlloc;
-  int nBig;   /* Number of full-size slots */
-  int nSm;    /* Number smaller LOOKASIDE_SMALL-byte slots */
+  void *pStart;          /* Start of the lookaside buffer */
+  sqlite3_int64 szAlloc; /* Total space set aside for lookaside memory */
+  int nBig;              /* Number of full-size slots */
+  int nSm;               /* Number smaller LOOKASIDE_SMALL-byte slots */
 
   if( sqlite3LookasideUsed(db,0)>0 ){
     return SQLITE_BUSY;
@@ -182321,19 +183434,22 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){
     sqlite3_free(db->lookaside.pStart);
   }
   /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger
-  ** than a pointer to be useful.
+  ** than a pointer and small enough to fit in a u16.
   */
-  sz = ROUNDDOWN8(sz);  /* IMP: R-33038-09382 */
+  sz = ROUNDDOWN8(sz);
   if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0;
   if( sz>65528 ) sz = 65528;
-  if( cnt<0 ) cnt = 0;
+  /* Count must be at least 1 to be useful, but not so large as to use
+  ** more than 0x7fff0000 total bytes for lookaside. */
+  if( cnt<1 ) cnt = 0;
+  if( sz>0 && cnt>(0x7fff0000/sz) ) cnt = 0x7fff0000/sz;
   szAlloc = (i64)sz*(i64)cnt;
-  if( sz==0 || cnt==0 ){
+  if( szAlloc==0 ){
     sz = 0;
     pStart = 0;
   }else if( pBuf==0 ){
     sqlite3BeginBenignMalloc();
-    pStart = sqlite3Malloc( szAlloc );  /* IMP: R-61949-35727 */
+    pStart = sqlite3Malloc( szAlloc );
     sqlite3EndBenignMalloc();
     if( pStart ) szAlloc = sqlite3MallocSize(pStart);
   }else{
@@ -183310,6 +184426,9 @@ SQLITE_API int sqlite3_busy_handler(
   db->busyHandler.pBusyArg = pArg;
   db->busyHandler.nBusy = 0;
   db->busyTimeout = 0;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  db->setlkTimeout = 0;
+#endif
   sqlite3_mutex_leave(db->mutex);
   return SQLITE_OK;
 }
@@ -183359,12 +184478,49 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){
     sqlite3_busy_handler(db, (int(*)(void*,int))sqliteDefaultBusyCallback,
                              (void*)db);
     db->busyTimeout = ms;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+    db->setlkTimeout = ms;
+#endif
   }else{
     sqlite3_busy_handler(db, 0, 0);
   }
   return SQLITE_OK;
 }
 
+/*
+** Set the setlk timeout value.
+*/
+SQLITE_API int sqlite3_setlk_timeout(sqlite3 *db, int ms, int flags){
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  int iDb;
+  int bBOC = ((flags & SQLITE_SETLK_BLOCK_ON_CONNECT) ? 1 : 0);
+#endif
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+  if( ms<-1 ) return SQLITE_RANGE;
+#ifdef SQLITE_ENABLE_SETLK_TIMEOUT
+  sqlite3_mutex_enter(db->mutex);
+  db->setlkTimeout = ms;
+  db->setlkFlags = flags;
+  sqlite3BtreeEnterAll(db);
+  for(iDb=0; iDb<db->nDb; iDb++){
+    Btree *pBt = db->aDb[iDb].pBt;
+    if( pBt ){
+      sqlite3_file *fd = sqlite3PagerFile(sqlite3BtreePager(pBt));
+      sqlite3OsFileControlHint(fd, SQLITE_FCNTL_BLOCK_ON_CONNECT, (void*)&bBOC);
+    }
+  }
+  sqlite3BtreeLeaveAll(db);
+  sqlite3_mutex_leave(db->mutex);
+#endif
+#if !defined(SQLITE_ENABLE_API_ARMOR) && !defined(SQLITE_ENABLE_SETLK_TIMEOUT)
+  UNUSED_PARAMETER(db);
+  UNUSED_PARAMETER(flags);
+#endif
+  return SQLITE_OK;
+}
+
 /*
 ** Cause any pending operation to stop at its earliest opportunity. 
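**
** (Editor's aside on the sqlite3_setlk_timeout() interface added above --
** a minimal, hedged usage sketch, assuming a build compiled with
** SQLITE_ENABLE_SETLK_TIMEOUT and an already-open connection db:
**
**     sqlite3_setlk_timeout(db, 2000, SQLITE_SETLK_BLOCK_ON_CONNECT);
**
** requests that file locks, including those taken while connecting, block
** for up to 2000 ms.  Per the #ifdef logic above, builds without the
** compile-time option accept the call and simply return SQLITE_OK.)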
*/ @@ -185330,7 +186486,7 @@ SQLITE_API int sqlite3_set_clientdata( return SQLITE_OK; }else{ size_t n = strlen(zName); - p = sqlite3_malloc64( sizeof(DbClientData)+n+1 ); + p = sqlite3_malloc64( SZ_DBCLIENTDATA(n+1) ); if( p==0 ){ if( xDestructor ) xDestructor(pData); sqlite3_mutex_leave(db->mutex); @@ -185484,13 +186640,10 @@ SQLITE_API int sqlite3_table_column_metadata( if( zColumnName==0 ){ /* Query for existence of table only */ }else{ - for(iCol=0; iColnCol; iCol++){ + iCol = sqlite3ColumnIndex(pTab, zColumnName); + if( iCol>=0 ){ pCol = &pTab->aCol[iCol]; - if( 0==sqlite3StrICmp(pCol->zCnName, zColumnName) ){ - break; - } - } - if( iCol==pTab->nCol ){ + }else{ if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){ iCol = pTab->iPKey; pCol = iCol>=0 ? &pTab->aCol[iCol] : 0; @@ -185699,8 +186852,8 @@ SQLITE_API int sqlite3_test_control(int op, ...){ /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b); ** ** If b is true, then activate the SQLITE_FkNoAction setting. If b is - ** false then clearn that setting. If the SQLITE_FkNoAction setting is - ** abled, all foreign key ON DELETE and ON UPDATE actions behave as if + ** false then clear that setting. If the SQLITE_FkNoAction setting is + ** enabled, all foreign key ON DELETE and ON UPDATE actions behave as if ** they were NO ACTION, regardless of how they are defined. ** ** NB: One must usually run "PRAGMA writable_schema=RESET" after @@ -187047,7 +188200,7 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ ** Here, array { X } means zero or more occurrences of X, adjacent in ** memory. A "position" is an index of a token in the token stream ** generated by the tokenizer. Note that POS_END and POS_COLUMN occur -** in the same logical place as the position element, and act as sentinals +** in the same logical place as the position element, and act as sentinels ** ending a position list array. POS_END is 0. POS_COLUMN is 1. ** The positions numbers are not stored literally but rather as two more ** than the difference from the prior position, or the just the position plus @@ -187266,6 +188419,13 @@ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ #ifndef _FTSINT_H #define _FTSINT_H +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ + #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif @@ -187735,6 +188895,19 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ #define deliberate_fall_through +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + + #endif /* SQLITE_AMALGAMATION */ #ifdef SQLITE_DEBUG @@ -187839,7 +189012,7 @@ struct Fts3Table { #endif #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - /* True to disable the incremental doclist optimization. This is controled + /* True to disable the incremental doclist optimization. This is controlled ** by special insert command 'test-no-incr-doclist'. */ int bNoIncrDoclist; @@ -187891,7 +189064,7 @@ struct Fts3Cursor { /* ** The Fts3Cursor.eSearch member is always set to one of the following. -** Actualy, Fts3Cursor.eSearch can be greater than or equal to +** Actually, Fts3Cursor.eSearch can be greater than or equal to ** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index ** of the column to be searched. 
For example, in ** @@ -187964,9 +189137,13 @@ struct Fts3Phrase { */ int nToken; /* Number of tokens in the phrase */ int iColumn; /* Index of column this phrase must match */ - Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ + Fts3PhraseToken aToken[FLEXARRAY]; /* One for each token in the phrase */ }; +/* Size (in bytes) of an Fts3Phrase object large enough to hold N tokens */ +#define SZ_FTS3PHRASE(N) \ + (offsetof(Fts3Phrase,aToken)+(N)*sizeof(Fts3PhraseToken)) + /* ** A tree of these objects forms the RHS of a MATCH operator. ** @@ -188200,12 +189377,6 @@ SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); # define SQLITE_CORE 1 #endif -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ -/* #include */ /* #include "fts3.h" */ #ifndef SQLITE_CORE @@ -190544,7 +191715,7 @@ static int fts3DoclistOrMerge( ** sizes of the two inputs, plus enough space for exactly one of the input ** docids to grow. ** - ** A symetric argument may be made if the doclists are in descending + ** A symmetric argument may be made if the doclists are in descending ** order. */ aOut = sqlite3_malloc64((i64)n1+n2+FTS3_VARINT_MAX-1+FTS3_BUFFER_PADDING); @@ -192343,7 +193514,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ nDistance = iPrev - nMaxUndeferred; } - aOut = (char *)sqlite3Fts3MallocZero(nPoslist+FTS3_BUFFER_PADDING); + aOut = (char *)sqlite3Fts3MallocZero(((i64)nPoslist)+FTS3_BUFFER_PADDING); if( !aOut ){ sqlite3_free(aPoslist); return SQLITE_NOMEM; @@ -192642,7 +193813,7 @@ static int incrPhraseTokenNext( ** ** * does not contain any deferred tokens. ** -** Advance it to the next matching documnent in the database and populate +** Advance it to the next matching document in the database and populate ** the Fts3Doclist.pList and nList fields. ** ** If there is no "next" entry and no error occurs, then *pbEof is set to @@ -193649,7 +194820,7 @@ static int fts3EvalNext(Fts3Cursor *pCsr){ } /* -** Restart interation for expression pExpr so that the next call to +** Restart iteration for expression pExpr so that the next call to ** fts3EvalNext() visits the first row. Do not allow incremental ** loading or merging of phrase doclists for this iteration. ** @@ -194841,6 +196012,23 @@ SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer( */ static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); +/* +** Search buffer z[], size n, for a '"' character. Or, if enable_parenthesis +** is defined, search for '(' and ')' as well. Return the index of the first +** such character in the buffer. If there is no such character, return -1. +*/ +static int findBarredChar(const char *z, int n){ + int ii; + for(ii=0; iiiLangid, z, i, &pCursor); + *pnConsumed = n; + rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, n, &pCursor); if( rc==SQLITE_OK ){ const char *zToken; int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0; @@ -194882,7 +196063,18 @@ static int getNextToken( rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); if( rc==SQLITE_OK ){ - nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; + /* Check that this tokenization did not gobble up any " characters. Or, + ** if enable_parenthesis is true, that it did not gobble up any + ** open or close parenthesis characters either. If it did, call + ** getNextToken() again, but pass only that part of the input buffer + ** up to the first such character. 
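** A hypothetical illustration (editor's note): if a custom tokenizer were
** to map the input
**
**     abc"def
**
** to the single token abcdef, the embedded double-quote would be swallowed.
** findBarredChar() reports the quote at offset 3, so the retry tokenizes
** only the first three bytes and leaves the quote for the expression
** parser to handle.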
*/ + int iBarred = findBarredChar(z, iEnd); + if( iBarred>=0 ){ + pModule->xClose(pCursor); + return getNextToken(pParse, iCol, z, iBarred, ppExpr, pnConsumed); + } + + nByte = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1) + nToken; pRet = (Fts3Expr *)sqlite3Fts3MallocZero(nByte); if( !pRet ){ rc = SQLITE_NOMEM; @@ -194892,7 +196084,7 @@ static int getNextToken( pRet->pPhrase->nToken = 1; pRet->pPhrase->iColumn = iCol; pRet->pPhrase->aToken[0].n = nToken; - pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1]; + pRet->pPhrase->aToken[0].z = (char*)&pRet->pPhrase->aToken[1]; memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken); if( iEnd=0 ){ + *pnConsumed = iBarred; + } rc = SQLITE_OK; } @@ -194963,9 +196159,9 @@ static int getNextString( Fts3Expr *p = 0; sqlite3_tokenizer_cursor *pCursor = 0; char *zTemp = 0; - int nTemp = 0; + i64 nTemp = 0; - const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + const int nSpace = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1); int nToken = 0; /* The final Fts3Expr data structure, including the Fts3Phrase, @@ -195337,7 +196533,7 @@ static int fts3ExprParse( /* The isRequirePhrase variable is set to true if a phrase or ** an expression contained in parenthesis is required. If a - ** binary operator (AND, OR, NOT or NEAR) is encounted when + ** binary operator (AND, OR, NOT or NEAR) is encountered when ** isRequirePhrase is set, this is a syntax error. */ if( !isPhrase && isRequirePhrase ){ @@ -195919,7 +197115,6 @@ static void fts3ExprTestCommon( } if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ - sqlite3Fts3ExprFree(pExpr); sqlite3_result_error(context, "Error parsing expression", -1); }else if( rc==SQLITE_NOMEM || !(zBuf = exprToString(pExpr, 0)) ){ sqlite3_result_error_nomem(context); @@ -196162,7 +197357,7 @@ static void fts3HashInsertElement( } -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. ** "new_size" must be a power of 2. The hash table might fail ** to resize if sqliteMalloc() fails. ** @@ -196617,7 +197812,7 @@ static int star_oh(const char *z){ /* ** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the +** of the word that precedes the zFrom ending, then change the ** ending to zTo. ** ** The input word *pz and zFrom are both in reverse order. zTo @@ -198128,7 +199323,7 @@ static int fts3tokFilterMethod( fts3tokResetCursor(pCsr); if( idxNum==1 ){ const char *zByte = (const char *)sqlite3_value_text(apVal[0]); - int nByte = sqlite3_value_bytes(apVal[0]); + sqlite3_int64 nByte = sqlite3_value_bytes(apVal[0]); pCsr->zInput = sqlite3_malloc64(nByte+1); if( pCsr->zInput==0 ){ rc = SQLITE_NOMEM; @@ -202200,7 +203395,7 @@ static int fts3IncrmergePush( ** ** It is assumed that the buffer associated with pNode is already large ** enough to accommodate the new entry. The buffer associated with pPrev -** is extended by this function if requrired. +** is extended by this function if required. ** ** If an error (i.e. OOM condition) occurs, an SQLite error code is ** returned. Otherwise, SQLITE_OK. @@ -203863,7 +205058,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferToken( /* ** SQLite value pRowid contains the rowid of a row that may or may not be ** present in the FTS3 table. If it is, delete it and adjust the contents -** of subsiduary data structures accordingly. +** of subsidiary data structures accordingly. 
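** (Editor's note, hedged: for a typical FTS4 table the subsidiary
** structures include shadow tables such as %_docsize and %_stat, which
** track per-row and aggregate token counts; the exact set depends on the
** options used when the table was declared.)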
*/ static int fts3DeleteByRowid( Fts3Table *p, @@ -204189,9 +205384,13 @@ struct MatchinfoBuffer { int nElem; int bGlobal; /* Set if global data is loaded */ char *zMatchinfo; - u32 aMatchinfo[1]; + u32 aMI[FLEXARRAY]; }; +/* Size (in bytes) of a MatchinfoBuffer sufficient for N elements */ +#define SZ_MATCHINFOBUFFER(N) \ + (offsetof(MatchinfoBuffer,aMI)+(((N)+1)/2)*sizeof(u64)) + /* ** The snippet() and offsets() functions both return text values. An instance @@ -204216,13 +205415,13 @@ struct StrBuffer { static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ MatchinfoBuffer *pRet; sqlite3_int64 nByte = sizeof(u32) * (2*(sqlite3_int64)nElem + 1) - + sizeof(MatchinfoBuffer); + + SZ_MATCHINFOBUFFER(1); sqlite3_int64 nStr = strlen(zMatchinfo); pRet = sqlite3Fts3MallocZero(nByte + nStr+1); if( pRet ){ - pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; - pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + pRet->aMI[0] = (u8*)(&pRet->aMI[1]) - (u8*)pRet; + pRet->aMI[1+nElem] = pRet->aMI[0] + sizeof(u32)*((int)nElem+1); pRet->nElem = (int)nElem; pRet->zMatchinfo = ((char*)pRet) + nByte; @@ -204236,10 +205435,10 @@ static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ static void fts3MIBufferFree(void *p){ MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]); - assert( (u32*)p==&pBuf->aMatchinfo[1] - || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2] + assert( (u32*)p==&pBuf->aMI[1] + || (u32*)p==&pBuf->aMI[pBuf->nElem+2] ); - if( (u32*)p==&pBuf->aMatchinfo[1] ){ + if( (u32*)p==&pBuf->aMI[1] ){ pBuf->aRef[1] = 0; }else{ pBuf->aRef[2] = 0; @@ -204256,18 +205455,18 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ if( p->aRef[1]==0 ){ p->aRef[1] = 1; - aOut = &p->aMatchinfo[1]; + aOut = &p->aMI[1]; xRet = fts3MIBufferFree; } else if( p->aRef[2]==0 ){ p->aRef[2] = 1; - aOut = &p->aMatchinfo[p->nElem+2]; + aOut = &p->aMI[p->nElem+2]; xRet = fts3MIBufferFree; }else{ aOut = (u32*)sqlite3_malloc64(p->nElem * sizeof(u32)); if( aOut ){ xRet = sqlite3_free; - if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32)); + if( p->bGlobal ) memcpy(aOut, &p->aMI[1], p->nElem*sizeof(u32)); } } @@ -204277,7 +205476,7 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){ p->bGlobal = 1; - memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32)); + memcpy(&p->aMI[2+p->nElem], &p->aMI[1], p->nElem*sizeof(u32)); } /* @@ -204692,7 +205891,7 @@ static int fts3StringAppend( } /* If there is insufficient space allocated at StrBuffer.z, use realloc() - ** to grow the buffer until so that it is big enough to accomadate the + ** to grow the buffer until so that it is big enough to accommodate the ** appended data. 
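** A short sketch of the idiom (editor's addition; the concrete growth
** policy is whatever the code below implements):
**
**     if( n + nAppend + 1 >= nAlloc ){
**       nAlloc = n + nAppend + headroom;      /* headroom is some slack */
**       z = sqlite3_realloc64(z, nAlloc);
**     }
**
** so that repeated small appends do not trigger a realloc each time.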
*/ if( pStr->n+nAppend+1>=pStr->nAlloc ){ @@ -205104,16 +206303,16 @@ static size_t fts3MatchinfoSize(MatchInfo *pInfo, char cArg){ break; case FTS3_MATCHINFO_LHITS: - nVal = pInfo->nCol * pInfo->nPhrase; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase; break; case FTS3_MATCHINFO_LHITS_BM: - nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32); + nVal = (size_t)pInfo->nPhrase * ((pInfo->nCol + 31) / 32); break; default: assert( cArg==FTS3_MATCHINFO_HITS ); - nVal = pInfo->nCol * pInfo->nPhrase * 3; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase * 3; break; } @@ -206671,8 +207870,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** Beginning with version 3.45.0 (circa 2024-01-01), these routines also ** accept BLOB values that have JSON encoded using a binary representation ** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk -** format SQLite JSONB is completely different and incompatible with -** PostgreSQL JSONB. +** format for SQLite-JSONB is completely different and incompatible with +** PostgreSQL-JSONB. ** ** Decoding and interpreting JSONB is still O(N) where N is the size of ** the input, the same as text JSON. However, the constant of proportionality @@ -206729,7 +207928,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ** The payload size need not be expressed in its minimal form. For example, ** if the payload size is 10, the size can be expressed in any of 5 different -** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by on 0x0a byte, +** ways: (1) (X>>4)==10, (2) (X>>4)==12 following by one 0x0a byte, ** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by ** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and ** a single byte of 0x0a. The shorter forms are preferred, of course, but @@ -206739,7 +207938,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** the size when it becomes known, resulting in a non-minimal encoding. ** ** The value (X>>4)==15 is not actually used in the current implementation -** (as SQLite is currently unable handle BLOBs larger than about 2GB) +** (as SQLite is currently unable to handle BLOBs larger than about 2GB) ** but is included in the design to allow for future enhancements. ** ** The payload follows the header. NULL, TRUE, and FALSE have no payload and @@ -206799,23 +207998,47 @@ static const char * const jsonbType[] = { ** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). 
*/ static const char jsonIsSpace[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +#ifdef SQLITE_ASCII +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f */ +#endif +#ifdef SQLITE_EBCDIC +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f */ +#endif + }; #define jsonIsspace(x) (jsonIsSpace[(unsigned char)x]) @@ -206823,7 +208046,13 @@ static const char jsonIsSpace[] = { ** The set of all space characters recognized by jsonIsspace(). ** Useful as the second argument to strspn(). */ +#ifdef SQLITE_ASCII static const char jsonSpaces[] = "\011\012\015\040"; +#endif +#ifdef SQLITE_EBCDIC +static const char jsonSpaces[] = "\005\045\015\100"; +#endif + /* ** Characters that are special to JSON. Control characters, @@ -206832,23 +208061,46 @@ static const char jsonSpaces[] = "\011\012\015\040"; ** it in the set of special characters. 
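** (Editor's sketch of how such a lookup table is typically consumed --
** hedged, since the actual scanning loops live elsewhere in this file:
**
**     while( jsonIsOk[(unsigned char)z[k]] ) k++;
**
** advances over a run of ordinary bytes so that only the first special
** character needs byte-at-a-time escape handling.)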
*/ static const char jsonIsOk[256] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 +#ifdef SQLITE_ASCII +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, /* 2 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 3 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, /* 5 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */ + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 8 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* a */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* b */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* c */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 /* f */ +#endif +#ifdef SQLITE_EBCDIC +/*0 1 2 3 4 5 6 7 8 9 a b c d e f */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */ + 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, /* 3 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 5 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, /* 7 */ + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 8 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* a */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* b */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* c */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* d */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 /* f */ +#endif }; /* Objects */ @@ -206993,7 +208245,7 @@ struct JsonParse { ** Forward references **************************************************************************/ static void jsonReturnStringAsBlob(JsonString*); -static int jsonFuncArgMightBeBinary(sqlite3_value *pJson); +static int jsonArgIsJsonb(sqlite3_value *pJson, JsonParse *p); static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*); static void jsonReturnParse(sqlite3_context*,JsonParse*); static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32); @@ -207067,7 +208319,7 @@ static int jsonCacheInsert( ** most-recently used entry if it isn't so already. 
** ** The JsonParse object returned still belongs to the Cache and might -** be deleted at any moment. If the caller whants the JsonParse to +** be deleted at any moment. If the caller wants the JsonParse to ** linger, it needs to increment the nPJRef reference counter. */ static JsonParse *jsonCacheSearch( @@ -207411,11 +208663,9 @@ static void jsonAppendSqlValue( break; } default: { - if( jsonFuncArgMightBeBinary(pValue) ){ - JsonParse px; - memset(&px, 0, sizeof(px)); - px.aBlob = (u8*)sqlite3_value_blob(pValue); - px.nBlob = sqlite3_value_bytes(pValue); + JsonParse px; + memset(&px, 0, sizeof(px)); + if( jsonArgIsJsonb(pValue, &px) ){ jsonTranslateBlobToText(&px, 0, p); }else if( p->eErr==0 ){ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1); @@ -207734,7 +208984,7 @@ static void jsonWrongNumArgs( */ static int jsonBlobExpand(JsonParse *pParse, u32 N){ u8 *aNew; - u32 t; + u64 t; assert( N>pParse->nBlobAlloc ); if( pParse->nBlobAlloc==0 ){ t = 100; @@ -207744,8 +208994,9 @@ static int jsonBlobExpand(JsonParse *pParse, u32 N){ if( tdb, pParse->aBlob, t); if( aNew==0 ){ pParse->oom = 1; return 1; } + assert( t<0x7fffffff ); pParse->aBlob = aNew; - pParse->nBlobAlloc = t; + pParse->nBlobAlloc = (u32)t; return 0; } @@ -207812,7 +209063,7 @@ static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode( } -/* Append an node type byte together with the payload size and +/* Append a node type byte together with the payload size and ** possibly also the payload. ** ** If aPayload is not NULL, then it is a pointer to the payload which @@ -207881,8 +209132,10 @@ static int jsonBlobChangePayloadSize( nExtra = 1; }else if( szType==13 ){ nExtra = 2; - }else{ + }else if( szType==14 ){ nExtra = 4; + }else{ + nExtra = 8; } if( szPayload<=11 ){ nNeeded = 0; @@ -208352,7 +209605,12 @@ static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ || c=='n' || c=='r' || c=='t' || (c=='u' && jsonIs4Hex(&z[j+1])) ){ if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ; - }else if( c=='\'' || c=='0' || c=='v' || c=='\n' + }else if( c=='\'' || c=='v' || c=='\n' +#ifdef SQLITE_BUG_COMPATIBLE_20250510 + || (c=='0') /* Legacy bug compatible */ +#else + || (c=='0' && !sqlite3Isdigit(z[j+1])) /* Correct implementation */ +#endif || (0xe2==(u8)c && 0x80==(u8)z[j+1] && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) || (c=='x' && jsonIs2Hex(&z[j+1])) ){ @@ -208702,10 +209960,7 @@ static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ u8 x; u32 sz; u32 n; - if( NEVER(i>pParse->nBlob) ){ - *pSz = 0; - return 0; - } + assert( i<=pParse->nBlob ); x = pParse->aBlob[i]>>4; if( x<=11 ){ sz = x; @@ -208742,15 +209997,15 @@ static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ *pSz = 0; return 0; } - sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + + sz = ((u32)pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8]; n = 9; } if( (i64)i+sz+n > pParse->nBlob && (i64)i+sz+n > pParse->nBlob-pParse->delta ){ - sz = 0; - n = 0; + *pSz = 0; + return 0; } *pSz = sz; return n; @@ -208847,9 +210102,12 @@ static u32 jsonTranslateBlobToText( } case JSONB_TEXT: case JSONB_TEXTJ: { - jsonAppendChar(pOut, '"'); - jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz); - jsonAppendChar(pOut, '"'); + if( pOut->nUsed+sz+2<=pOut->nAlloc || jsonStringGrow(pOut, sz+2)==0 ){ + pOut->zBuf[pOut->nUsed] = '"'; + memcpy(pOut->zBuf+pOut->nUsed+1,(const char*)&pParse->aBlob[i+n],sz); + pOut->zBuf[pOut->nUsed+sz+1] = '"'; + pOut->nUsed += sz+2; + } break; } case 
JSONB_TEXT5: { @@ -209088,33 +210346,6 @@ static u32 jsonTranslateBlobToPrettyText( return i; } - -/* Return true if the input pJson -** -** For performance reasons, this routine does not do a detailed check of the -** input BLOB to ensure that it is well-formed. Hence, false positives are -** possible. False negatives should never occur, however. -*/ -static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){ - u32 sz, n; - const u8 *aBlob; - int nBlob; - JsonParse s; - if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0; - aBlob = sqlite3_value_blob(pJson); - nBlob = sqlite3_value_bytes(pJson); - if( nBlob<1 ) return 0; - if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0; - memset(&s, 0, sizeof(s)); - s.aBlob = (u8*)aBlob; - s.nBlob = nBlob; - n = jsonbPayloadSize(&s, 0, &sz); - if( n==0 ) return 0; - if( sz+n!=(u32)nBlob ) return 0; - if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0; - return sz+n==(u32)nBlob; -} - /* ** Given that a JSONB_ARRAY object starts at offset i, return ** the number of entries in that array. @@ -209147,6 +210378,82 @@ static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){ pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz); } +/* +** If the JSONB at aIns[0..nIns-1] can be expanded (by denormalizing the +** size field) by d bytes, then write the expansion into aOut[] and +** return true. In this way, an overwrite happens without changing the +** size of the JSONB, which reduces memcpy() operations and also make it +** faster and easier to update the B-Tree entry that contains the JSONB +** in the database. +** +** If the expansion of aIns[] by d bytes cannot be (easily) accomplished +** then return false. +** +** The d parameter is guaranteed to be between 1 and 8. +** +** This routine is an optimization. A correct answer is obtained if it +** always leaves the output unchanged and returns false. 
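+**
+** A worked illustration (editor's addition): a TEXT element with a 10-byte
+** payload is normally encoded with a one-byte header, (X>>4)==10.  The
+** same payload may instead be encoded with a two-byte header, (X>>4)==12
+** followed by the size byte 0x0a, making the element exactly one byte
+** larger without touching the payload bytes.  That re-encoding is the
+** d==1 case handled below.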
+*/ +static int jsonBlobOverwrite( + u8 *aOut, /* Overwrite here */ + const u8 *aIns, /* New content */ + u32 nIns, /* Bytes of new content */ + u32 d /* Need to expand new content by this much */ +){ + u32 szPayload; /* Bytes of payload */ + u32 i; /* New header size, after expansion & a loop counter */ + u8 szHdr; /* Size of header before expansion */ + + /* Lookup table for finding the upper 4 bits of the first byte of the + ** expanded aIns[], based on the size of the expanded aIns[] header: + ** + ** 2 3 4 5 6 7 8 9 */ + static const u8 aType[] = { 0xc0, 0xd0, 0, 0xe0, 0, 0, 0, 0xf0 }; + + if( (aIns[0]&0x0f)<=2 ) return 0; /* Cannot enlarge NULL, true, false */ + switch( aIns[0]>>4 ){ + default: { /* aIns[] header size 1 */ + if( ((1<=2 && i<=9 && aType[i-2]!=0 ); + aOut[0] = (aIns[0] & 0x0f) | aType[i-2]; + memcpy(&aOut[i], &aIns[szHdr], nIns-szHdr); + szPayload = nIns - szHdr; + while( 1/*edit-by-break*/ ){ + i--; + aOut[i] = szPayload & 0xff; + if( i==1 ) break; + szPayload >>= 8; + } + assert( (szPayload>>8)==0 ); + return 1; +} + /* ** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of ** content beginning at iDel, and replacing them with nIns bytes of @@ -209168,6 +210475,11 @@ static void jsonBlobEdit( u32 nIns /* Bytes of content to insert */ ){ i64 d = (i64)nIns - (i64)nDel; + if( d<0 && d>=(-8) && aIns!=0 + && jsonBlobOverwrite(&pParse->aBlob[iDel], aIns, nIns, (int)-d) + ){ + return; + } if( d!=0 ){ if( pParse->nBlob + d > pParse->nBlobAlloc ){ jsonBlobExpand(pParse, pParse->nBlob+d); @@ -209179,7 +210491,9 @@ static void jsonBlobEdit( pParse->nBlob += d; pParse->delta += d; } - if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns); + if( nIns && aIns ){ + memcpy(&pParse->aBlob[iDel], aIns, nIns); + } } /* @@ -209264,7 +210578,21 @@ static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){ case 'r': { *piOut = '\r'; return 2; } case 't': { *piOut = '\t'; return 2; } case 'v': { *piOut = '\v'; return 2; } - case '0': { *piOut = 0; return 2; } + case '0': { + /* JSON5 requires that the \0 escape not be followed by a digit. + ** But SQLite did not enforce this restriction in versions 3.42.0 + ** through 3.49.2. That was a bug. But some applications might have + ** come to depend on that bug. Use the SQLITE_BUG_COMPATIBLE_20250510 + ** option to restore the old buggy behavior. */ +#ifdef SQLITE_BUG_COMPATIBLE_20250510 + /* Legacy bug-compatible behavior */ + *piOut = 0; +#else + /* Correct behavior */ + *piOut = (n>2 && sqlite3Isdigit(z[2])) ? JSON_INVALID_CHAR : 0; +#endif + return 2; + } case '\'': case '"': case '/': @@ -209764,7 +211092,7 @@ static void jsonReturnFromBlob( char *zOut; u32 nOut = sz; z = (const char*)&pParse->aBlob[i+n]; - zOut = sqlite3DbMallocRaw(db, nOut+1); + zOut = sqlite3DbMallocRaw(db, ((u64)nOut)+1); if( zOut==0 ) goto returnfromblob_oom; for(iIn=iOut=0; iInaBlob = (u8*)sqlite3_value_blob(pArg); - pParse->nBlob = sqlite3_value_bytes(pArg); - }else{ + if( !jsonArgIsJsonb(pArg, pParse) ){ sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1); return 1; } @@ -209942,7 +211267,7 @@ static char *jsonBadPathError( } /* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent -** arguments come in parse where each pair contains a JSON path and +** arguments come in pairs where each pair contains a JSON path and ** content to insert or set at that patch. Do the updates ** and return the result. 
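** For instance (editor's illustration):
**
**     jsonb_set(jsonb('{"a":1}'), '$.b', 99)
**
** reaches this routine with argv[0] holding the JSONB for {"a":1},
** argv[1]=='$.b', and argv[2]==99, and the returned blob encodes
** {"a":1,"b":99}.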
** @@ -210013,27 +211338,46 @@ static void jsonInsertIntoBlob( /* ** If pArg is a blob that seems like a JSONB blob, then initialize ** p to point to that JSONB and return TRUE. If pArg does not seem like -** a JSONB blob, then return FALSE; -** -** This routine is only called if it is already known that pArg is a -** blob. The only open question is whether or not the blob appears -** to be a JSONB blob. +** a JSONB blob, then return FALSE. +** +** For small BLOBs (having no more than 7 bytes of payload) a full +** validity check is done. So for small BLOBs this routine only returns +** true if the value is guaranteed to be a valid JSONB. For larger BLOBs +** (8 byte or more of payload) only the size of the outermost element is +** checked to verify that the BLOB is superficially valid JSONB. +** +** A full JSONB validation is done on smaller BLOBs because those BLOBs might +** also be text JSON that has been incorrectly cast into a BLOB. +** (See tag-20240123-a and https://sqlite.org/forum/forumpost/012136abd5) +** If the BLOB is 9 bytes are larger, then it is not possible for the +** superficial size check done here to pass if the input is really text +** JSON so we do not need to look deeper in that case. +** +** Why we only need to do full JSONB validation for smaller BLOBs: +** +** The first byte of valid JSON text must be one of: '{', '[', '"', ' ', '\n', +** '\r', '\t', '-', or a digit '0' through '9'. Of these, only a subset +** can also be the first byte of JSONB: '{', '[', and digits '3' +** through '9'. In every one of those cases, the payload size is 7 bytes +** or less. So if we do full JSONB validation for every BLOB where the +** payload is less than 7 bytes, we will never get a false positive for +** JSONB on an input that is really text JSON. */ static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){ u32 n, sz = 0; + u8 c; + if( sqlite3_value_type(pArg)!=SQLITE_BLOB ) return 0; p->aBlob = (u8*)sqlite3_value_blob(pArg); p->nBlob = (u32)sqlite3_value_bytes(pArg); - if( p->nBlob==0 ){ - p->aBlob = 0; - return 0; - } - if( NEVER(p->aBlob==0) ){ - return 0; - } - if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT + if( p->nBlob>0 + && ALWAYS(p->aBlob!=0) + && ((c = p->aBlob[0]) & 0x0f)<=JSONB_OBJECT && (n = jsonbPayloadSize(p, 0, &sz))>0 && sz+n==p->nBlob - && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0) + && ((c & 0x0f)>JSONB_FALSE || sz==0) + && (sz>7 + || (c!=0x7b && c!=0x5b && !sqlite3Isdigit(c)) + || jsonbValidityCheck(p, 0, p->nBlob, 1)==0) ){ return 1; } @@ -210111,7 +211455,7 @@ static JsonParse *jsonParseFuncArg( ** JSON functions were suppose to work. From the beginning, blob was ** reserved for expansion and a blob value should have raised an error. ** But it did not, due to a bug. And many applications came to depend - ** upon this buggy behavior, espeically when using the CLI and reading + ** upon this buggy behavior, especially when using the CLI and reading ** JSON text using readfile(), which returns a blob. For this reason ** we will continue to support the bug moving forward. ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d @@ -211126,21 +212470,17 @@ static void jsonValidFunc( return; } case SQLITE_BLOB: { - if( jsonFuncArgMightBeBinary(argv[0]) ){ + JsonParse py; + memset(&py, 0, sizeof(py)); + if( jsonArgIsJsonb(argv[0], &py) ){ if( flags & 0x04 ){ /* Superficial checking only - accomplished by the - ** jsonFuncArgMightBeBinary() call above. */ + ** jsonArgIsJsonb() call above. */ res = 1; }else if( flags & 0x08 ){ /* Strict checking. 
Check by translating BLOB->TEXT->BLOB. If ** no errors occur, call that a "strict check". */ - JsonParse px; - u32 iErr; - memset(&px, 0, sizeof(px)); - px.aBlob = (u8*)sqlite3_value_blob(argv[0]); - px.nBlob = sqlite3_value_bytes(argv[0]); - iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1); - res = iErr==0; + res = 0==jsonbValidityCheck(&py, 0, py.nBlob, 1); } break; } @@ -211198,9 +212538,7 @@ static void jsonErrorFunc( UNUSED_PARAMETER(argc); memset(&s, 0, sizeof(s)); s.db = sqlite3_context_db_handle(ctx); - if( jsonFuncArgMightBeBinary(argv[0]) ){ - s.aBlob = (u8*)sqlite3_value_blob(argv[0]); - s.nBlob = sqlite3_value_bytes(argv[0]); + if( jsonArgIsJsonb(argv[0], &s) ){ iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1); }else{ s.zJson = (char*)sqlite3_value_text(argv[0]); @@ -211361,18 +212699,20 @@ static void jsonObjectStep( UNUSED_PARAMETER(argc); pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ + z = (const char*)sqlite3_value_text(argv[0]); + n = sqlite3Strlen30(z); if( pStr->zBuf==0 ){ jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '{'); - }else if( pStr->nUsed>1 ){ + }else if( pStr->nUsed>1 && z!=0 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; - z = (const char*)sqlite3_value_text(argv[0]); - n = sqlite3Strlen30(z); - jsonAppendString(pStr, z, n); - jsonAppendChar(pStr, ':'); - jsonAppendSqlValue(pStr, argv[1]); + if( z!=0 ){ + jsonAppendString(pStr, z, n); + jsonAppendChar(pStr, ':'); + jsonAppendSqlValue(pStr, argv[1]); + } } } static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ @@ -211885,9 +213225,8 @@ static int jsonEachFilter( memset(&p->sParse, 0, sizeof(p->sParse)); p->sParse.nJPRef = 1; p->sParse.db = p->db; - if( jsonFuncArgMightBeBinary(argv[0]) ){ - p->sParse.nBlob = sqlite3_value_bytes(argv[0]); - p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]); + if( jsonArgIsJsonb(argv[0], &p->sParse) ){ + /* We have JSONB */ }else{ p->sParse.zJson = (char*)sqlite3_value_text(argv[0]); p->sParse.nJson = sqlite3_value_bytes(argv[0]); @@ -212181,6 +213520,8 @@ SQLITE_PRIVATE int sqlite3JsonTableFunctions(sqlite3 *db){ #endif SQLITE_PRIVATE int sqlite3GetToken(const unsigned char*,int*); /* In the SQLite core */ +/* #include */ + /* ** If building separately, we will need some setup that is normally ** found in sqliteInt.h @@ -212211,6 +213552,14 @@ typedef unsigned int u32; # define ALWAYS(X) (X) # define NEVER(X) (X) #endif +#ifndef offsetof +#define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif #endif /* !defined(SQLITE_AMALGAMATION) */ /* Macro to check for 4-byte alignment. Only used inside of assert() */ @@ -212531,9 +213880,13 @@ struct RtreeMatchArg { RtreeGeomCallback cb; /* Info about the callback functions */ int nParam; /* Number of parameters to the SQL function */ sqlite3_value **apSqlParam; /* Original SQL parameter values */ - RtreeDValue aParam[1]; /* Values for parameters to the SQL function */ + RtreeDValue aParam[FLEXARRAY]; /* Values for parameters to the SQL function */ }; +/* Size of an RtreeMatchArg object with N parameters */ +#define SZ_RTREEMATCHARG(N) \ + (offsetof(RtreeMatchArg,aParam)+(N)*sizeof(RtreeDValue)) + #ifndef MAX # define MAX(x,y) ((x) < (y) ? (y) : (x)) #endif @@ -214222,7 +215575,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ } /* -** Return the N-dimensional volumn of the cell stored in *p. 
+** Return the N-dimensional volume of the cell stored in *p. */ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){ RtreeDValue area = (RtreeDValue)1; @@ -215988,7 +217341,7 @@ static sqlite3_stmt *rtreeCheckPrepare( /* ** The second and subsequent arguments to this function are a printf() ** style format string and arguments. This function formats the string and -** appends it to the report being accumuated in pCheck. +** appends it to the report being accumulated in pCheck. */ static void rtreeCheckAppendMsg(RtreeCheck *pCheck, const char *zFmt, ...){ va_list ap; @@ -217176,7 +218529,7 @@ static void geopolyBBoxFinal( ** Determine if point (x0,y0) is beneath line segment (x1,y1)->(x2,y2). ** Returns: ** -** +2 x0,y0 is on the line segement +** +2 x0,y0 is on the line segment ** ** +1 x0,y0 is beneath line segment ** @@ -217282,7 +218635,7 @@ static void geopolyWithinFunc( sqlite3_free(p2); } -/* Objects used by the overlap algorihm. */ +/* Objects used by the overlap algorithm. */ typedef struct GeoEvent GeoEvent; typedef struct GeoSegment GeoSegment; typedef struct GeoOverlap GeoOverlap; @@ -218329,8 +219682,7 @@ static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){ sqlite3_int64 nBlob; int memErr = 0; - nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue) - + nArg*sizeof(sqlite3_value*); + nBlob = SZ_RTREEMATCHARG(nArg) + nArg*sizeof(sqlite3_value*); pBlob = (RtreeMatchArg *)sqlite3_malloc64(nBlob); if( !pBlob ){ sqlite3_result_error_nomem(ctx); @@ -219425,7 +220777,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule( ** ** "RBU" stands for "Resumable Bulk Update". As in a large database update ** transmitted via a wireless network to a mobile device. A transaction -** applied using this extension is hence refered to as an "RBU update". +** applied using this extension is hence referred to as an "RBU update". ** ** ** LIMITATIONS @@ -219722,7 +221074,7 @@ SQLITE_API sqlite3rbu *sqlite3rbu_open( ** the next call to sqlite3rbu_vacuum() opens a handle that starts a ** new RBU vacuum operation. ** -** As with sqlite3rbu_open(), Zipvfs users should rever to the comment +** As with sqlite3rbu_open(), Zipvfs users should refer to the comment ** describing the sqlite3rbu_create_vfs() API function below for ** a description of the complications associated with using RBU with ** zipvfs databases. @@ -219818,7 +221170,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *pRbu); ** ** If the RBU update has been completely applied, mark the RBU database ** as fully applied. Otherwise, assuming no error has occurred, save the -** current state of the RBU update appliation to the RBU database. +** current state of the RBU update application to the RBU database. ** ** If an error has already occurred as part of an sqlite3rbu_step() ** or sqlite3rbu_open() call, or if one occurs within this function, an @@ -224744,7 +226096,7 @@ static int rbuVfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ /* If this is an RBU vacuum operation and this is the target database, ** pretend that it has at least one page. Otherwise, SQLite will not - ** check for the existance of a *-wal file. rbuVfsRead() contains + ** check for the existence of a *-wal file. rbuVfsRead() contains ** similar logic. */ if( rc==SQLITE_OK && *pSize==0 && p->pRbu && rbuIsVacuum(p->pRbu) @@ -226676,8 +228028,8 @@ static int dbpageUpdate( /* "INSERT INTO dbpage($PGNO,NULL)" causes page number $PGNO and ** all subsequent pages to be deleted. 
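** For example (editor's illustration, assuming the DBPAGE virtual table
** is enabled):
**
**     INSERT INTO sqlite_dbpage(pgno,data) VALUES(5,NULL);
**
** truncates the database to 4 pages.  The revised code below records
** pgnoTrunc = pgno-1 and resets pgno to 1 so that the remaining update
** logic sees a valid page number.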
*/ pTab->iDbTrunc = iDb; - pgno--; - pTab->pgnoTrunc = pgno; + pTab->pgnoTrunc = pgno-1; + pgno = 1; }else{ zErr = "bad page value"; goto update_fail; @@ -227974,7 +229326,7 @@ static int sessionTableInfo( /* ** This function is called to initialize the SessionTable.nCol, azCol[] ** abPK[] and azDflt[] members of SessionTable object pTab. If these -** fields are already initilialized, this function is a no-op. +** fields are already initialized, this function is a no-op. ** ** If an error occurs, an error code is stored in sqlite3_session.rc and ** non-zero returned. Or, if no error occurs but the table has no primary @@ -227993,6 +229345,8 @@ static int sessionInitTable( if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); + sqlite3_free(pTab->azCol); + pTab->abPK = 0; rc = sessionTableInfo(pSession, db, zDb, pTab->zName, &pTab->nCol, &pTab->nTotalCol, 0, &pTab->azCol, &pTab->azDflt, &pTab->aiIdx, &abPK, @@ -229000,7 +230354,9 @@ SQLITE_API int sqlite3session_diff( SessionTable *pTo; /* Table zTbl */ /* Locate and if necessary initialize the target table object */ + pSession->bAutoAttach++; rc = sessionFindTable(pSession, zTbl, &pTo); + pSession->bAutoAttach--; if( pTo==0 ) goto diff_out; if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){ rc = pSession->rc; @@ -229011,17 +230367,43 @@ SQLITE_API int sqlite3session_diff( if( rc==SQLITE_OK ){ int bHasPk = 0; int bMismatch = 0; - int nCol; /* Columns in zFrom.zTbl */ + int nCol = 0; /* Columns in zFrom.zTbl */ int bRowid = 0; - u8 *abPK; + u8 *abPK = 0; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, - &nCol, 0, 0, &azCol, 0, 0, &abPK, - pSession->bImplicitPK ? &bRowid : 0 - ); + char *zDbExists = 0; + + /* Check that database zFrom is attached. */ + zDbExists = sqlite3_mprintf("SELECT * FROM %Q.sqlite_schema", zFrom); + if( zDbExists==0 ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3_stmt *pDbExists = 0; + rc = sqlite3_prepare_v2(db, zDbExists, -1, &pDbExists, 0); + if( rc==SQLITE_ERROR ){ + rc = SQLITE_OK; + nCol = -1; + } + sqlite3_finalize(pDbExists); + sqlite3_free(zDbExists); + } + + if( rc==SQLITE_OK && nCol==0 ){ + rc = sessionTableInfo(0, db, zFrom, zTbl, + &nCol, 0, 0, &azCol, 0, 0, &abPK, + pSession->bImplicitPK ? &bRowid : 0 + ); + } if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ - bMismatch = 1; + if( nCol<=0 ){ + rc = SQLITE_SCHEMA; + if( pzErrMsg ){ + *pzErrMsg = sqlite3_mprintf("no such table: %s.%s", zFrom, zTbl); + } + }else{ + bMismatch = 1; + } }else{ int i; for(i=0; idb; /* Source database handle */ SessionTable *pTab; /* Used to iterate through attached tables */ - SessionBuffer buf = {0,0,0}; /* Buffer in which to accumlate changeset */ + SessionBuffer buf = {0,0,0}; /* Buffer in which to accumulate changeset */ int rc; /* Return code */ assert( xOutput==0 || (pnChangeset==0 && ppChangeset==0) ); @@ -230150,14 +231532,15 @@ SQLITE_API int sqlite3changeset_start_v2_strm( ** object and the buffer is full, discard some data to free up space. 
*/ static void sessionDiscardData(SessionInput *pIn){ - if( pIn->xInput && pIn->iNext>=sessions_strm_chunk_size ){ - int nMove = pIn->buf.nBuf - pIn->iNext; + if( pIn->xInput && pIn->iCurrent>=sessions_strm_chunk_size ){ + int nMove = pIn->buf.nBuf - pIn->iCurrent; assert( nMove>=0 ); if( nMove>0 ){ - memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iNext], nMove); + memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iCurrent], nMove); } - pIn->buf.nBuf -= pIn->iNext; - pIn->iNext = 0; + pIn->buf.nBuf -= pIn->iCurrent; + pIn->iNext -= pIn->iCurrent; + pIn->iCurrent = 0; pIn->nData = pIn->buf.nBuf; } } @@ -230511,8 +231894,8 @@ static int sessionChangesetNextOne( p->rc = sessionInputBuffer(&p->in, 2); if( p->rc!=SQLITE_OK ) return p->rc; - sessionDiscardData(&p->in); p->in.iCurrent = p->in.iNext; + sessionDiscardData(&p->in); /* If the iterator is already at the end of the changeset, return DONE. */ if( p->in.iNext>=p->in.nData ){ @@ -232871,14 +234254,19 @@ SQLITE_API int sqlite3changegroup_add_change( sqlite3_changegroup *pGrp, sqlite3_changeset_iter *pIter ){ + int rc = SQLITE_OK; + if( pIter->in.iCurrent==pIter->in.iNext || pIter->rc!=SQLITE_OK || pIter->bInvert ){ /* Iterator does not point to any valid entry or is an INVERT iterator. */ - return SQLITE_ERROR; + rc = SQLITE_ERROR; + }else{ + pIter->in.bNoDiscard = 1; + rc = sessionOneChangeToHash(pGrp, pIter, 0); } - return sessionOneChangeToHash(pGrp, pIter, 0); + return rc; } /* @@ -234176,6 +235564,7 @@ SQLITE_EXTENSION_INIT1 /* #include */ /* #include */ +/* #include */ #ifndef SQLITE_AMALGAMATION @@ -234231,6 +235620,18 @@ typedef sqlite3_uint64 u64; # define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) #endif +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(STRUCTURE,FIELD) ((size_t)((char*)&((STRUCTURE*)0)->FIELD)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + #endif /* Truncate very long tokens to this many bytes. Hard limit is @@ -234303,10 +235704,11 @@ typedef struct Fts5Colset Fts5Colset; */ struct Fts5Colset { int nCol; - int aiCol[1]; + int aiCol[FLEXARRAY]; }; - +/* Size (int bytes) of a complete Fts5Colset object with N columns. */ +#define SZ_FTS5COLSET(N) (sizeof(i64)*((N+2)/2)) /************************************************************************** ** Interface to code in fts5_config.c. fts5_config.c contains contains code @@ -235135,7 +236537,7 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** ** The "lemon" program processes an LALR(1) input grammar file, then uses ** this template to construct a parser. The "lemon" program inserts text -** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the +** at each "%%" line. Also, any "P-a-r-s-e" identifier prefix (without the ** interstitial "-" characters) contained in this template is changed into ** the value of the %name directive from the grammar. Otherwise, the content ** of this template is copied straight through into the generate parser @@ -237289,7 +238691,7 @@ static int fts5Bm25GetData( ** under consideration. ** ** The problem with this is that if (N < 2*nHit), the IDF is - ** negative. Which is undesirable. So the mimimum allowable IDF is + ** negative. Which is undesirable. So the minimum allowable IDF is ** (1e-6) - roughly the same as a term that appears in just over ** half of set of 5,000,000 documents. 
*/ double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) ); @@ -237752,7 +239154,7 @@ static char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn){ ** * The 52 upper and lower case ASCII characters, and ** * The 10 integer ASCII characters. ** * The underscore character "_" (0x5F). -** * The unicode "subsitute" character (0x1A). +** * The unicode "substitute" character (0x1A). */ static int sqlite3Fts5IsBareword(char t){ u8 aBareword[128] = { @@ -239070,9 +240472,13 @@ struct Fts5ExprNode { /* Child nodes. For a NOT node, this array always contains 2 entries. For ** AND or OR nodes, it contains 2 or more entries. */ int nChild; /* Number of child nodes */ - Fts5ExprNode *apChild[1]; /* Array of child nodes */ + Fts5ExprNode *apChild[FLEXARRAY]; /* Array of child nodes */ }; +/* Size (in bytes) of an Fts5ExprNode object that holds up to N children */ +#define SZ_FTS5EXPRNODE(N) \ + (offsetof(Fts5ExprNode,apChild) + (N)*sizeof(Fts5ExprNode*)) + #define Fts5NodeIsString(p) ((p)->eType==FTS5_TERM || (p)->eType==FTS5_STRING) /* @@ -239103,9 +240509,13 @@ struct Fts5ExprPhrase { Fts5ExprNode *pNode; /* FTS5_STRING node this phrase is part of */ Fts5Buffer poslist; /* Current position list */ int nTerm; /* Number of entries in aTerm[] */ - Fts5ExprTerm aTerm[1]; /* Terms that make up this phrase */ + Fts5ExprTerm aTerm[FLEXARRAY]; /* Terms that make up this phrase */ }; +/* Size (in bytes) of an Fts5ExprPhrase object that holds up to N terms */ +#define SZ_FTS5EXPRPHRASE(N) \ + (offsetof(Fts5ExprPhrase,aTerm) + (N)*sizeof(Fts5ExprTerm)) + /* ** One or more phrases that must appear within a certain token distance of ** each other within each matching document. @@ -239114,9 +240524,12 @@ struct Fts5ExprNearset { int nNear; /* NEAR parameter */ Fts5Colset *pColset; /* Columns to search (NULL -> all columns) */ int nPhrase; /* Number of entries in aPhrase[] array */ - Fts5ExprPhrase *apPhrase[1]; /* Array of phrase pointers */ + Fts5ExprPhrase *apPhrase[FLEXARRAY]; /* Array of phrase pointers */ }; +/* Size (in bytes) of an Fts5ExprNearset object covering up to N phrases */ +#define SZ_FTS5EXPRNEARSET(N) \ + (offsetof(Fts5ExprNearset,apPhrase)+(N)*sizeof(Fts5ExprPhrase*)) /* ** Parse context. @@ -239276,7 +240689,7 @@ static int sqlite3Fts5ExprNew( /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ if( sParse.rc==SQLITE_OK && iColnCol ){ - int n = sizeof(Fts5Colset); + int n = SZ_FTS5COLSET(1); Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n); if( pColset ){ pColset->nCol = 1; @@ -240634,7 +242047,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( if( pParse->rc==SQLITE_OK ){ if( pNear==0 ){ sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(SZALLOC+1); pRet = sqlite3_malloc64(nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -240645,7 +242058,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( int nNew = pNear->nPhrase + SZALLOC; sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + nNew * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(nNew+1); pRet = (Fts5ExprNearset*)sqlite3_realloc64(pNear, nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -240736,12 +242149,12 @@ static int fts5ParseTokenize( int nNew = SZALLOC + (pPhrase ? 
pPhrase->nTerm : 0); pNew = (Fts5ExprPhrase*)sqlite3_realloc64(pPhrase, - sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm) * nNew + SZ_FTS5EXPRPHRASE(nNew+1) ); if( pNew==0 ){ rc = SQLITE_NOMEM; }else{ - if( pPhrase==0 ) memset(pNew, 0, sizeof(Fts5ExprPhrase)); + if( pPhrase==0 ) memset(pNew, 0, SZ_FTS5EXPRPHRASE(1)); pCtx->pPhrase = pPhrase = pNew; pNew->nTerm = nNew - SZALLOC; } @@ -240849,7 +242262,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( if( sCtx.pPhrase==0 ){ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, SZ_FTS5EXPRPHRASE(1)); }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } @@ -240884,19 +242297,18 @@ static int sqlite3Fts5ExprClonePhrase( sizeof(Fts5ExprPhrase*)); } if( rc==SQLITE_OK ){ - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNode)); + pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRNODE(1)); } if( rc==SQLITE_OK ){ pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); + SZ_FTS5EXPRNEARSET(2)); } if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; Fts5Colset *pColset; - nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int); + nByte = SZ_FTS5COLSET(pColsetOrig->nCol); pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte); if( pColset ){ memcpy(pColset, pColsetOrig, (size_t)nByte); @@ -240924,7 +242336,7 @@ static int sqlite3Fts5ExprClonePhrase( }else{ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). 
*/ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRPHRASE(1)); } } @@ -240989,7 +242401,8 @@ static void sqlite3Fts5ParseSetDistance( ); return; } - nNear = nNear * 10 + (p->p[i] - '0'); + if( nNear<214748363 ) nNear = nNear * 10 + (p->p[i] - '0'); + /* ^^^^^^^^^^^^^^^--- Prevent integer overflow */ } }else{ nNear = FTS5_DEFAULT_NEARDIST; @@ -241018,7 +242431,7 @@ static Fts5Colset *fts5ParseColset( assert( pParse->rc==SQLITE_OK ); assert( iCol>=0 && iColpConfig->nCol ); - pNew = sqlite3_realloc64(p, sizeof(Fts5Colset) + sizeof(int)*nCol); + pNew = sqlite3_realloc64(p, SZ_FTS5COLSET(nCol+1)); if( pNew==0 ){ pParse->rc = SQLITE_NOMEM; }else{ @@ -241053,7 +242466,7 @@ static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse *pParse, Fts5Colset *p int nCol = pParse->pConfig->nCol; pRet = (Fts5Colset*)sqlite3Fts5MallocZero(&pParse->rc, - sizeof(Fts5Colset) + sizeof(int)*nCol + SZ_FTS5COLSET(nCol+1) ); if( pRet ){ int i; @@ -241114,7 +242527,7 @@ static Fts5Colset *sqlite3Fts5ParseColset( static Fts5Colset *fts5CloneColset(int *pRc, Fts5Colset *pOrig){ Fts5Colset *pRet; if( pOrig ){ - sqlite3_int64 nByte = sizeof(Fts5Colset) + (pOrig->nCol-1) * sizeof(int); + sqlite3_int64 nByte = SZ_FTS5COLSET(pOrig->nCol); pRet = (Fts5Colset*)sqlite3Fts5MallocZero(pRc, nByte); if( pRet ){ memcpy(pRet, pOrig, (size_t)nByte); @@ -241282,7 +242695,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( assert( pNear->nPhrase==1 ); assert( pParse->bPhraseToAnd ); - nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*); + nByte = SZ_FTS5EXPRNODE(nTerm+1); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ pRet->eType = FTS5_AND; @@ -241292,7 +242705,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( pParse->nPhrase--; for(ii=0; iirc, sizeof(Fts5ExprPhrase) + &pParse->rc, SZ_FTS5EXPRPHRASE(1) ); if( pPhrase ){ if( parseGrowPhraseArray(pParse) ){ @@ -241361,7 +242774,7 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( if( pRight->eType==eType ) nChild += pRight->nChild-1; } - nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1); + nByte = SZ_FTS5EXPRNODE(nChild); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ @@ -242236,7 +243649,7 @@ static int sqlite3Fts5ExprInstToken( } /* -** Clear the token mappings for all Fts5IndexIter objects mannaged by +** Clear the token mappings for all Fts5IndexIter objects managed by ** the expression passed as the only argument. */ static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ @@ -242271,7 +243684,7 @@ typedef struct Fts5HashEntry Fts5HashEntry; /* ** This file contains the implementation of an in-memory hash table used -** to accumuluate "term -> doclist" content before it is flused to a level-0 +** to accumulate "term -> doclist" content before it is flushed to a level-0 ** segment. 
*/ @@ -242328,7 +243741,7 @@ struct Fts5HashEntry { }; /* -** Eqivalent to: +** Equivalent to: ** ** char *fts5EntryKey(Fts5HashEntry *pEntry){ return zKey; } */ @@ -243264,9 +244677,13 @@ struct Fts5Structure { u64 nOriginCntr; /* Origin value for next top-level segment */ int nSegment; /* Total segments in this structure */ int nLevel; /* Number of levels in this index */ - Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */ + Fts5StructureLevel aLevel[FLEXARRAY]; /* Array of nLevel level objects */ }; +/* Size (in bytes) of an Fts5Structure object holding up to N levels */ +#define SZ_FTS5STRUCTURE(N) \ + (offsetof(Fts5Structure,aLevel) + (N)*sizeof(Fts5StructureLevel)) + /* ** An object of type Fts5SegWriter is used to write to segments. */ @@ -243396,11 +244813,15 @@ struct Fts5SegIter { ** Array of tombstone pages. Reference counted. */ struct Fts5TombstoneArray { - int nRef; /* Number of pointers to this object */ + int nRef; /* Number of pointers to this object */ int nTombstone; - Fts5Data *apTombstone[1]; /* Array of tombstone pages */ + Fts5Data *apTombstone[FLEXARRAY]; /* Array of tombstone pages */ }; +/* Size (in bytes) of an Fts5TombstoneArray holding up to N tombstones */ +#define SZ_FTS5TOMBSTONEARRAY(N) \ + (offsetof(Fts5TombstoneArray,apTombstone)+(N)*sizeof(Fts5Data*)) + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. @@ -243469,9 +244890,12 @@ struct Fts5Iter { i64 iSwitchRowid; /* Firstest rowid of other than aFirst[1] */ Fts5CResult *aFirst; /* Current merge state (see above) */ - Fts5SegIter aSeg[1]; /* Array of segment iterators */ + Fts5SegIter aSeg[FLEXARRAY]; /* Array of segment iterators */ }; +/* Size (in bytes) of an Fts5Iter object holding up to N segment iterators */ +#define SZ_FTS5ITER(N) (offsetof(Fts5Iter,aSeg)+(N)*sizeof(Fts5SegIter)) + /* ** An instance of the following type is used to iterate through the contents ** of a doclist-index record. 
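A recurring change across these FTS5 and R-tree hunks: one-element tail arrays (aParam[1], aLevel[1], apTombstone[1], aSeg[1]) become FLEXARRAY members, and each affected struct gains an SZ_*(N) macro that computes allocation sizes with offsetof() instead of sizeof(), so the tail array is never double-counted. A self-contained sketch of the idiom follows; Example, SZ_EXAMPLE and exampleNew are illustrative names, not upstream identifiers.

    #include <stddef.h>
    #include <stdlib.h>

    /* Portable flexible-array plumbing, as in the hunks above. */
    #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
    # define FLEXARRAY            /* true C99 flexible array member */
    #else
    # define FLEXARRAY 1          /* length-1 fallback for older compilers */
    #endif

    typedef struct Example Example;
    struct Example {
      int nItem;                  /* Number of entries in aItem[] */
      double aItem[FLEXARRAY];    /* Tail array, sized at allocation time */
    };

    /* Size (in bytes) of an Example holding N items.  Using offsetof()
    ** rather than sizeof(Example) keeps the computation exact whichever
    ** FLEXARRAY fallback is in effect. */
    #define SZ_EXAMPLE(N) (offsetof(Example,aItem) + (N)*sizeof(double))

    static Example *exampleNew(int n){
      Example *p = (Example*)malloc( SZ_EXAMPLE(n) );
      if( p ) p->nItem = n;
      return p;
    }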
@@ -243498,9 +244922,13 @@ struct Fts5DlidxLvl { struct Fts5DlidxIter { int nLvl; int iSegid; - Fts5DlidxLvl aLvl[1]; + Fts5DlidxLvl aLvl[FLEXARRAY]; }; +/* Size (in bytes) of an Fts5DlidxIter object with up to N levels */ +#define SZ_FTS5DLIDXITER(N) \ + (offsetof(Fts5DlidxIter,aLvl)+(N)*sizeof(Fts5DlidxLvl)) + static void fts5PutU16(u8 *aOut, u16 iVal){ aOut[0] = (iVal>>8); aOut[1] = (iVal&0xFF); @@ -243868,7 +245296,7 @@ static int sqlite3Fts5StructureTest(Fts5Index *p, void *pStruct){ static void fts5StructureMakeWritable(int *pRc, Fts5Structure **pp){ Fts5Structure *p = *pp; if( *pRc==SQLITE_OK && p->nRef>1 ){ - i64 nByte = sizeof(Fts5Structure)+(p->nLevel-1)*sizeof(Fts5StructureLevel); + i64 nByte = SZ_FTS5STRUCTURE(p->nLevel); Fts5Structure *pNew; pNew = (Fts5Structure*)sqlite3Fts5MallocZero(pRc, nByte); if( pNew ){ @@ -243942,10 +245370,7 @@ static int fts5StructureDecode( ){ return FTS5_CORRUPT; } - nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel-1) /* aLevel[] array */ - ); + nByte = SZ_FTS5STRUCTURE(nLevel); pRet = (Fts5Structure*)sqlite3Fts5MallocZero(&rc, nByte); if( pRet ){ @@ -244025,10 +245450,7 @@ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; - sqlite3_int64 nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel+1) /* aLevel[] array */ - ); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(nLevel+2); pStruct = sqlite3_realloc64(pStruct, nByte); if( pStruct ){ @@ -244567,7 +245989,7 @@ static Fts5DlidxIter *fts5DlidxIterInit( int bDone = 0; for(i=0; p->rc==SQLITE_OK && bDone==0; i++){ - sqlite3_int64 nByte = sizeof(Fts5DlidxIter) + i * sizeof(Fts5DlidxLvl); + sqlite3_int64 nByte = SZ_FTS5DLIDXITER(i+1); Fts5DlidxIter *pNew; pNew = (Fts5DlidxIter*)sqlite3_realloc64(pIter, nByte); @@ -244783,9 +246205,9 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ ** leave an error in the Fts5Index object. 
*/ static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ - const int nTomb = pIter->pSeg->nPgTombstone; + const i64 nTomb = (i64)pIter->pSeg->nPgTombstone; if( nTomb>0 ){ - int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray); + i64 nByte = SZ_FTS5TOMBSTONEARRAY(nTomb+1); Fts5TombstoneArray *pNew; pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -246246,8 +247668,7 @@ static Fts5Iter *fts5MultiIterAlloc( for(nSlot=2; nSlotaSeg[] */ + SZ_FTS5ITER(nSlot) + /* pNew + pNew->aSeg[] */ sizeof(Fts5CResult) * nSlot /* pNew->aFirst[] */ ); if( pNew ){ @@ -248048,7 +249469,7 @@ static void fts5DoSecureDelete( int iDelKeyOff = 0; /* Offset of deleted key, if any */ nIdx = nPg-iPgIdx; - aIdx = sqlite3Fts5MallocZero(&p->rc, nIdx+16); + aIdx = sqlite3Fts5MallocZero(&p->rc, ((i64)nIdx)+16); if( p->rc ) return; memcpy(aIdx, &aPg[iPgIdx], nIdx); @@ -248613,7 +250034,7 @@ static Fts5Structure *fts5IndexOptimizeStruct( Fts5Structure *pStruct ){ Fts5Structure *pNew = 0; - sqlite3_int64 nByte = sizeof(Fts5Structure); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(1); int nSeg = pStruct->nSegment; int i; @@ -248642,7 +250063,8 @@ static Fts5Structure *fts5IndexOptimizeStruct( assert( pStruct->aLevel[i].nMerge<=nThis ); } - nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel); + nByte += (((i64)pStruct->nLevel)+1) * sizeof(Fts5StructureLevel); + assert( nByte==SZ_FTS5STRUCTURE(pStruct->nLevel+2) ); pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ @@ -249219,9 +250641,13 @@ struct Fts5TokenDataIter { int nIterAlloc; Fts5PoslistReader *aPoslistReader; int *aPoslistToIter; - Fts5Iter *apIter[1]; + Fts5Iter *apIter[FLEXARRAY]; }; +/* Size in bytes of an Fts5TokenDataIter object holding up to N iterators */ +#define SZ_FTS5TOKENDATAITER(N) \ + (offsetof(Fts5TokenDataIter,apIter) + (N)*sizeof(Fts5Iter)) + /* ** The two input arrays - a1[] and a2[] - are in sorted order. This function ** merges the two arrays together and writes the result to output array @@ -249293,7 +250719,7 @@ static void fts5TokendataIterAppendMap( /* ** Sort the contents of the pT->aMap[] array. ** -** The sorting algorithm requries a malloc(). If this fails, an error code +** The sorting algorithm requires a malloc(). If this fails, an error code ** is left in Fts5Index.rc before returning. */ static void fts5TokendataIterSortMap(Fts5Index *p, Fts5TokenDataIter *pT){ @@ -249484,7 +250910,7 @@ static void fts5SetupPrefixIter( && p->pConfig->bPrefixInsttoken ){ s.pTokendata = &s2; - s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, sizeof(*s2.pT)); + s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, SZ_FTS5TOKENDATAITER(1)); } if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ @@ -249530,7 +250956,8 @@ static void fts5SetupPrefixIter( } } - pData = fts5IdxMalloc(p, sizeof(*pData)+s.doclist.n+FTS5_DATA_ZERO_PADDING); + pData = fts5IdxMalloc(p, sizeof(*pData) + + ((i64)s.doclist.n)+FTS5_DATA_ZERO_PADDING); assert( pData!=0 || p->rc!=SQLITE_OK ); if( pData ){ pData->p = (u8*)&pData[1]; @@ -249611,15 +251038,17 @@ static int sqlite3Fts5IndexRollback(Fts5Index *p){ ** and the initial version of the "averages" record (a zero-byte blob). 
*/ static int sqlite3Fts5IndexReinit(Fts5Index *p){ - Fts5Structure s; + Fts5Structure *pTmp; + u8 tmpSpace[SZ_FTS5STRUCTURE(1)]; fts5StructureInvalidate(p); fts5IndexDiscardData(p); - memset(&s, 0, sizeof(Fts5Structure)); + pTmp = (Fts5Structure*)tmpSpace; + memset(pTmp, 0, SZ_FTS5STRUCTURE(1)); if( p->pConfig->bContentlessDelete ){ - s.nOriginCntr = 1; + pTmp->nOriginCntr = 1; } fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0); - fts5StructureWrite(p, &s); + fts5StructureWrite(p, pTmp); return fts5IndexReturn(p); } @@ -249827,7 +251256,7 @@ static Fts5TokenDataIter *fts5AppendTokendataIter( if( p->rc==SQLITE_OK ){ if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){ int nAlloc = pIn ? pIn->nIterAlloc*2 : 16; - int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter); + int nByte = SZ_FTS5TOKENDATAITER(nAlloc+1); Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte); if( pNew==0 ){ @@ -250343,7 +251772,8 @@ static int fts5SetupPrefixIterTokendata( fts5BufferGrow(&p->rc, &token, nToken+1); assert( token.p!=0 || p->rc!=SQLITE_OK ); - ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*ctx.pT)); + ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); if( p->rc==SQLITE_OK ){ @@ -250474,7 +251904,8 @@ static int sqlite3Fts5IndexIterWriteTokendata( if( pIter->nSeg>0 ){ /* This is a prefix term iterator. */ if( pT==0 ){ - pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, sizeof(*pT)); + pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); pIter->pTokenDataIter = pT; } if( pT ){ @@ -251508,7 +252939,7 @@ static void fts5DecodeRowid( #if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ - int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid compenents */ + int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid components */ fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); if( iSegid==0 ){ @@ -251754,7 +253185,7 @@ static void fts5DecodeFunction( ** buffer overreads even if the record is corrupt. */ n = sqlite3_value_bytes(apVal[1]); aBlob = sqlite3_value_blob(apVal[1]); - nSpace = n + FTS5_DATA_ZERO_PADDING; + nSpace = ((i64)n) + FTS5_DATA_ZERO_PADDING; a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace); if( a==0 ) goto decode_out; if( n>0 ) memcpy(a, aBlob, n); @@ -252469,9 +253900,11 @@ struct Fts5Sorter { i64 iRowid; /* Current rowid */ const u8 *aPoslist; /* Position lists for current row */ int nIdx; /* Number of entries in aIdx[] */ - int aIdx[1]; /* Offsets into aPoslist for current row */ + int aIdx[FLEXARRAY]; /* Offsets into aPoslist for current row */ }; +/* Size (int bytes) of an Fts5Sorter object with N indexes */ +#define SZ_FTS5SORTER(N) (offsetof(Fts5Sorter,nIdx)+((N+2)/2)*sizeof(i64)) /* ** Virtual-table cursor object. 
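The same hunks also widen allocation-size arithmetic to 64 bits before adding padding or scaling by a count, as in ((i64)nIdx)+16 and ((i64)s.doclist.n)+FTS5_DATA_ZERO_PADDING above. A minimal illustration of the overflow being avoided; allocArray is a hypothetical helper, not code from this patch.

    #include "sqlite3.h"

    /* With plain int, n*szElem can wrap to a small positive value before
    ** the allocator ever sees it.  Promoting to sqlite3_int64 first keeps
    ** the byte count exact for large n (callers ensure n>=0). */
    static void *allocArray(int n, int szElem){
      sqlite3_int64 nByte = ((sqlite3_int64)n) * (sqlite3_int64)szElem;
      return sqlite3_malloc64((sqlite3_uint64)nByte);
    }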
@@ -253349,7 +254782,7 @@ static int fts5CursorFirstSorted( const char *zRankArgs = pCsr->zRankArgs; nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr); - nByte = sizeof(Fts5Sorter) + sizeof(int) * (nPhrase-1); + nByte = SZ_FTS5SORTER(nPhrase); pSorter = (Fts5Sorter*)sqlite3_malloc64(nByte); if( pSorter==0 ) return SQLITE_NOMEM; memset(pSorter, 0, (size_t)nByte); @@ -255875,7 +257308,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3", -1, SQLITE_TRANSIENT); } /* @@ -256100,8 +257533,8 @@ static int fts5Init(sqlite3 *db){ ** its entry point to enable the matchinfo() demo. */ #ifdef SQLITE_FTS5_ENABLE_TEST_MI if( rc==SQLITE_OK ){ - extern int sqlite3Fts5TestRegisterMatchinfo(sqlite3*); - rc = sqlite3Fts5TestRegisterMatchinfo(db); + extern int sqlite3Fts5TestRegisterMatchinfoAPI(fts5_api*); + rc = sqlite3Fts5TestRegisterMatchinfoAPI(&pGlobal->api); } #endif @@ -256690,6 +258123,7 @@ static int fts5StorageDeleteFromIndex( for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ sqlite3_value *pVal = 0; + sqlite3_value *pFree = 0; const char *pText = 0; int nText = 0; const char *pLoc = 0; @@ -256706,11 +258140,22 @@ static int fts5StorageDeleteFromIndex( if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); }else{ - pText = (const char*)sqlite3_value_text(pVal); - nText = sqlite3_value_bytes(pVal); - if( pConfig->bLocale && pSeek ){ - pLoc = (const char*)sqlite3_column_text(pSeek, iCol + pConfig->nCol); - nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + if( sqlite3_value_type(pVal)!=SQLITE_TEXT ){ + /* Make a copy of the value to work with. This is because the call + ** to sqlite3_value_text() below forces the type of the value to + ** SQLITE_TEXT, and we may need to use it again later. */ + pFree = pVal = sqlite3_value_dup(pVal); + if( pVal==0 ){ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pSeek ){ + pLoc = (const char*)sqlite3_column_text(pSeek, iCol+pConfig->nCol); + nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + } } } @@ -256726,6 +258171,7 @@ static int fts5StorageDeleteFromIndex( } sqlite3Fts5ClearLocale(pConfig); } + sqlite3_value_free(pFree); } } if( rc==SQLITE_OK && p->nTotalRow<1 ){ @@ -259939,7 +261385,6 @@ static void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){ aAscii[0] = 0; /* 0x00 is never a token character */ } - /* ** 2015 May 30 ** @@ -260480,12 +261925,12 @@ static int fts5VocabInitVtab( *pzErr = sqlite3_mprintf("wrong number of vtable arguments"); rc = SQLITE_ERROR; }else{ - int nByte; /* Bytes of space to allocate */ + i64 nByte; /* Bytes of space to allocate */ const char *zDb = bDb ? argv[3] : argv[1]; const char *zTab = bDb ? argv[4] : argv[3]; const char *zType = bDb ? 
argv[5] : argv[4]; - int nDb = (int)strlen(zDb)+1; - int nTab = (int)strlen(zTab)+1; + i64 nDb = strlen(zDb)+1; + i64 nTab = strlen(zTab)+1; int eType = 0; rc = fts5VocabTableType(zType, pzErr, &eType); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index 5e07ce68e9..c34235d84d 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -134,7 +134,7 @@ extern "C" { ** ** Since [version 3.6.18] ([dateof:3.6.18]), ** SQLite source code has been stored in the -** Fossil configuration management +** Fossil configuration management ** system. ^The SQLITE_SOURCE_ID macro evaluates to ** a string which identifies a particular check-in of SQLite ** within its configuration management system. ^The SQLITE_SOURCE_ID @@ -147,9 +147,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.49.1" -#define SQLITE_VERSION_NUMBER 3049001 -#define SQLITE_SOURCE_ID "2025-02-18 13:38:58 873d4e274b4988d260ba8354a9718324a1c26187a4ab4c1cc0227c03d0f10e70" +#define SQLITE_VERSION "3.50.4" +#define SQLITE_VERSION_NUMBER 3050004 +#define SQLITE_SOURCE_ID "2025-07-30 19:33:53 4d8adfb30e03f9cf27f800a2c1ba3c48fb4ca1b08b0f5ed59a4d5ecbf45e20a3" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1164,6 +1164,12 @@ struct sqlite3_io_methods { ** the value that M is to be set to. Before returning, the 32-bit signed ** integer is overwritten with the previous value of M. ** +**
+** <li>[[SQLITE_FCNTL_BLOCK_ON_CONNECT]]
+** The [SQLITE_FCNTL_BLOCK_ON_CONNECT] opcode is used to configure the
+** VFS to block when taking a SHARED lock to connect to a wal mode database.
+** This is used to implement the functionality associated with
+** SQLITE_SETLK_BLOCK_ON_CONNECT.
+**
 **
 ** <li>[[SQLITE_FCNTL_DATA_VERSION]]
 ** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to
 ** a database file. The argument is a pointer to a 32-bit unsigned integer.
@@ -1260,6 +1266,7 @@ struct sqlite3_io_methods {
 #define SQLITE_FCNTL_CKSM_FILE               41
 #define SQLITE_FCNTL_RESET_CACHE             42
 #define SQLITE_FCNTL_NULL_IO                 43
+#define SQLITE_FCNTL_BLOCK_ON_CONNECT        44
 
 /* deprecated names */
 #define SQLITE_GET_LOCKPROXYFILE     SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -1990,13 +1997,16 @@ struct sqlite3_mem_methods {
 **
 ** [[SQLITE_CONFIG_LOOKASIDE]]
 ** <dt>SQLITE_CONFIG_LOOKASIDE</dt>
 ** <dd> ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine
-** the default size of [lookaside memory] on each [database connection].
 ** The first argument is the
-** size of each lookaside buffer slot and the second is the number of
-** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE
-** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
-** option to [sqlite3_db_config()] can be used to change the lookaside
-** configuration on individual connections.)^
+** size of each lookaside buffer slot ("sz") and the second is the number of
+** slots allocated to each database connection ("cnt").)^
+** ^(SQLITE_CONFIG_LOOKASIDE sets the default lookaside size.
+** The [SQLITE_DBCONFIG_LOOKASIDE] option to [sqlite3_db_config()] can
+** be used to change the lookaside configuration on individual connections.)^
+** The [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to change the
+** default lookaside configuration at compile-time.
+**
 **
 ** [[SQLITE_CONFIG_PCACHE2]]
 ** <dt>SQLITE_CONFIG_PCACHE2</dt>
 ** <dd> ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
@@ -2233,31 +2243,50 @@ struct sqlite3_mem_methods {
 ** [[SQLITE_DBCONFIG_LOOKASIDE]]
 ** <dt>SQLITE_DBCONFIG_LOOKASIDE</dt>
 ** <dd> The SQLITE_DBCONFIG_LOOKASIDE option is used to adjust the
-** configuration of the lookaside memory allocator within a database
+** configuration of the [lookaside memory allocator] within a database
 ** connection.
 ** The arguments to the SQLITE_DBCONFIG_LOOKASIDE option are not
 ** in the [DBCONFIG arguments|usual format].
 ** The SQLITE_DBCONFIG_LOOKASIDE option takes three arguments, not two,
 ** so that a call to [sqlite3_db_config()] that uses SQLITE_DBCONFIG_LOOKASIDE
 ** should have a total of five parameters.
-** ^The first argument (the third parameter to [sqlite3_db_config()] is a
+** <ol>
+** <li>The first argument ("buf") is a
 ** pointer to a memory buffer to use for lookaside memory.
-** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb
-** may be NULL in which case SQLite will allocate the
-** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the
-** size of each lookaside buffer slot. ^The third argument is the number of
-** slots. The size of the buffer in the first argument must be greater than
-** or equal to the product of the second and third arguments. The buffer
-** must be aligned to an 8-byte boundary. ^If the second argument to
-** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally
-** rounded down to the next smaller multiple of 8. ^(The lookaside memory
+** The first argument may be NULL in which case SQLite will allocate the
+** lookaside buffer itself using [sqlite3_malloc()].
+** <li>The second argument ("sz") is the
+** size of each lookaside buffer slot. Lookaside is disabled if "sz"
+** is less than 8. The "sz" argument should be a multiple of 8 less than
+** 65536. If "sz" does not meet this constraint, it is reduced in size until
+** it does.
+** <li>The third argument ("cnt") is the number of slots. Lookaside is disabled
+** if "cnt"is less than 1. The "cnt" value will be reduced, if necessary, so
+** that the product of "sz" and "cnt" does not exceed 2,147,418,112. The "cnt"
+** parameter is usually chosen so that the product of "sz" and "cnt" is less
+** than 1,000,000.
+** </ol>
+** <p>If the "buf" argument is not NULL, then it must
+** point to a memory buffer with a size that is greater than
+** or equal to the product of "sz" and "cnt".
+** The buffer must be aligned to an 8-byte boundary.
+** The lookaside memory
 ** configuration for a database connection can only be changed when that
 ** connection is not currently using lookaside memory, or in other words
-** when the "current value" returned by
-** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
 ** Any attempt to change the lookaside memory configuration when lookaside
 ** memory is in use leaves the configuration unchanged and returns
-** [SQLITE_BUSY].)^
+** when the value returned by [SQLITE_DBSTATUS_LOOKASIDE_USED] is zero.
 ** Any attempt to change the lookaside memory configuration when lookaside
 ** memory is in use leaves the configuration unchanged and returns
+** [SQLITE_BUSY].
+** If the "buf" argument is NULL and an attempt
+** to allocate memory based on "sz" and "cnt" fails, then
+** lookaside is silently disabled.
+** <p>The [SQLITE_CONFIG_LOOKASIDE] configuration option can be used to set the
 ** default lookaside configuration at initialization. The
+** [-DSQLITE_DEFAULT_LOOKASIDE] option can be used to set the default lookaside
+** configuration at compile-time. Typical values for lookaside are 1200 for
+** "sz" and 40 to 100 for "cnt".
+**
 **
 ** [[SQLITE_DBCONFIG_ENABLE_FKEY]]
 ** <dt>SQLITE_DBCONFIG_ENABLE_FKEY</dt>
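The three-argument SQLITE_DBCONFIG_LOOKASIDE form described above reduces to a single sqlite3_db_config() call in practice. A hedged sketch; configureLookaside is an illustrative name, and the sz/cnt values are simply drawn from the "typical" range the text mentions.

    #include "sqlite3.h"

    /* Ask SQLite to allocate the lookaside pool itself (NULL "buf"),
    ** with 1200-byte slots and 64 slots.  Returns SQLITE_BUSY if the
    ** connection is currently using lookaside memory. */
    static int configureLookaside(sqlite3 *db){
      return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 1200, 64);
    }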
    @@ -2994,6 +3023,44 @@ SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); */ SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); +/* +** CAPI3REF: Set the Setlk Timeout +** METHOD: sqlite3 +** +** This routine is only useful in SQLITE_ENABLE_SETLK_TIMEOUT builds. If +** the VFS supports blocking locks, it sets the timeout in ms used by +** eligible locks taken on wal mode databases by the specified database +** handle. In non-SQLITE_ENABLE_SETLK_TIMEOUT builds, or if the VFS does +** not support blocking locks, this function is a no-op. +** +** Passing 0 to this function disables blocking locks altogether. Passing +** -1 to this function requests that the VFS blocks for a long time - +** indefinitely if possible. The results of passing any other negative value +** are undefined. +** +** Internally, each SQLite database handle store two timeout values - the +** busy-timeout (used for rollback mode databases, or if the VFS does not +** support blocking locks) and the setlk-timeout (used for blocking locks +** on wal-mode databases). The sqlite3_busy_timeout() method sets both +** values, this function sets only the setlk-timeout value. Therefore, +** to configure separate busy-timeout and setlk-timeout values for a single +** database handle, call sqlite3_busy_timeout() followed by this function. +** +** Whenever the number of connections to a wal mode database falls from +** 1 to 0, the last connection takes an exclusive lock on the database, +** then checkpoints and deletes the wal file. While it is doing this, any +** new connection that tries to read from the database fails with an +** SQLITE_BUSY error. Or, if the SQLITE_SETLK_BLOCK_ON_CONNECT flag is +** passed to this API, the new connection blocks until the exclusive lock +** has been released. +*/ +SQLITE_API int sqlite3_setlk_timeout(sqlite3*, int ms, int flags); + +/* +** CAPI3REF: Flags for sqlite3_setlk_timeout() +*/ +#define SQLITE_SETLK_BLOCK_ON_CONNECT 0x01 + /* ** CAPI3REF: Convenience Routines For Running Queries ** METHOD: sqlite3 @@ -4013,7 +4080,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of ** database filename D with corresponding journal file J and WAL file W and -** with N URI parameters key/values pairs in the array P. The result from +** an array P of N URI Key/Value pairs. The result from ** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that ** is safe to pass to routines like: **
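The sqlite3_setlk_timeout() interface added in the hunk above composes with sqlite3_busy_timeout() exactly as its comment describes: the latter sets both stored timeouts, the former then overrides only the setlk value. A hypothetical configuration sketch; configureTimeouts is an illustrative name, and the call only has an effect in SQLITE_ENABLE_SETLK_TIMEOUT builds whose VFS supports blocking locks.

    #include "sqlite3.h"

    /* Use a 5000 ms timeout for rollback-mode/busy locks, but block
    ** indefinitely for eligible wal-mode locks, including while taking
    ** the SHARED lock at connect time. */
    static void configureTimeouts(sqlite3 *db){
      sqlite3_busy_timeout(db, 5000);   /* sets busy- and setlk-timeouts */
      sqlite3_setlk_timeout(db, -1, SQLITE_SETLK_BLOCK_ON_CONNECT);
    }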
      @@ -4694,7 +4761,7 @@ typedef struct sqlite3_context sqlite3_context; ** METHOD: sqlite3_stmt ** ** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following +** literals may be replaced by a [parameter] that matches one of the following ** templates: ** **
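The templates referred to above are the "?", "?NNN", ":VVV", "@VVV" and "$VVV" forms. A short sketch of binding by name; insertUser, the user table and its columns are hypothetical.

    #include "sqlite3.h"

    /* Bind one text and one integer parameter by name, then run the
    ** statement once. */
    static int insertUser(sqlite3 *db, const char *zName, int iAge){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db,
          "INSERT INTO user(name,age) VALUES(:name, @age)", -1, &pStmt, 0);
      if( rc==SQLITE_OK ){
        sqlite3_bind_text(pStmt, sqlite3_bind_parameter_index(pStmt, ":name"),
                          zName, -1, SQLITE_TRANSIENT);
        sqlite3_bind_int(pStmt, sqlite3_bind_parameter_index(pStmt, "@age"),
                         iAge);
        rc = sqlite3_step(pStmt)==SQLITE_DONE ? SQLITE_OK : sqlite3_errcode(db);
      }
      sqlite3_finalize(pStmt);
      return rc;
    }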
        @@ -4739,7 +4806,7 @@ typedef struct sqlite3_context sqlite3_context; ** ** [[byte-order determination rules]] ^The byte-order of ** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF) -** found in first character, which is removed, or in the absence of a BOM +** found in the first character, which is removed, or in the absence of a BOM ** the byte order is the native byte order of the host ** machine for sqlite3_bind_text16() or the byte order specified in ** the 6th parameter for sqlite3_bind_text64().)^ @@ -4759,7 +4826,7 @@ typedef struct sqlite3_context sqlite3_context; ** or sqlite3_bind_text16() or sqlite3_bind_text64() then ** that parameter must be the byte offset ** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occurs at byte offsets less than +** terminated. If any NUL characters occur at byte offsets less than ** the value of the fourth parameter then the resulting string value will ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. @@ -4971,7 +5038,7 @@ SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); ** METHOD: sqlite3_stmt ** ** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in +** table column that is the origin of a particular result column in a ** [SELECT] statement. ** ^The name of the database or table or column can be returned as ** either a UTF-8 or UTF-16 string. ^The _database_ routines return @@ -5109,7 +5176,7 @@ SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); ** other than [SQLITE_ROW] before any subsequent invocation of ** sqlite3_step(). Failure to reset the prepared statement using ** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from -** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1], +** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]), ** sqlite3_step() began ** calling [sqlite3_reset()] automatically in this circumstance rather ** than returning [SQLITE_MISUSE]. This is not considered a compatibility @@ -5540,8 +5607,8 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** ** For best security, the [SQLITE_DIRECTONLY] flag is recommended for ** all application-defined SQL functions that do not need to be -** used inside of triggers, view, CHECK constraints, or other elements of -** the database schema. This flags is especially recommended for SQL +** used inside of triggers, views, CHECK constraints, or other elements of +** the database schema. This flag is especially recommended for SQL ** functions that have side effects or reveal internal application state. ** Without this flag, an attacker might be able to modify the schema of ** a database file to include invocations of the function with parameters @@ -5572,7 +5639,7 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** [user-defined window functions|available here]. ** ** ^(If the final parameter to sqlite3_create_function_v2() or -** sqlite3_create_window_function() is not NULL, then it is destructor for +** sqlite3_create_window_function() is not NULL, then it is the destructor for ** the application data pointer. 
The destructor is invoked when the function ** is deleted, either by being overloaded or when the database connection ** closes.)^ ^The destructor is also invoked if the call to @@ -5972,7 +6039,7 @@ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); ** METHOD: sqlite3_value ** ** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] -** object D and returns a pointer to that copy. ^The [sqlite3_value] returned +** object V and returns a pointer to that copy. ^The [sqlite3_value] returned ** is a [protected sqlite3_value] object even if the input is not. ** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a ** memory allocation fails. ^If V is a [pointer value], then the result @@ -6010,7 +6077,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*); ** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is -** determined by the N parameter on first successful call. Changing the +** determined by the N parameter on the first successful call. Changing the ** value of N in any subsequent call to sqlite3_aggregate_context() within ** the same aggregate function instance will not resize the memory ** allocation.)^ Within the xFinal callback, it is customary to set @@ -6172,7 +6239,7 @@ SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(voi ** ** Security Warning: These interfaces should not be exposed in scripting ** languages or in other circumstances where it might be possible for an -** an attacker to invoke them. Any agent that can invoke these interfaces +** attacker to invoke them. Any agent that can invoke these interfaces ** can probably also take control of the process. ** ** Database connection client data is only available for SQLite @@ -6286,7 +6353,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** pointed to by the 2nd parameter are taken as the application-defined ** function result. If the 3rd parameter is non-negative, then it ** must be the byte offset into the string where the NUL terminator would -** appear if the string where NUL terminated. If any NUL characters occur +** appear if the string were NUL terminated. If any NUL characters occur ** in the string at a byte offset that is less than the value of the 3rd ** parameter, then the resulting string will contain embedded NULs and the ** result of expressions operating on strings with embedded NULs is undefined. @@ -6344,7 +6411,7 @@ typedef void (*sqlite3_destructor_type)(void*); ** string and preferably a string literal. The sqlite3_result_pointer() ** routine is part of the [pointer passing interface] added for SQLite 3.20.0. ** -** If these routines are called from within the different thread +** If these routines are called from within a different thread ** than the one containing the application-defined function that received ** the [sqlite3_context] pointer, the results are undefined. */ @@ -6750,7 +6817,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^The sqlite3_db_name(D,N) interface returns a pointer to the schema name -** for the N-th database on database connection D, or a NULL pointer of N is +** for the N-th database on database connection D, or a NULL pointer if N is ** out of range. An N value of 0 means the main database file. An N of 1 is ** the "temp" schema. Larger values of N correspond to various ATTACH-ed ** databases. @@ -6845,7 +6912,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
        The SQLITE_TXN_READ state means that the database is currently ** in a read transaction. Content has been read from the database file ** but nothing in the database file has changed. The transaction state -** will advanced to SQLITE_TXN_WRITE if any changes occur and there are +** will be advanced to SQLITE_TXN_WRITE if any changes occur and there are ** no other conflicting concurrent write transactions. The transaction ** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or ** [COMMIT].
        @@ -6854,7 +6921,7 @@ SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); **
        The SQLITE_TXN_WRITE state means that the database is currently ** in a write transaction. Content has been written to the database file ** but has not yet committed. The transaction state will change to -** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
        +** SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT]. */ #define SQLITE_TXN_NONE 0 #define SQLITE_TXN_READ 1 @@ -7005,6 +7072,8 @@ SQLITE_API int sqlite3_autovacuum_pages( ** ** ^The second argument is a pointer to the function to invoke when a ** row is updated, inserted or deleted in a rowid table. +** ^The update hook is disabled by invoking sqlite3_update_hook() +** with a NULL pointer as the second parameter. ** ^The first argument to the callback is a copy of the third argument ** to sqlite3_update_hook(). ** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], @@ -7133,7 +7202,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); ** CAPI3REF: Impose A Limit On Heap Size ** ** These interfaces impose limits on the amount of heap memory that will be -** by all database connections within a single process. +** used by all database connections within a single process. ** ** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the ** soft limit on the amount of heap memory that may be allocated by SQLite. @@ -7191,7 +7260,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); **
      )^ ** ** The circumstances under which SQLite will enforce the heap limits may -** changes in future releases of SQLite. +** change in future releases of SQLite. */ SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N); @@ -7306,8 +7375,8 @@ SQLITE_API int sqlite3_table_column_metadata( ** ^The entry point is zProc. ** ^(zProc may be 0, in which case SQLite will try to come up with an ** entry point name on its own. It first tries "sqlite3_extension_init". -** If that does not work, it constructs a name "sqlite3_X_init" where the -** X is consists of the lower-case equivalent of all ASCII alphabetic +** If that does not work, it constructs a name "sqlite3_X_init" where +** X consists of the lower-case equivalent of all ASCII alphabetic ** characters in the filename from the last "/" to the first following ** "." and omitting any initial "lib".)^ ** ^The sqlite3_load_extension() interface returns @@ -7378,7 +7447,7 @@ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); ** ^(Even though the function prototype shows that xEntryPoint() takes ** no arguments and returns void, SQLite invokes xEntryPoint() with three ** arguments and expects an integer result as if the signature of the -** entry point where as follows: +** entry point were as follows: ** **
 **    int xEntryPoint(
 **      sqlite3 *db,
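A hedged sketch of an entry point with the shape described above, registered through sqlite3_auto_extension() so that it runs against every subsequently opened connection; myInit and registerMyInit are illustrative names.

    #include "sqlite3.h"

    /* A do-nothing entry point matching the documented signature. */
    static int myInit(sqlite3 *db, const char **pzErrMsg,
                      const struct sqlite3_api_routines *pThunk){
      (void)db; (void)pzErrMsg; (void)pThunk;
      /* e.g. register application-defined SQL functions on db here */
      return SQLITE_OK;
    }

    /* Arrange for myInit() to run on every subsequent sqlite3_open(). */
    static void registerMyInit(void){
      sqlite3_auto_extension((void(*)(void))myInit);
    }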
      @@ -7542,7 +7611,7 @@ struct sqlite3_module {
       ** virtual table and might not be checked again by the byte code.)^ ^(The
       ** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
       ** is left in its default setting of false, the constraint will always be
      -** checked separately in byte code.  If the omit flag is change to true, then
      +** checked separately in byte code.  If the omit flag is changed to true, then
       ** the constraint may or may not be checked in byte code.  In other words,
       ** when the omit flag is true there is no guarantee that the constraint will
       ** not be checked again using byte code.)^
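As an illustration of the omit flag discussed in this hunk, here is a minimal hypothetical xBestIndex that claims a usable equality constraint on column 0 and promises that the virtual table itself enforces it exactly; myBestIndex and the cost figures are illustrative.

    #include "sqlite3.h"

    static int myBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
      int i;
      (void)pVTab;
      for(i=0; i<pInfo->nConstraint; i++){
        const struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i];
        if( pCons->usable && pCons->iColumn==0
         && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ ){
          pInfo->aConstraintUsage[i].argvIndex = 1; /* value goes to xFilter */
          pInfo->aConstraintUsage[i].omit = 1;      /* skip byte-code re-check */
          pInfo->idxNum = 1;
          pInfo->estimatedCost = 10.0;
          return SQLITE_OK;
        }
      }
      pInfo->idxNum = 0;                            /* full-scan fallback */
      pInfo->estimatedCost = 1000000.0;
      return SQLITE_OK;
    }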
      @@ -7568,7 +7637,7 @@ struct sqlite3_module {
       ** The xBestIndex method may optionally populate the idxFlags field with a
       ** mask of SQLITE_INDEX_SCAN_* flags. One such flag is
       ** [SQLITE_INDEX_SCAN_HEX], which if set causes the [EXPLAIN QUERY PLAN]
      -** output to show the idxNum has hex instead of as decimal.  Another flag is
      +** output to show the idxNum as hex instead of as decimal.  Another flag is
       ** SQLITE_INDEX_SCAN_UNIQUE, which if set indicates that the query plan will
       ** return at most one row.
       **
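A companion sketch for the idxFlags mask described above: an xBestIndex arm that advertises a guaranteed one-row plan. myBestIndexByRowid is an illustrative name, not upstream code.

    #include "sqlite3.h"

    static int myBestIndexByRowid(sqlite3_vtab *pVTab,
                                  sqlite3_index_info *pInfo){
      (void)pVTab;
      pInfo->idxNum = 1;
      pInfo->estimatedCost = 1.0;
      pInfo->estimatedRows = 1;
      pInfo->idxFlags = SQLITE_INDEX_SCAN_UNIQUE;  /* at most one row */
      return SQLITE_OK;
    }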
      @@ -7709,7 +7778,7 @@ struct sqlite3_index_info {
       ** the implementation of the [virtual table module].   ^The fourth
       ** parameter is an arbitrary client data pointer that is passed through
       ** into the [xCreate] and [xConnect] methods of the virtual table module
      -** when a new virtual table is be being created or reinitialized.
      +** when a new virtual table is being created or reinitialized.
       **
       ** ^The sqlite3_create_module_v2() interface has a fifth parameter which
       ** is a pointer to a destructor for the pClientData.  ^SQLite will
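The destructor passed as that fifth parameter is how per-module client data gets reclaimed. A hedged registration sketch; MyClientData, myClientDataFree, registerMyModule and the module name "mymodule" are all illustrative.

    #include <stdlib.h>
    #include "sqlite3.h"

    typedef struct MyClientData { int nRef; } MyClientData;

    static void myClientDataFree(void *p){ free(p); }

    /* SQLite invokes myClientDataFree() on the client data when the
    ** pointer is no longer needed, e.g. when the module is replaced or
    ** the connection closes. */
    static int registerMyModule(sqlite3 *db, const sqlite3_module *pModule){
      MyClientData *p = (MyClientData*)calloc(1, sizeof(*p));
      if( p==0 ) return SQLITE_NOMEM;
      return sqlite3_create_module_v2(db, "mymodule", pModule, p,
                                      myClientDataFree);
    }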
      @@ -7874,7 +7943,7 @@ typedef struct sqlite3_blob sqlite3_blob;
       ** in *ppBlob. Otherwise an [error code] is returned and, unless the error
       ** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
       ** the API is not misused, it is always safe to call [sqlite3_blob_close()]
      -** on *ppBlob after this function it returns.
      +** on *ppBlob after this function returns.
       **
       ** This function fails with SQLITE_ERROR if any of the following are true:
       ** 
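A usage-level sketch of the incremental-blob routines these hunks document: open read-only, size with sqlite3_blob_bytes(), read, close. The database objects ("main", t1, content) and readBlobPrefix are hypothetical.

    #include "sqlite3.h"

    /* Copy up to 16 bytes from the start of the blob stored in
    ** main.t1.content at rowid iRow.  sqlite3_blob_close() is a harmless
    ** no-op on a NULL handle, so the error path needs no extra branch. */
    static int readBlobPrefix(sqlite3 *db, sqlite3_int64 iRow,
                              unsigned char aOut[16]){
      sqlite3_blob *pBlob = 0;
      int rc = sqlite3_blob_open(db, "main", "t1", "content", iRow, 0, &pBlob);
      if( rc==SQLITE_OK ){
        int n = sqlite3_blob_bytes(pBlob);
        rc = sqlite3_blob_read(pBlob, aOut, n<16 ? n : 16, 0);
      }
      sqlite3_blob_close(pBlob);
      return rc;
    }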
        @@ -7994,7 +8063,7 @@ SQLITE_API int sqlite3_blob_close(sqlite3_blob *); ** ** ^Returns the size in bytes of the BLOB accessible via the ** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing +** incremental blob I/O routines can only read or overwrite existing ** blob content; they cannot change the size of a blob. ** ** This routine only works on a [BLOB handle] which has been created @@ -8144,7 +8213,7 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ^The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc() ** routine returns NULL if it is unable to allocate the requested -** mutex. The argument to sqlite3_mutex_alloc() must one of these +** mutex. The argument to sqlite3_mutex_alloc() must be one of these ** integer constants: ** **
          @@ -8377,7 +8446,7 @@ SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); ** CAPI3REF: Retrieve the mutex for a database connection ** METHOD: sqlite3 ** -** ^This interface returns a pointer the [sqlite3_mutex] object that +** ^This interface returns a pointer to the [sqlite3_mutex] object that ** serializes access to the [database connection] given in the argument ** when the [threading mode] is Serialized. ** ^If the [threading mode] is Single-thread or Multi-thread then this @@ -8500,7 +8569,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); ** CAPI3REF: SQL Keyword Checking ** ** These routines provide access to the set of SQL language keywords -** recognized by SQLite. Applications can uses these routines to determine +** recognized by SQLite. Applications can use these routines to determine ** whether or not a specific identifier needs to be escaped (for example, ** by enclosing in double-quotes) so as not to confuse the parser. ** @@ -8668,7 +8737,7 @@ SQLITE_API void sqlite3_str_reset(sqlite3_str*); ** content of the dynamic string under construction in X. The value ** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X ** and might be freed or altered by any subsequent method on the same -** [sqlite3_str] object. Applications must not used the pointer returned +** [sqlite3_str] object. Applications must not use the pointer returned by ** [sqlite3_str_value(X)] after any subsequent method call on the same ** object. ^Applications may change the content of the string returned ** by [sqlite3_str_value(X)] as long as they do not write into any bytes @@ -8754,7 +8823,7 @@ SQLITE_API int sqlite3_status64( ** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE] ** buffer and where forced to overflow to [sqlite3_malloc()]. The ** returned value includes allocations that overflowed because they -** where too large (they were larger than the "sz" parameter to +** were too large (they were larger than the "sz" parameter to ** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because ** no space was left in the page cache.)^ ** @@ -8838,28 +8907,29 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(
          SQLITE_DBSTATUS_LOOKASIDE_HIT
          **
          This parameter returns the number of malloc attempts that were ** satisfied using lookaside memory. Only the high-water value is meaningful; -** the current value is always zero.)^ +** the current value is always zero.
          )^ ** ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] ** ^(
          SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
-** This parameter returns the number malloc attempts that might have
+** This parameter returns the number of malloc attempts that might have
 ** been satisfied using lookaside memory but failed due to the amount of
 ** memory requested being larger than the lookaside slot size.
 ** Only the high-water value is meaningful;
-** the current value is always zero.)^
+** the current value is always zero.
          )^ ** ** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] ** ^(
          SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
-** This parameter returns the number malloc attempts that might have
+** This parameter returns the number of malloc attempts that might have
 ** been satisfied using lookaside memory but failed due to all lookaside
 ** memory already being in use.
 ** Only the high-water value is meaningful;
-** the current value is always zero.)^
+** the current value is always zero.
          )^ ** ** [[SQLITE_DBSTATUS_CACHE_USED]] ^(
          SQLITE_DBSTATUS_CACHE_USED
          **
          This parameter returns the approximate number of bytes of heap ** memory used by all pager caches associated with the database connection.)^ ** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0. +**
          ** ** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]] ** ^(
          SQLITE_DBSTATUS_CACHE_USED_SHARED
          @@ -8868,10 +8938,10 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** memory used by that pager cache is divided evenly between the attached ** connections.)^ In other words, if none of the pager caches associated ** with the database connection are shared, this request returns the same -** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are +** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are ** shared, the value returned by this call will be smaller than that returned ** by DBSTATUS_CACHE_USED. ^The highwater mark associated with -** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. +** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. ** ** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
          SQLITE_DBSTATUS_SCHEMA_USED
          **
          This parameter returns the approximate number of bytes of heap @@ -8881,6 +8951,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** schema memory is shared with other database connections due to ** [shared cache mode] being enabled. ** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0. +**
          ** ** [[SQLITE_DBSTATUS_STMT_USED]] ^(
          SQLITE_DBSTATUS_STMT_USED
          **
          This parameter returns the approximate number of bytes of heap @@ -8917,7 +8988,7 @@ SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int r ** been written to disk in the middle of a transaction due to the page ** cache overflowing. Transactions are more efficient if they are written ** to disk all at once. When pages spill mid-transaction, that introduces -** additional overhead. This parameter can be used help identify +** additional overhead. This parameter can be used to help identify ** inefficiencies that can be resolved by increasing the cache size. **
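For context on the SQLITE_DBSTATUS_* verbs whose comments these hunks touch: they are all read through `sqlite3_db_status()`, whose prototype appears in the hunk headers above. A minimal cgo sketch of polling the two cache verbs the surrounding paragraphs discuss — the wrapper names (`dbStatus`, `reportPagerCache` is folded into `main` here), the `-lsqlite3` link against a system SQLite, and the `:memory:` demo database are assumptions, not part of this patch:

```go
package main

/*
#cgo LDFLAGS: -lsqlite3
#include <stdlib.h>
#include <sqlite3.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// dbStatus wraps sqlite3_db_status() for one SQLITE_DBSTATUS_* verb.
// resetFlg, when non-zero, resets the high-water mark; for several verbs
// only one of the two outputs is meaningful, as the comments above note.
func dbStatus(db *C.sqlite3, op, resetFlg C.int) (cur, hiwtr int, err error) {
	var c, h C.int
	if rc := C.sqlite3_db_status(db, op, &c, &h, resetFlg); rc != C.SQLITE_OK {
		return 0, 0, fmt.Errorf("sqlite3_db_status: rc=%d", int(rc))
	}
	return int(c), int(h), nil
}

func main() {
	cname := C.CString(":memory:")
	defer C.free(unsafe.Pointer(cname))

	var db *C.sqlite3
	if rc := C.sqlite3_open(cname, &db); rc != C.SQLITE_OK {
		panic("sqlite3_open failed")
	}
	defer C.sqlite3_close(db)

	// Pager-cache footprint and mid-transaction spill count: the pair the
	// text above suggests watching when tuning [PRAGMA cache_size].
	if cur, _, err := dbStatus(db, C.SQLITE_DBSTATUS_CACHE_USED, 0); err == nil {
		fmt.Printf("pager cache in use: %d bytes\n", cur)
	}
	if cur, _, err := dbStatus(db, C.SQLITE_DBSTATUS_CACHE_SPILL, 0); err == nil {
		fmt.Printf("pages spilled mid-transaction: %d\n", cur)
	}
}
```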
          ** @@ -8988,13 +9059,13 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** [[SQLITE_STMTSTATUS_SORT]]
          SQLITE_STMTSTATUS_SORT
          **
          ^This is the number of sort operations that have occurred. ** A non-zero value in this counter may indicate an opportunity to -** improvement performance through careful use of indices.
          +** improve performance through careful use of indices. ** ** [[SQLITE_STMTSTATUS_AUTOINDEX]]
          SQLITE_STMTSTATUS_AUTOINDEX
          **
          ^This is the number of rows inserted into transient indices that ** were created automatically in order to help joins run faster. ** A non-zero value in this counter may indicate an opportunity to -** improvement performance by adding permanent indices that do not +** improve performance by adding permanent indices that do not ** need to be reinitialized each time the statement is run.
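Unlike `sqlite3_db_status()`, `sqlite3_stmt_status()` returns the requested counter directly, so checking the two verbs above after running a statement is a one-liner each. A sketch under the same cgo assumptions as the previous example; the package and function names are illustrative only:

```go
package stmtstats

/*
#cgo LDFLAGS: -lsqlite3
#include <sqlite3.h>
*/
import "C"

// counter reads one SQLITE_STMTSTATUS_* value; a zero resetFlg leaves
// the counter intact.
func counter(stmt *C.sqlite3_stmt, op C.int) int {
	return int(C.sqlite3_stmt_status(stmt, op, 0))
}

// suggestsMissingIndex reports whether the statement had to sort or build
// a transient index — both hints, per the text above, that a permanent
// index could improve performance.
func suggestsMissingIndex(stmt *C.sqlite3_stmt) bool {
	return counter(stmt, C.SQLITE_STMTSTATUS_SORT) > 0 ||
		counter(stmt, C.SQLITE_STMTSTATUS_AUTOINDEX) > 0
}
```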
          ** ** [[SQLITE_STMTSTATUS_VM_STEP]]
          SQLITE_STMTSTATUS_VM_STEP
          @@ -9003,19 +9074,19 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** to 2147483647. The number of virtual machine operations can be ** used as a proxy for the total work done by the prepared statement. ** If the number of virtual machine operations exceeds 2147483647 -** then the value returned by this statement status code is undefined. +** then the value returned by this statement status code is undefined. ** ** [[SQLITE_STMTSTATUS_REPREPARE]]
          SQLITE_STMTSTATUS_REPREPARE
          **
          ^This is the number of times that the prepare statement has been ** automatically regenerated due to schema changes or changes to -** [bound parameters] that might affect the query plan. +** [bound parameters] that might affect the query plan.
          ** ** [[SQLITE_STMTSTATUS_RUN]]
          SQLITE_STMTSTATUS_RUN
          **
          ^This is the number of times that the prepared statement has ** been run. A single "run" for the purposes of this counter is one ** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()]. ** The counter is incremented on the first [sqlite3_step()] call of each -** cycle. +** cycle.
          ** ** [[SQLITE_STMTSTATUS_FILTER_MISS]] ** [[SQLITE_STMTSTATUS_FILTER HIT]] @@ -9025,7 +9096,7 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); ** step was bypassed because a Bloom filter returned not-found. The ** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of ** times that the Bloom filter returned a find, and thus the join step -** had to be processed as normal. +** had to be processed as normal. ** ** [[SQLITE_STMTSTATUS_MEMUSED]]
          SQLITE_STMTSTATUS_MEMUSED
          **
          ^This is the approximate number of bytes of heap memory @@ -9130,9 +9201,9 @@ struct sqlite3_pcache_page { ** SQLite will typically create one cache instance for each open database file, ** though this is not guaranteed. ^The ** first parameter, szPage, is the size in bytes of the pages that must -** be allocated by the cache. ^szPage will always a power of two. ^The +** be allocated by the cache. ^szPage will always be a power of two. ^The ** second parameter szExtra is a number of bytes of extra storage -** associated with each page cache entry. ^The szExtra parameter will +** associated with each page cache entry. ^The szExtra parameter will be ** a number less than 250. SQLite will use the ** extra szExtra bytes on each page to store metadata about the underlying ** database page on disk. The value passed into szExtra depends @@ -9140,17 +9211,17 @@ struct sqlite3_pcache_page { ** ^The third argument to xCreate(), bPurgeable, is true if the cache being ** created will be used to cache database pages of a file stored on disk, or ** false if it is used for an in-memory database. The cache implementation -** does not have to do anything special based with the value of bPurgeable; +** does not have to do anything special based upon the value of bPurgeable; ** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will ** never invoke xUnpin() except to deliberately delete a page. ** ^In other words, calls to xUnpin() on a cache with bPurgeable set to ** false will always have the "discard" flag set to true. -** ^Hence, a cache created with bPurgeable false will +** ^Hence, a cache created with bPurgeable set to false will ** never contain any unpinned pages. ** ** [[the xCachesize() page cache method]] ** ^(The xCachesize() method may be called at any time by SQLite to set the -** suggested maximum cache-size (number of pages stored by) the cache +** suggested maximum cache-size (number of pages stored) for the cache ** instance passed as the first argument. This is the value configured using ** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable ** parameter, the implementation is not required to do anything with this @@ -9177,12 +9248,12 @@ struct sqlite3_pcache_page { ** implementation must return a pointer to the page buffer with its content ** intact. If the requested page is not already in the cache, then the ** cache implementation should use the value of the createFlag -** parameter to help it determined what action to take: +** parameter to help it determine what action to take: ** ** **
          createFlag Behavior when page is not already in cache **
          0 Do not allocate a new page. Return NULL. -**
          1 Allocate a new page if it easy and convenient to do so. +**
          1 Allocate a new page if it is easy and convenient to do so. ** Otherwise return NULL. **
          2 Make every effort to allocate a new page. Only return ** NULL if allocating a new page is effectively impossible. @@ -9199,7 +9270,7 @@ struct sqlite3_pcache_page { ** as its second argument. If the third parameter, discard, is non-zero, ** then the page must be evicted from the cache. ** ^If the discard parameter is -** zero, then the page may be discarded or retained at the discretion of +** zero, then the page may be discarded or retained at the discretion of the ** page cache implementation. ^The page cache implementation ** may choose to evict unpinned pages at any time. ** @@ -9217,7 +9288,7 @@ struct sqlite3_pcache_page { ** When SQLite calls the xTruncate() method, the cache must discard all ** existing cache entries with page numbers (keys) greater than or equal ** to the value of the iLimit parameter passed to xTruncate(). If any -** of these pages are pinned, they are implicitly unpinned, meaning that +** of these pages are pinned, they become implicitly unpinned, meaning that ** they can be safely discarded. ** ** [[the xDestroy() page cache method]] @@ -9397,7 +9468,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** external process or via a database connection other than the one being ** used by the backup operation, then the backup will be automatically ** restarted by the next call to sqlite3_backup_step(). ^If the source -** database is modified by the using the same database connection as is used +** database is modified by using the same database connection as is used ** by the backup operation, then the backup database is automatically ** updated at the same time. ** @@ -9414,7 +9485,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** and may not be used following a call to sqlite3_backup_finish(). ** ** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no -** sqlite3_backup_step() errors occurred, regardless or whether or not +** sqlite3_backup_step() errors occurred, regardless of whether or not ** sqlite3_backup_step() completed. ** ^If an out-of-memory condition or IO error occurred during any prior ** sqlite3_backup_step() call on the same [sqlite3_backup] object, then @@ -9516,7 +9587,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); ** application receives an SQLITE_LOCKED error, it may call the ** sqlite3_unlock_notify() method with the blocked connection handle as ** the first argument to register for a callback that will be invoked -** when the blocking connections current transaction is concluded. ^The +** when the blocking connection's current transaction is concluded. ^The ** callback is invoked from within the [sqlite3_step] or [sqlite3_close] ** call that concludes the blocking connection's transaction. ** @@ -9536,7 +9607,7 @@ SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); ** blocked connection already has a registered unlock-notify callback, ** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is ** called with a NULL pointer as its second argument, then any existing -** unlock-notify callback is canceled. ^The blocked connections +** unlock-notify callback is canceled. ^The blocked connection's ** unlock-notify callback may also be canceled by closing the blocked ** connection using [sqlite3_close()]. ** @@ -9934,7 +10005,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** support constraints. 
In this configuration (which is the default) if ** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire ** statement is rolled back as if [ON CONFLICT | OR ABORT] had been -** specified as part of the users SQL statement, regardless of the actual +** specified as part of the user's SQL statement, regardless of the actual ** ON CONFLICT mode specified. ** ** If X is non-zero, then the virtual table implementation guarantees @@ -9968,7 +10039,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
          SQLITE_VTAB_INNOCUOUS
          **
          Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a @@ -10136,7 +10207,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); **
          ** ** ^For the purposes of comparing virtual table output values to see if the -** values are same value for sorting purposes, two NULL values are considered +** values are the same value for sorting purposes, two NULL values are considered ** to be the same. In other words, the comparison operator is "IS" ** (or "IS NOT DISTINCT FROM") and not "==". ** @@ -10146,7 +10217,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); ** ** ^A virtual table implementation is always free to return rows in any order ** it wants, as long as the "orderByConsumed" flag is not set. ^When the -** the "orderByConsumed" flag is unset, the query planner will add extra +** "orderByConsumed" flag is unset, the query planner will add extra ** [bytecode] to ensure that the final results returned by the SQL query are ** ordered correctly. The use of the "orderByConsumed" flag and the ** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful @@ -10243,7 +10314,7 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); ** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint -** processing use the [sqlite3_vtab_in()] interface in the +** processing using the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint ** processing, then these routines return [SQLITE_ERROR].)^ @@ -10298,7 +10369,7 @@ SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut); ** and only if *V is set to a value. ^The sqlite3_vtab_rhs_value(P,J,V) ** inteface returns SQLITE_NOTFOUND if the right-hand side of the J-th ** constraint is not available. ^The sqlite3_vtab_rhs_value() interface -** can return an result code other than SQLITE_OK or SQLITE_NOTFOUND if +** can return a result code other than SQLITE_OK or SQLITE_NOTFOUND if ** something goes wrong. ** ** The sqlite3_vtab_rhs_value() interface is usually only successful if @@ -10326,8 +10397,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** KEYWORDS: {conflict resolution mode} ** ** These constants are returned by [sqlite3_vtab_on_conflict()] to -** inform a [virtual table] implementation what the [ON CONFLICT] mode -** is for the SQL statement being evaluated. +** inform a [virtual table] implementation of the [ON CONFLICT] mode +** for the SQL statement being evaluated. ** ** Note that the [SQLITE_IGNORE] constant is also used as a potential ** return value from the [sqlite3_set_authorizer()] callback and that @@ -10367,39 +10438,39 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** [[SQLITE_SCANSTAT_EST]]
          SQLITE_SCANSTAT_EST
          **
          ^The "double" variable pointed to by the V parameter will be set to the ** query planner's estimate for the average number of rows output from each -** iteration of the X-th loop. If the query planner's estimates was accurate, +** iteration of the X-th loop. If the query planner's estimate was accurate, ** then this value will approximate the quotient NVISIT/NLOOP and the ** product of this value for all prior loops with the same SELECTID will -** be the NLOOP value for the current loop. +** be the NLOOP value for the current loop.
          ** ** [[SQLITE_SCANSTAT_NAME]]
          SQLITE_SCANSTAT_NAME
          **
          ^The "const char *" variable pointed to by the V parameter will be set ** to a zero-terminated UTF-8 string containing the name of the index or table -** used for the X-th loop. +** used for the X-th loop.
          ** ** [[SQLITE_SCANSTAT_EXPLAIN]]
          SQLITE_SCANSTAT_EXPLAIN
          **
          ^The "const char *" variable pointed to by the V parameter will be set ** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] -** description for the X-th loop. +** description for the X-th loop.
          ** ** [[SQLITE_SCANSTAT_SELECTID]]
          SQLITE_SCANSTAT_SELECTID
          **
          ^The "int" variable pointed to by the V parameter will be set to the ** id for the X-th query plan element. The id value is unique within the ** statement. The select-id is the same value as is output in the first -** column of an [EXPLAIN QUERY PLAN] query. +** column of an [EXPLAIN QUERY PLAN] query.
          ** ** [[SQLITE_SCANSTAT_PARENTID]]
          SQLITE_SCANSTAT_PARENTID
          **
          The "int" variable pointed to by the V parameter will be set to the -** the id of the parent of the current query element, if applicable, or +** id of the parent of the current query element, if applicable, or ** to zero if the query element has no parent. This is the same value as -** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** returned in the second column of an [EXPLAIN QUERY PLAN] query.
          ** ** [[SQLITE_SCANSTAT_NCYCLE]]
          SQLITE_SCANSTAT_NCYCLE
          **
          The sqlite3_int64 output value is set to the number of cycles, ** according to the processor time-stamp counter, that elapsed while the ** query element was being processed. This value is not available for ** all query elements - if it is unavailable the output variable is -** set to -1. +** set to -1.
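These SQLITE_SCANSTAT_* verbs are read per query element through `sqlite3_stmt_scanstatus_v2()`, whose idx and flags semantics the next hunk documents. A hedged sketch, assuming the linked libsqlite3 was compiled with SQLITE_ENABLE_STMT_SCANSTATUS (the interface is unavailable otherwise); the package and helper names are mine:

```go
package scanstats

/*
#cgo LDFLAGS: -lsqlite3
#include <sqlite3.h>
*/
import "C"

import "unsafe"

// scanStatI64 reads one 64-bit SQLITE_SCANSTAT_* value (e.g. NLOOP or
// NVISIT) for query element idx, numbered from zero. A zero flags
// argument gives the original loop-only numbering; a non-zero return
// code means idx was out of range, reported here as ok=false.
func scanStatI64(stmt *C.sqlite3_stmt, idx int, op C.int) (v int64, ok bool) {
	var out C.sqlite3_int64
	rc := C.sqlite3_stmt_scanstatus_v2(stmt, C.int(idx), op, 0, unsafe.Pointer(&out))
	return int64(out), rc == 0
}
```

Dividing the NVISIT value by the NLOOP value for a loop and comparing it against the SQLITE_SCANSTAT_EST estimate described above is the usual way to spot stale planner statistics.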
          ** */ #define SQLITE_SCANSTAT_NLOOP 0 @@ -10440,8 +10511,8 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. ** ** Parameter "idx" identifies the specific query element to retrieve statistics -** for. Query elements are numbered starting from zero. A value of -1 may be -** to query for statistics regarding the entire query. ^If idx is out of range +** for. Query elements are numbered starting from zero. A value of -1 may +** retrieve statistics for the entire query. ^If idx is out of range ** - less than -1 or greater than or equal to the total number of query ** elements used to implement the statement - a non-zero value is returned and ** the variable that pOut points to is unchanged. @@ -10484,7 +10555,7 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); ** METHOD: sqlite3 ** ** ^If a write-transaction is open on [database connection] D when the -** [sqlite3_db_cacheflush(D)] interface invoked, any dirty +** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty ** pages in the pager-cache that are not currently in use are written out ** to disk. A dirty page may be in use if a database cursor created by an ** active SQL statement is reading from it, or if it is page 1 of a database @@ -10598,8 +10669,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** triggers; and so forth. ** ** When the [sqlite3_blob_write()] API is used to update a blob column, -** the pre-update hook is invoked with SQLITE_DELETE. This is because the -** in this case the new values are not available. In this case, when a +** the pre-update hook is invoked with SQLITE_DELETE, because +** the new values are not yet available. In this case, when a ** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the @@ -10852,7 +10923,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** For an ordinary on-disk database file, the serialization is just a ** copy of the disk file. For an in-memory database or a "TEMP" database, ** the serialization is the same sequence of bytes which would be written -** to disk if that database where backed up to disk. +** to disk if that database were backed up to disk. ** ** The usual case is that sqlite3_serialize() copies the serialization of ** the database into memory obtained from [sqlite3_malloc64()] and returns @@ -10861,7 +10932,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations ** are made, and the sqlite3_serialize() function will return a pointer ** to the contiguous memory representation of the database that SQLite -** is currently using for that database, or NULL if the no such contiguous +** is currently using for that database, or NULL if no such contiguous ** memory representation of the database exists. A contiguous memory ** representation of the database will usually only exist if there has ** been a prior call to [sqlite3_deserialize(D,S,...)] with the same @@ -10932,7 +11003,7 @@ SQLITE_API unsigned char *sqlite3_serialize( ** database is currently in a read transaction or is involved in a backup ** operation. ** -** It is not possible to deserialized into the TEMP database. If the +** It is not possible to deserialize into the TEMP database. 
If the ** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** @@ -10954,7 +11025,7 @@ SQLITE_API int sqlite3_deserialize( sqlite3 *db, /* The database connection */ const char *zSchema, /* Which DB to reopen with the deserialization */ unsigned char *pData, /* The serialized database content */ - sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szDb, /* Number of bytes in the deserialization */ sqlite3_int64 szBuf, /* Total size of buffer pData[] */ unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ ); @@ -10962,7 +11033,7 @@ SQLITE_API int sqlite3_deserialize( /* ** CAPI3REF: Flags for sqlite3_deserialize() ** -** The following are allowed values for 6th argument (the F argument) to +** The following are allowed values for the 6th argument (the F argument) to ** the [sqlite3_deserialize(D,S,P,N,M,F)] interface. ** ** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization @@ -11487,9 +11558,10 @@ SQLITE_API void sqlite3session_table_filter( ** is inserted while a session object is enabled, then later deleted while ** the same session object is disabled, no INSERT record will appear in the ** changeset, even though the delete took place while the session was disabled. -** Or, if one field of a row is updated while a session is disabled, and -** another field of the same row is updated while the session is enabled, the -** resulting changeset will contain an UPDATE change that updates both fields. +** Or, if one field of a row is updated while a session is enabled, and +** then another field of the same row is updated while the session is disabled, +** the resulting changeset will contain an UPDATE change that updates both +** fields. */ SQLITE_API int sqlite3session_changeset( sqlite3_session *pSession, /* Session object */ @@ -11561,8 +11633,9 @@ SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession ** database zFrom the contents of the two compatible tables would be ** identical. ** -** It an error if database zFrom does not exist or does not contain the -** required compatible table. +** Unless the call to this function is a no-op as described above, it is an +** error if database zFrom does not exist or does not contain the required +** compatible table. ** ** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite ** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg @@ -11697,7 +11770,7 @@ SQLITE_API int sqlite3changeset_start_v2( ** The following flags may passed via the 4th parameter to ** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]: ** -**
          SQLITE_CHANGESETAPPLY_INVERT
          +**
          SQLITE_CHANGESETSTART_INVERT
          ** Invert the changeset while iterating through it. This is equivalent to ** inverting a changeset using sqlite3changeset_invert() before applying it. ** It is an error to specify this flag with a patchset. @@ -12012,19 +12085,6 @@ SQLITE_API int sqlite3changeset_concat( void **ppOut /* OUT: Buffer containing output changeset */ ); - -/* -** CAPI3REF: Upgrade the Schema of a Changeset/Patchset -*/ -SQLITE_API int sqlite3changeset_upgrade( - sqlite3 *db, - const char *zDb, - int nIn, const void *pIn, /* Input changeset */ - int *pnOut, void **ppOut /* OUT: Inverse of input */ -); - - - /* ** CAPI3REF: Changegroup Handle ** diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go index 76d8401644..5a49276659 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_opt_userauth.go @@ -16,53 +16,10 @@ package sqlite3 #else #include #endif -#include - -static int -_sqlite3_user_authenticate(sqlite3* db, const char* zUsername, const char* aPW, int nPW) -{ - return sqlite3_user_authenticate(db, zUsername, aPW, nPW); -} - -static int -_sqlite3_user_add(sqlite3* db, const char* zUsername, const char* aPW, int nPW, int isAdmin) -{ - return sqlite3_user_add(db, zUsername, aPW, nPW, isAdmin); -} - -static int -_sqlite3_user_change(sqlite3* db, const char* zUsername, const char* aPW, int nPW, int isAdmin) -{ - return sqlite3_user_change(db, zUsername, aPW, nPW, isAdmin); -} - -static int -_sqlite3_user_delete(sqlite3* db, const char* zUsername) -{ - return sqlite3_user_delete(db, zUsername); -} - -static int -_sqlite3_auth_enabled(sqlite3* db) -{ - int exists = -1; - - sqlite3_stmt *stmt; - sqlite3_prepare_v2(db, "select count(type) from sqlite_master WHERE type='table' and name='sqlite_user';", -1, &stmt, NULL); - - while ( sqlite3_step(stmt) == SQLITE_ROW) { - exists = sqlite3_column_int(stmt, 0); - } - - sqlite3_finalize(stmt); - - return exists; -} */ import "C" import ( "errors" - "unsafe" ) const ( @@ -70,8 +27,9 @@ const ( ) var ( - ErrUnauthorized = errors.New("SQLITE_AUTH: Unauthorized") - ErrAdminRequired = errors.New("SQLITE_AUTH: Unauthorized; Admin Privileges Required") + ErrUnauthorized = errors.New("SQLITE_AUTH: Unauthorized") + ErrAdminRequired = errors.New("SQLITE_AUTH: Unauthorized; Admin Privileges Required") + errUserAuthNoLongerSupported = errors.New("sqlite3: the sqlite_userauth tag is no longer supported as the userauth extension is no longer supported by the SQLite authors, see https://github.com/mattn/go-sqlite3/issues/1341") ) // Authenticate will perform an authentication of the provided username @@ -88,15 +46,7 @@ var ( // If the SQLITE_USER table is not present in the database file, then // this interface is a harmless no-op returning SQLITE_OK. func (c *SQLiteConn) Authenticate(username, password string) error { - rv := c.authenticate(username, password) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrUnauthorized - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authenticate provides the actual authentication to SQLite. 
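After this hunk every public userauth method on `SQLiteConn` returns the `errUserAuthNoLongerSupported` sentinel declared above instead of calling into the removed C shims. For context, a small sketch of what a caller built with the `sqlite_userauth` tag now observes — the `database/sql` plumbing is an assumption; only the `Authenticate` call and its error behavior come from this patch:

```go
package main

import (
	"context"
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := db.Conn(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Reach the driver connection to call the now-stubbed userauth API.
	err = conn.Raw(func(dc interface{}) error {
		return dc.(*sqlite3.SQLiteConn).Authenticate("admin", "secret")
	})
	// Always non-nil after this change: "sqlite3: the sqlite_userauth tag
	// is no longer supported ..." — callers should drop the build tag.
	log.Println(err)
}
```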
@@ -109,17 +59,7 @@ func (c *SQLiteConn) Authenticate(username, password string) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authenticate(username, password string) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_authenticate(c.db, cuser, cpass, C.int(len(password)))) + return 1 } // AuthUserAdd can be used (by an admin user only) @@ -131,20 +71,7 @@ func (c *SQLiteConn) authenticate(username, password string) int { // for any ATTACH-ed databases. Any call to AuthUserAdd by a // non-admin user results in an error. func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { - isAdmin := 0 - if admin { - isAdmin = 1 - } - - rv := c.authUserAdd(username, password, isAdmin) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserAdd enables the User Authentication if not enabled. @@ -162,17 +89,7 @@ func (c *SQLiteConn) AuthUserAdd(username, password string, admin bool) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_add(c.db, cuser, cpass, C.int(len(password)), C.int(admin))) + return 1 } // AuthUserChange can be used to change a users @@ -181,20 +98,7 @@ func (c *SQLiteConn) authUserAdd(username, password string, admin int) int { // credentials or admin privilege setting. No user may change their own // admin privilege setting. func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error { - isAdmin := 0 - if admin { - isAdmin = 1 - } - - rv := c.authUserChange(username, password, isAdmin) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserChange allows to modify a user. @@ -215,17 +119,7 @@ func (c *SQLiteConn) AuthUserChange(username, password string, admin bool) error // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserChange(username, password string, admin int) int { - // Allocate C Variables - cuser := C.CString(username) - cpass := C.CString(password) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - C.free(unsafe.Pointer(cpass)) - }() - - return int(C._sqlite3_user_change(c.db, cuser, cpass, C.int(len(password)), C.int(admin))) + return 1 } // AuthUserDelete can be used (by an admin user only) @@ -234,15 +128,7 @@ func (c *SQLiteConn) authUserChange(username, password string, admin int) int { // the database cannot be converted into a no-authentication-required // database. func (c *SQLiteConn) AuthUserDelete(username string) error { - rv := c.authUserDelete(username) - switch rv { - case C.SQLITE_ERROR, C.SQLITE_AUTH: - return ErrAdminRequired - case C.SQLITE_OK: - return nil - default: - return c.lastError() - } + return errUserAuthNoLongerSupported } // authUserDelete can be used to delete a user. 
@@ -258,25 +144,12 @@ func (c *SQLiteConn) AuthUserDelete(username string) error { // C.SQLITE_ERROR (1) // C.SQLITE_AUTH (23) func (c *SQLiteConn) authUserDelete(username string) int { - // Allocate C Variables - cuser := C.CString(username) - - // Free C Variables - defer func() { - C.free(unsafe.Pointer(cuser)) - }() - - return int(C._sqlite3_user_delete(c.db, cuser)) + return 1 } // AuthEnabled checks if the database is protected by user authentication func (c *SQLiteConn) AuthEnabled() (exists bool) { - rv := c.authEnabled() - if rv == 1 { - exists = true - } - - return + return false } // authEnabled perform the actual check for user authentication. @@ -289,7 +162,7 @@ func (c *SQLiteConn) AuthEnabled() (exists bool) { // 0 - Disabled // 1 - Enabled func (c *SQLiteConn) authEnabled() int { - return int(C._sqlite3_auth_enabled(c.db)) + return 0 } // EOF diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h index 935437bb63..3a5e0a4edb 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h @@ -371,6 +371,8 @@ struct sqlite3_api_routines { /* Version 3.44.0 and later */ void *(*get_clientdata)(sqlite3*,const char*); int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); + /* Version 3.50.0 and later */ + int (*setlk_timeout)(sqlite3*,int,int); }; /* @@ -704,6 +706,8 @@ typedef int (*sqlite3_loadext_entry)( /* Version 3.44.0 and later */ #define sqlite3_get_clientdata sqlite3_api->get_clientdata #define sqlite3_set_clientdata sqlite3_api->set_clientdata +/* Version 3.50.0 and later */ +#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) diff --git a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/.gitignore b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/.gitignore index 1597f12b71..070b2a1fc9 100644 --- a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/.gitignore +++ b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/.gitignore @@ -30,3 +30,4 @@ integration/testdata/output *.profile *.bench /.vscode +.DS_Store diff --git a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/README.md b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/README.md index db70f92c2d..dd36898987 100644 --- a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/README.md +++ b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/README.md @@ -20,25 +20,14 @@ If you are having problems with `counterfeiter` and are not using a supported ve Typically, `counterfeiter` is used in `go generate` directives. It can be frustrating when you change your interface declaration and suddenly all of your generated code is suddenly out-of-date. The best practice here is to use the [`go generate` command](https://blog.golang.org/generate) to make it easier to keep your test doubles up to date. -#### Step 1 - Create `tools.go` +⚠️ If you are working with go 1.23 or earlier, please refer to an [older version of this README](https://github.com/maxbrunsfeld/counterfeiter/blob/e39cbe6aaa94a0b6718cf3d413cd5319c3a1f6fa/README.md#using-counterfeiter), as the instructions below assume go 1.24 (which added `go tool` support) and later. -You can take a dependency on tools by creating a `tools.go` file, as described in [How can I track tool dependencies for a module?](https://go.dev/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module). 
This ensures that everyone working with your module is using the same version of each tool you use. +#### Step 1 - Add `counterfeiter` as a tool dependency -```shell -$ cat tools/tools.go -``` - -```go -//go:build tools +Establish a tool dependency on counterfeiter by running the following command: -package tools - -import ( - _ "github.com/maxbrunsfeld/counterfeiter/v6" -) - -// This file imports packages that are used when running go generate, or used -// during the development process but not otherwise depended on by built code. +```shell +go get -tool github.com/maxbrunsfeld/counterfeiter/v6 ``` #### Step 2a - Add `go:generate` Directives @@ -52,7 +41,7 @@ $ cat myinterface.go ```go package foo -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . MySpecialInterface +//go:generate go tool counterfeiter . MySpecialInterface type MySpecialInterface interface { DoThings(string, uint64) (int, error) @@ -67,8 +56,8 @@ Writing `FakeMySpecialInterface` to `foofakes/fake_my_special_interface.go`... D #### Step 2b - Add `counterfeiter:generate` Directives If you plan to have many directives in a single package, consider using this -option. You can add directives right next to your interface definitions -(or not), in any `.go` file in your module. +option, as it will speed things up considerably. You can add directives right +next to your interface definitions (or not), in any `.go` file in your module. ```shell $ cat myinterface.go @@ -78,7 +67,7 @@ $ cat myinterface.go package foo // You only need **one** of these per package! -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -generate +//go:generate go tool counterfeiter -generate // You will add lots of directives like these in the same package... //counterfeiter:generate . MySpecialInterface @@ -112,7 +101,7 @@ $ go generate ./... You can use the following command to invoke `counterfeiter` from within a go module: ```shell -$ go run github.com/maxbrunsfeld/counterfeiter/v6 +$ go tool counterfeiter USAGE counterfeiter @@ -153,7 +142,7 @@ type MySpecialInterface interface { ``` ```shell -$ go run github.com/maxbrunsfeld/counterfeiter/v6 path/to/foo MySpecialInterface +$ go tool counterfeiter path/to/foo MySpecialInterface Wrote `FakeMySpecialInterface` to `path/to/foo/foofakes/fake_my_special_interface.go` ``` @@ -196,7 +185,7 @@ For more examples of using the `counterfeiter` API, look at [some of the provide For third party interfaces, you can specify the interface using the alternative syntax `.`, for example: ```shell -$ go run github.com/maxbrunsfeld/counterfeiter/v6 github.com/go-redis/redis.Pipeliner +$ go tool counterfeiter github.com/go-redis/redis.Pipeliner ``` ### Running The Tests For `counterfeiter` diff --git a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/fake.go b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/fake.go index 5c2a6fd1ac..fbdad7b359 100644 --- a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/fake.go +++ b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/fake.go @@ -104,6 +104,72 @@ func (f *Fake) IsFunction() bool { return ok } +// IsConstraintInterface indicates whether the interface is a constraint interface +// (contains type constraints like ~string) which cannot be implemented by concrete types. 
+func (f *Fake) IsConstraintInterface() bool { + if !f.IsInterface() { + return false + } + + iface, ok := f.Target.Type().Underlying().(*types.Interface) + if !ok { + return false + } + + // check if the interface has any type constraints + for i := 0; i < iface.NumEmbeddeds(); i++ { + if _, ok := iface.EmbeddedType(i).(*types.Union); ok { + return true + } + } + + // check for approximation constraints by examining the string representation + // a bit of a hack, but the Go types API doesn't expose type constraints cleanly + return strings.Contains(iface.String(), "~") +} + +// HasConstraintInterface indicates whether any of the generic type constraints +// are constraint interfaces that cannot be used in type assertions. +func (f *Fake) HasConstraintInterface() bool { + if f.Target == nil || f.Target.Type() == nil { + return false + } + + named, ok := f.Target.Type().(*types.Named) + if !ok { + return false + } + + typeParams := named.TypeParams() + if typeParams.Len() == 0 { + return false + } + + for i := 0; i < typeParams.Len(); i++ { + param := typeParams.At(i) + constraint := param.Constraint() + + // check if the constraint is a constraint interface + if iface, ok := constraint.Underlying().(*types.Interface); ok { + // check if this interface contains type constraints + for j := 0; j < iface.NumEmbeddeds(); j++ { + if _, ok := iface.EmbeddedType(j).(*types.Union); ok { + return true + } + } + + // check for approximation constraints by examining the string representation + // a bit of a hack, but the Go types API doesn't expose type constraints cleanly + constraintStr := constraint.String() + if strings.Contains(constraintStr, "~") { + return true + } + } + } + + return false +} + func unexport(s string) string { s = strings.TrimSpace(s) if s == "" { diff --git a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/interface_template.go b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/interface_template.go index 3be9c1a5c3..1a946f8075 100644 --- a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/interface_template.go +++ b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/interface_template.go @@ -10,12 +10,15 @@ import ( var title = cases.Title(language.Und, cases.NoLower) +var hasConstraintInterface = func(f *Fake) bool { return f.HasConstraintInterface() } + var interfaceFuncs = template.FuncMap{ - "ToLower": strings.ToLower, - "UnExport": unexport, - "Replace": strings.Replace, - "IsExported": isExported, - "Title": title.String, + "ToLower": strings.ToLower, + "UnExport": unexport, + "Replace": strings.Replace, + "IsExported": isExported, + "Title": title.String, + "HasConstraintInterface": hasConstraintInterface, } const interfaceTemplate string = `{{.Header}}// Code generated by counterfeiter. DO NOT EDIT. @@ -147,10 +150,6 @@ func (fake *{{$.Name}}{{$.GenericTypeParameters}}) {{Title .Name}}ReturnsOnCall( func (fake *{{.Name}}{{$.GenericTypeParameters}}) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - {{- range .Methods}} - fake.{{UnExport .Name}}Mutex.RLock() - defer fake.{{UnExport .Name}}Mutex.RUnlock() - {{- end}} copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value @@ -171,6 +170,8 @@ func (fake *{{.Name}}{{$.GenericTypeParameters}}) recordInvocation(key string, a } {{if IsExported .TargetName -}} +{{if not (HasConstraintInterface .) 
-}} var _ {{.TargetAlias}}.{{.TargetName}}{{.GenericTypeConstraints}} = new({{.Name}}{{.GenericTypeConstraints}}) {{- end}} +{{- end}} ` diff --git a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/loader.go b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/loader.go index 106ddfbd58..08233b0d62 100644 --- a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/loader.go +++ b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/generator/loader.go @@ -138,6 +138,10 @@ func (f *Fake) findPackage() error { if !f.IsInterface() && !f.IsFunction() { return fmt.Errorf("cannot generate a fake for %s because it is not an interface or function", f.TargetName) } + + if f.IsConstraintInterface() { + return fmt.Errorf("cannot generate a fake for %s because it is a constraint interface (contains type constraints like ~string) which cannot be implemented by concrete types", f.TargetName) + } } if f.IsInterface() { diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c758234904..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. 
-* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9f..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca724..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. 
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. -func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. 
-func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af3..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac36..0000000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. 
-// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". -// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. 
-// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). 
- ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. 
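Before the implementation, a self-contained sketch of the top-level API documented above; the `Person` type and the input map are illustrative:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
	Age  int
}

func main() {
	input := map[string]interface{}{
		"name":  "bob",
		"age":   42,
		"email": "bob@example.com", // no matching field on Person
	}

	var p Person
	var md mapstructure.Metadata
	if err := mapstructure.DecodeMetadata(input, &p, &md); err != nil {
		panic(err)
	}
	fmt.Println(p)         // {bob 42}
	fmt.Println(md.Unused) // [email]
}
```

`DecodeMetadata` is the shorthand for `Decode` with metadata collection enabled, so unmapped keys such as `email` are reported in `Metadata.Unused` rather than silently dropped.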
-func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. 
- if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. - var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if addMetaKey && d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - elem := val.Elem() - - // If we can't address this element, then its not writable. Instead, - // we make a copy of the value (which is a pointer and therefore - // writable), decode into that, and replace the whole value. - copied := false - if !elem.CanAddr() { - copied = true - - // Make *T - copy := reflect.New(elem.Type()) - - // *T = elem - copy.Elem().Set(elem) - - // Set elem so we decode into it - elem = copy - } - - // Decode. If we have an error then return. We also return right - // away if we're not a copy because that means we decoded directly. - if err := d.decode(name, data, elem); err != nil || !copied { - return err - } - - // If we're a copy, we need to set te final result - val.Set(elem.Elem()) - return nil - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. 
- // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot 
parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go index a3c7340e3a..278cdfb077 100644 --- a/vendor/github.com/moby/sys/sequential/sequential_unix.go +++ b/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -5,41 +5,22 @@ package sequential import "os" -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already 
exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is an alias for [os.Create] on non-Windows platforms. func Create(name string) (*os.File, error) { return os.Create(name) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is an alias for [os.Open] on non-Windows platforms. func Open(name string) (*os.File, error) { return os.Open(name) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. +// OpenFile is an alias for [os.OpenFile] on non-Windows platforms. func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } -// CreateTemp creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is an alias for [os.CreateTemp] on non-Windows platforms. func CreateTemp(dir, prefix string) (f *os.File, err error) { return os.CreateTemp(dir, prefix) } diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go index 3f7f0d83e0..3500ecc689 100644 --- a/vendor/github.com/moby/sys/sequential/sequential_windows.go +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -5,48 +5,52 @@ import ( "path/filepath" "strconv" "sync" - "syscall" "time" "unsafe" "golang.org/x/sys/windows" ) -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. +// Create is a copy of [os.Create], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Create(name string) (*os.File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) + return openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_TRUNC) } -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. +// Open is a copy of [os.Open], modified to use sequential file access. 
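A short usage sketch of this package's cross-platform surface; the file name is illustrative, and on non-Windows platforms the call reduces to the `os` aliases shown above:

```go
package main

import (
	"fmt"

	"github.com/moby/sys/sequential"
)

func main() {
	// On Windows this opens the file with FILE_FLAG_SEQUENTIAL_SCAN;
	// elsewhere it is exactly os.Create.
	f, err := sequential.Create("layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err := f.WriteString("hello"); err != nil {
		panic(err)
	}
	fmt.Println("wrote", f.Name())
}
```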
+// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func Open(name string) (*os.File, error) { - return OpenFile(name, os.O_RDONLY, 0) + return openFileSequential(name, windows.O_RDONLY) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. +// OpenFile is a copy of [os.OpenFile], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, err := openFileSequential(name, flag, 0) - if err == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: err} + return openFileSequential(name, flag) } -func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) +func openFileSequential(name string, flag int) (file *os.File, err error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: windows.ERROR_FILE_NOT_FOUND} + } + r, e := openSequential(name, flag|windows.O_CLOEXEC) if e != nil { - return nil, e + return nil, &os.PathError{Op: "open", Path: name, Err: e} } return os.NewFile(uintptr(r), name), nil } @@ -58,7 +62,7 @@ func makeInheritSa() *windows.SecurityAttributes { return &sa } -func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { +func openSequential(path string, mode int) (fd windows.Handle, err error) { if len(path) == 0 { return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } @@ -101,15 +105,16 @@ func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err err createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, windows.FILE_FLAG_SEQUENTIAL_SCAN, 0) return h, e } // Helpers for CreateTemp -var rand uint32 -var randmu sync.Mutex +var ( + rand uint32 + randmu sync.Mutex +) func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -127,17 +132,13 @@ func nextSuffix() string { return strconv.Itoa(int(1e9 + r%1e9))[1:] } -// CreateTemp is a copy of os.CreateTemp, modified to use sequential -// file access. 
Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. +// CreateTemp is a copy of [os.CreateTemp], modified to use sequential file access. +// +// It uses [windows.FILE_FLAG_SEQUENTIAL_SCAN] rather than [windows.FILE_ATTRIBUTE_NORMAL] +// as implemented in golang. Refer to the [Win32 API documentation] for details +// on sequential file access. +// +// [Win32 API documentation]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#FILE_FLAG_SEQUENTIAL_SCAN func CreateTemp(dir, prefix string) (f *os.File, err error) { if dir == "" { dir = os.TempDir() @@ -146,7 +147,7 @@ func CreateTemp(dir, prefix string) (f *os.File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + f, err = openFileSequential(name, windows.O_RDWR|windows.O_CREAT|windows.O_EXCL) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() diff --git a/vendor/github.com/oklog/ulid/.gitignore b/vendor/github.com/oklog/ulid/.gitignore deleted file mode 100644 index c92c4d5608..0000000000 --- a/vendor/github.com/oklog/ulid/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -#### joe made this: http://goel.io/joe - -#####=== Go ===##### - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - diff --git a/vendor/github.com/oklog/ulid/.travis.yml b/vendor/github.com/oklog/ulid/.travis.yml deleted file mode 100644 index 43eb762fa3..0000000000 --- a/vendor/github.com/oklog/ulid/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -sudo: false -go: - - 1.10.x -install: - - go get -v github.com/golang/lint/golint - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - - go get -d -t -v ./... - - go build -v ./... -script: - - go vet ./... - - $HOME/gopath/bin/golint . - - go test -v -race ./... 
- - go test -v -covermode=count -coverprofile=cov.out - - $HOME/gopath/bin/goveralls -coverprofile=cov.out -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true diff --git a/vendor/github.com/oklog/ulid/AUTHORS.md b/vendor/github.com/oklog/ulid/AUTHORS.md deleted file mode 100644 index 95581c78b0..0000000000 --- a/vendor/github.com/oklog/ulid/AUTHORS.md +++ /dev/null @@ -1,2 +0,0 @@ -- Peter Bourgon (@peterbourgon) -- Tomás Senart (@tsenart) diff --git a/vendor/github.com/oklog/ulid/CHANGELOG.md b/vendor/github.com/oklog/ulid/CHANGELOG.md deleted file mode 100644 index 8da38c6b00..0000000000 --- a/vendor/github.com/oklog/ulid/CHANGELOG.md +++ /dev/null @@ -1,33 +0,0 @@ -## 1.3.1 / 2018-10-02 - -* Use underlying entropy source for random increments in Monotonic (#32) - -## 1.3.0 / 2018-09-29 - -* Monotonic entropy support (#31) - -## 1.2.0 / 2018-09-09 - -* Add a function to convert Unix time in milliseconds back to time.Time (#30) - -## 1.1.0 / 2018-08-15 - -* Ensure random part is always read from the entropy reader in full (#28) - -## 1.0.0 / 2018-07-29 - -* Add ParseStrict and MustParseStrict functions (#26) -* Enforce overflow checking when parsing (#20) - -## 0.3.0 / 2017-01-03 - -* Implement ULID.Compare method - -## 0.2.0 / 2016-12-13 - -* Remove year 2262 Timestamp bug. (#1) -* Gracefully handle invalid encodings when parsing. - -## 0.1.0 / 2016-12-06 - -* First ULID release diff --git a/vendor/github.com/oklog/ulid/CONTRIBUTING.md b/vendor/github.com/oklog/ulid/CONTRIBUTING.md deleted file mode 100644 index 68f03f26eb..0000000000 --- a/vendor/github.com/oklog/ulid/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -We use GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first propose your ideas - in a Github issue. This will avoid unnecessary work and surely give - you and us a good deal of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/oklog/ulid/Gopkg.lock b/vendor/github.com/oklog/ulid/Gopkg.lock deleted file mode 100644 index 349b449a6e..0000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.lock +++ /dev/null @@ -1,15 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/pborman/getopt" - packages = ["v2"] - revision = "7148bc3a4c3008adfcab60cbebfd0576018f330b" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "6779b05abd5cd429c5393641d2453005a3cb74a400d161b2b5c5d0ca2e10e116" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/oklog/ulid/Gopkg.toml b/vendor/github.com/oklog/ulid/Gopkg.toml deleted file mode 100644 index 624a7a019c..0000000000 --- a/vendor/github.com/oklog/ulid/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/pborman/getopt" diff --git a/vendor/github.com/oklog/ulid/LICENSE b/vendor/github.com/oklog/ulid/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/github.com/oklog/ulid/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/oklog/ulid/README.md b/vendor/github.com/oklog/ulid/README.md deleted file mode 100644 index 0a3d2f82b2..0000000000 --- a/vendor/github.com/oklog/ulid/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# Universally Unique Lexicographically Sortable Identifier - -![Project status](https://img.shields.io/badge/version-1.3.0-yellow.svg) -[![Build Status](https://secure.travis-ci.org/oklog/ulid.png)](http://travis-ci.org/oklog/ulid) -[![Go Report Card](https://goreportcard.com/badge/oklog/ulid?cache=0)](https://goreportcard.com/report/oklog/ulid) -[![Coverage Status](https://coveralls.io/repos/github/oklog/ulid/badge.svg?branch=master&cache=0)](https://coveralls.io/github/oklog/ulid?branch=master) -[![GoDoc](https://godoc.org/github.com/oklog/ulid?status.svg)](https://godoc.org/github.com/oklog/ulid) -[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/ulid/master/LICENSE) - -A Go port of [alizain/ulid](https://github.com/alizain/ulid) with binary format implemented. - -## Background - -A GUID/UUID can be suboptimal for many use-cases because: - -- It isn't the most character efficient way of encoding 128 bits -- UUID v1/v2 is impractical in many environments, as it requires access to a unique, stable MAC address -- UUID v3/v5 requires a unique seed and produces randomly distributed IDs, which can cause fragmentation in many data structures -- UUID v4 provides no other information than randomness which can cause fragmentation in many data structures - -A ULID however: - -- Is compatible with UUID/GUID's -- 1.21e+24 unique ULIDs per millisecond (1,208,925,819,614,629,174,706,176 to be exact) -- Lexicographically sortable -- Canonically encoded as a 26 character string, as opposed to the 36 character UUID -- Uses Crockford's base32 for better efficiency and readability (5 bits per character) -- Case insensitive -- No special characters (URL safe) -- Monotonic sort order (correctly detects and handles the same millisecond) - -## Install - -```shell -go get github.com/oklog/ulid -``` - -## Usage - -An ULID is constructed with a `time.Time` and an `io.Reader` entropy source. -This design allows for greater flexibility in choosing your trade-offs. - -Please note that `rand.Rand` from the `math` package is *not* safe for concurrent use. 
-Instantiate one per long living go-routine or use a `sync.Pool` if you want to avoid the potential contention of a locked `rand.Source` as its been frequently observed in the package level functions. - - -```go -func ExampleULID() { - t := time.Unix(1000000, 0) - entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0) - fmt.Println(ulid.MustNew(ulid.Timestamp(t), entropy)) - // Output: 0000XSNJG0MQJHBF4QX1EFD6Y3 -} - -``` - -## Specification - -Below is the current specification of ULID as implemented in this repository. - -### Components - -**Timestamp** -- 48 bits -- UNIX-time in milliseconds -- Won't run out of space till the year 10895 AD - -**Entropy** -- 80 bits -- User defined entropy source. -- Monotonicity within the same millisecond with [`ulid.Monotonic`](https://godoc.org/github.com/oklog/ulid#Monotonic) - -### Encoding - -[Crockford's Base32](http://www.crockford.com/wrmg/base32.html) is used as shown. -This alphabet excludes the letters I, L, O, and U to avoid confusion and abuse. - -``` -0123456789ABCDEFGHJKMNPQRSTVWXYZ -``` - -### Binary Layout and Byte Order - -The components are encoded as 16 octets. Each component is encoded with the Most Significant Byte first (network byte order). - -``` -0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_time_high | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 16_bit_uint_time_low | 16_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| 32_bit_uint_random | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -``` - -### String Representation - -``` - 01AN4Z07BY 79KA1307SR9X4MV3 -|----------| |----------------| - Timestamp Entropy - 10 chars 16 chars - 48bits 80bits - base32 base32 -``` - -## Test - -```shell -go test ./... 
-``` - -## Benchmarks - -On a Intel Core i7 Ivy Bridge 2.7 GHz, MacOS 10.12.1 and Go 1.8.0beta1 - -``` -BenchmarkNew/WithCryptoEntropy-8 2000000 771 ns/op 20.73 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithEntropy-8 20000000 65.8 ns/op 243.01 MB/s 16 B/op 1 allocs/op -BenchmarkNew/WithoutEntropy-8 50000000 30.0 ns/op 534.06 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithCryptoEntropy-8 2000000 781 ns/op 20.48 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithEntropy-8 20000000 70.0 ns/op 228.51 MB/s 16 B/op 1 allocs/op -BenchmarkMustNew/WithoutEntropy-8 50000000 34.6 ns/op 462.98 MB/s 16 B/op 1 allocs/op -BenchmarkParse-8 50000000 30.0 ns/op 866.16 MB/s 0 B/op 0 allocs/op -BenchmarkMustParse-8 50000000 35.2 ns/op 738.94 MB/s 0 B/op 0 allocs/op -BenchmarkString-8 20000000 64.9 ns/op 246.40 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/Text-8 20000000 55.8 ns/op 286.84 MB/s 32 B/op 1 allocs/op -BenchmarkMarshal/TextTo-8 100000000 22.4 ns/op 714.91 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/Binary-8 300000000 4.02 ns/op 3981.77 MB/s 0 B/op 0 allocs/op -BenchmarkMarshal/BinaryTo-8 2000000000 1.18 ns/op 13551.75 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Text-8 100000000 20.5 ns/op 1265.27 MB/s 0 B/op 0 allocs/op -BenchmarkUnmarshal/Binary-8 300000000 4.94 ns/op 3240.01 MB/s 0 B/op 0 allocs/op -BenchmarkNow-8 100000000 15.1 ns/op 528.09 MB/s 0 B/op 0 allocs/op -BenchmarkTimestamp-8 2000000000 0.29 ns/op 27271.59 MB/s 0 B/op 0 allocs/op -BenchmarkTime-8 2000000000 0.58 ns/op 13717.80 MB/s 0 B/op 0 allocs/op -BenchmarkSetTime-8 2000000000 0.89 ns/op 9023.95 MB/s 0 B/op 0 allocs/op -BenchmarkEntropy-8 200000000 7.62 ns/op 1311.66 MB/s 0 B/op 0 allocs/op -BenchmarkSetEntropy-8 2000000000 0.88 ns/op 11376.54 MB/s 0 B/op 0 allocs/op -BenchmarkCompare-8 200000000 7.34 ns/op 4359.23 MB/s 0 B/op 0 allocs/op -``` - -## Prior Art - -- [alizain/ulid](https://github.com/alizain/ulid) -- [RobThree/NUlid](https://github.com/RobThree/NUlid) -- [imdario/go-ulid](https://github.com/imdario/go-ulid) diff --git a/vendor/github.com/oklog/ulid/ulid.go b/vendor/github.com/oklog/ulid/ulid.go deleted file mode 100644 index c5d0d66fd2..0000000000 --- a/vendor/github.com/oklog/ulid/ulid.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2016 The Oklog Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ulid - -import ( - "bufio" - "bytes" - "database/sql/driver" - "encoding/binary" - "errors" - "io" - "math" - "math/bits" - "math/rand" - "time" -) - -/* -An ULID is a 16 byte Universally Unique Lexicographically Sortable Identifier - - The components are encoded as 16 octets. - Each component is encoded with the MSB first (network byte order). 
- - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_time_high | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 16_bit_uint_time_low | 16_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | 32_bit_uint_random | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -*/ -type ULID [16]byte - -var ( - // ErrDataSize is returned when parsing or unmarshaling ULIDs with the wrong - // data size. - ErrDataSize = errors.New("ulid: bad data size when unmarshaling") - - // ErrInvalidCharacters is returned when parsing or unmarshaling ULIDs with - // invalid Base32 encodings. - ErrInvalidCharacters = errors.New("ulid: bad data characters when unmarshaling") - - // ErrBufferSize is returned when marshalling ULIDs to a buffer of insufficient - // size. - ErrBufferSize = errors.New("ulid: bad buffer size when marshaling") - - // ErrBigTime is returned when constructing an ULID with a time that is larger - // than MaxTime. - ErrBigTime = errors.New("ulid: time too big") - - // ErrOverflow is returned when unmarshaling a ULID whose first character is - // larger than 7, thereby exceeding the valid bit depth of 128. - ErrOverflow = errors.New("ulid: overflow when unmarshaling") - - // ErrMonotonicOverflow is returned by a Monotonic entropy source when - // incrementing the previous ULID's entropy bytes would result in overflow. - ErrMonotonicOverflow = errors.New("ulid: monotonic entropy overflow") - - // ErrScanValue is returned when the value passed to scan cannot be unmarshaled - // into the ULID. - ErrScanValue = errors.New("ulid: source value must be a string or byte slice") -) - -// New returns an ULID with the given Unix milliseconds timestamp and an -// optional entropy source. Use the Timestamp function to convert -// a time.Time to Unix milliseconds. -// -// ErrBigTime is returned when passing a timestamp bigger than MaxTime. -// Reading from the entropy source may also return an error. -func New(ms uint64, entropy io.Reader) (id ULID, err error) { - if err = id.SetTime(ms); err != nil { - return id, err - } - - switch e := entropy.(type) { - case nil: - return id, err - case *monotonic: - err = e.MonotonicRead(ms, id[6:]) - default: - _, err = io.ReadFull(e, id[6:]) - } - - return id, err -} - -// MustNew is a convenience function equivalent to New that panics on failure -// instead of returning an error. -func MustNew(ms uint64, entropy io.Reader) ULID { - id, err := New(ms, entropy) - if err != nil { - panic(err) - } - return id -} - -// Parse parses an encoded ULID, returning an error in case of failure. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. For a version that -// returns an error instead, see ParseStrict. -func Parse(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), false, &id) -} - -// ParseStrict parses an encoded ULID, returning an error in case of failure. -// -// It is like Parse, but additionally validates that the parsed ULID consists -// only of valid base32 characters. It is slightly slower than Parse. -// -// ErrDataSize is returned if the len(ulid) is different from an encoded -// ULID's length. Invalid encodings return ErrInvalidCharacters. 
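The doc comments above pin down the parsing contract of the ulid package this patch un-vendors: `Parse` checks only length and 128-bit overflow, while `ParseStrict`, defined next, additionally validates the base32 alphabet. A minimal sketch of the difference, assuming the upstream `github.com/oklog/ulid` module (the same code as this vendored copy):

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	// Parse accepts any 26-character string whose first character fits in 128 bits.
	id, err := ulid.Parse("01AN4Z07BY79KA1307SR9X4MV3")
	fmt.Println(id, err) // 01AN4Z07BY79KA1307SR9X4MV3 <nil>

	// A first character above '7' would need more than 128 bits.
	_, err = ulid.Parse("80000000000000000000000000")
	fmt.Println(err) // ulid: overflow when unmarshaling

	// ParseStrict additionally rejects characters outside Crockford's base32.
	_, err = ulid.ParseStrict("01AN4Z07BY79KA1307SR9X4MVU") // 'U' is not in the alphabet
	fmt.Println(err) // ulid: bad data characters when unmarshaling
}
```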
-func ParseStrict(ulid string) (id ULID, err error) { - return id, parse([]byte(ulid), true, &id) -} - -func parse(v []byte, strict bool, id *ULID) error { - // Check if a base32 encoded ULID is the right length. - if len(v) != EncodedSize { - return ErrDataSize - } - - // Check if all the characters in a base32 encoded ULID are part of the - // expected base32 character set. - if strict && - (dec[v[0]] == 0xFF || - dec[v[1]] == 0xFF || - dec[v[2]] == 0xFF || - dec[v[3]] == 0xFF || - dec[v[4]] == 0xFF || - dec[v[5]] == 0xFF || - dec[v[6]] == 0xFF || - dec[v[7]] == 0xFF || - dec[v[8]] == 0xFF || - dec[v[9]] == 0xFF || - dec[v[10]] == 0xFF || - dec[v[11]] == 0xFF || - dec[v[12]] == 0xFF || - dec[v[13]] == 0xFF || - dec[v[14]] == 0xFF || - dec[v[15]] == 0xFF || - dec[v[16]] == 0xFF || - dec[v[17]] == 0xFF || - dec[v[18]] == 0xFF || - dec[v[19]] == 0xFF || - dec[v[20]] == 0xFF || - dec[v[21]] == 0xFF || - dec[v[22]] == 0xFF || - dec[v[23]] == 0xFF || - dec[v[24]] == 0xFF || - dec[v[25]] == 0xFF) { - return ErrInvalidCharacters - } - - // Check if the first character in a base32 encoded ULID will overflow. This - // happens because the base32 representation encodes 130 bits, while the - // ULID is only 128 bits. - // - // See https://github.com/oklog/ulid/issues/9 for details. - if v[0] > '7' { - return ErrOverflow - } - - // Use an optimized unrolled loop (from https://github.com/RobThree/NUlid) - // to decode a base32 ULID. - - // 6 bytes timestamp (48 bits) - (*id)[0] = ((dec[v[0]] << 5) | dec[v[1]]) - (*id)[1] = ((dec[v[2]] << 3) | (dec[v[3]] >> 2)) - (*id)[2] = ((dec[v[3]] << 6) | (dec[v[4]] << 1) | (dec[v[5]] >> 4)) - (*id)[3] = ((dec[v[5]] << 4) | (dec[v[6]] >> 1)) - (*id)[4] = ((dec[v[6]] << 7) | (dec[v[7]] << 2) | (dec[v[8]] >> 3)) - (*id)[5] = ((dec[v[8]] << 5) | dec[v[9]]) - - // 10 bytes of entropy (80 bits) - (*id)[6] = ((dec[v[10]] << 3) | (dec[v[11]] >> 2)) - (*id)[7] = ((dec[v[11]] << 6) | (dec[v[12]] << 1) | (dec[v[13]] >> 4)) - (*id)[8] = ((dec[v[13]] << 4) | (dec[v[14]] >> 1)) - (*id)[9] = ((dec[v[14]] << 7) | (dec[v[15]] << 2) | (dec[v[16]] >> 3)) - (*id)[10] = ((dec[v[16]] << 5) | dec[v[17]]) - (*id)[11] = ((dec[v[18]] << 3) | dec[v[19]]>>2) - (*id)[12] = ((dec[v[19]] << 6) | (dec[v[20]] << 1) | (dec[v[21]] >> 4)) - (*id)[13] = ((dec[v[21]] << 4) | (dec[v[22]] >> 1)) - (*id)[14] = ((dec[v[22]] << 7) | (dec[v[23]] << 2) | (dec[v[24]] >> 3)) - (*id)[15] = ((dec[v[24]] << 5) | dec[v[25]]) - - return nil -} - -// MustParse is a convenience function equivalent to Parse that panics on failure -// instead of returning an error. -func MustParse(ulid string) ULID { - id, err := Parse(ulid) - if err != nil { - panic(err) - } - return id -} - -// MustParseStrict is a convenience function equivalent to ParseStrict that -// panics on failure instead of returning an error. -func MustParseStrict(ulid string) ULID { - id, err := ParseStrict(ulid) - if err != nil { - panic(err) - } - return id -} - -// String returns a lexicographically sortable string encoded ULID -// (26 characters, non-standard base 32) e.g. 01AN4Z07BY79KA1307SR9X4MV3 -// Format: tttttttttteeeeeeeeeeeeeeee where t is time and e is entropy -func (id ULID) String() string { - ulid := make([]byte, EncodedSize) - _ = id.MarshalTextTo(ulid) - return string(ulid) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface by -// returning the ULID as a byte slice. 
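`MarshalBinary` allocates a fresh slice; the `MarshalBinaryTo`/`MarshalTextTo` variants defined just below instead write into caller-supplied buffers and return `ErrBufferSize` on a length mismatch. A short sketch of the round trip, again against the upstream `github.com/oklog/ulid` module:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	id := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")

	// Text form requires exactly EncodedSize (26) bytes.
	text := make([]byte, ulid.EncodedSize)
	if err := id.MarshalTextTo(text); err != nil {
		panic(err)
	}
	fmt.Println(string(text)) // 01AN4Z07BY79KA1307SR9X4MV3

	// Binary form requires exactly 16 bytes.
	bin := make([]byte, 16)
	if err := id.MarshalBinaryTo(bin); err != nil {
		panic(err)
	}

	var back ulid.ULID
	if err := back.UnmarshalBinary(bin); err != nil {
		panic(err)
	}
	fmt.Println(back == id) // true: the same 16 bytes round-trip
}
```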
-func (id ULID) MarshalBinary() ([]byte, error) { - ulid := make([]byte, len(id)) - return ulid, id.MarshalBinaryTo(ulid) -} - -// MarshalBinaryTo writes the binary encoding of the ULID to the given buffer. -// ErrBufferSize is returned when the len(dst) != 16. -func (id ULID) MarshalBinaryTo(dst []byte) error { - if len(dst) != len(id) { - return ErrBufferSize - } - - copy(dst, id[:]) - return nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface by -// copying the passed data and converting it to an ULID. ErrDataSize is -// returned if the data length is different from ULID length. -func (id *ULID) UnmarshalBinary(data []byte) error { - if len(data) != len(*id) { - return ErrDataSize - } - - copy((*id)[:], data) - return nil -} - -// Encoding is the base 32 encoding alphabet used in ULID strings. -const Encoding = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" - -// MarshalText implements the encoding.TextMarshaler interface by -// returning the string encoded ULID. -func (id ULID) MarshalText() ([]byte, error) { - ulid := make([]byte, EncodedSize) - return ulid, id.MarshalTextTo(ulid) -} - -// MarshalTextTo writes the ULID as a string to the given buffer. -// ErrBufferSize is returned when the len(dst) != 26. -func (id ULID) MarshalTextTo(dst []byte) error { - // Optimized unrolled loop ahead. - // From https://github.com/RobThree/NUlid - - if len(dst) != EncodedSize { - return ErrBufferSize - } - - // 10 byte timestamp - dst[0] = Encoding[(id[0]&224)>>5] - dst[1] = Encoding[id[0]&31] - dst[2] = Encoding[(id[1]&248)>>3] - dst[3] = Encoding[((id[1]&7)<<2)|((id[2]&192)>>6)] - dst[4] = Encoding[(id[2]&62)>>1] - dst[5] = Encoding[((id[2]&1)<<4)|((id[3]&240)>>4)] - dst[6] = Encoding[((id[3]&15)<<1)|((id[4]&128)>>7)] - dst[7] = Encoding[(id[4]&124)>>2] - dst[8] = Encoding[((id[4]&3)<<3)|((id[5]&224)>>5)] - dst[9] = Encoding[id[5]&31] - - // 16 bytes of entropy - dst[10] = Encoding[(id[6]&248)>>3] - dst[11] = Encoding[((id[6]&7)<<2)|((id[7]&192)>>6)] - dst[12] = Encoding[(id[7]&62)>>1] - dst[13] = Encoding[((id[7]&1)<<4)|((id[8]&240)>>4)] - dst[14] = Encoding[((id[8]&15)<<1)|((id[9]&128)>>7)] - dst[15] = Encoding[(id[9]&124)>>2] - dst[16] = Encoding[((id[9]&3)<<3)|((id[10]&224)>>5)] - dst[17] = Encoding[id[10]&31] - dst[18] = Encoding[(id[11]&248)>>3] - dst[19] = Encoding[((id[11]&7)<<2)|((id[12]&192)>>6)] - dst[20] = Encoding[(id[12]&62)>>1] - dst[21] = Encoding[((id[12]&1)<<4)|((id[13]&240)>>4)] - dst[22] = Encoding[((id[13]&15)<<1)|((id[14]&128)>>7)] - dst[23] = Encoding[(id[14]&124)>>2] - dst[24] = Encoding[((id[14]&3)<<3)|((id[15]&224)>>5)] - dst[25] = Encoding[id[15]&31] - - return nil -} - -// Byte to index table for O(1) lookups when unmarshaling. -// We use 0xFF as sentinel value for invalid indexes. 
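The `dec` table that follows backs this O(1) lookup and maps both the upper- and lower-case form of each base32 letter to the same value, which is what makes decoding case-insensitive. A quick illustration (upstream module assumed):

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
)

func main() {
	// Both spellings decode to the same 16 bytes because the lookup
	// table treats 'a'..'z' and 'A'..'Z' identically.
	upper := ulid.MustParse("01AN4Z07BY79KA1307SR9X4MV3")
	lower := ulid.MustParse("01an4z07by79ka1307sr9x4mv3")
	fmt.Println(upper == lower) // true
}
```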
-var dec = [...]byte{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01, - 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, - 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF, - 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E, - 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, - 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, - 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, - 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, -} - -// EncodedSize is the length of a text encoded ULID. -const EncodedSize = 26 - -// UnmarshalText implements the encoding.TextUnmarshaler interface by -// parsing the data as string encoded ULID. -// -// ErrDataSize is returned if the len(v) is different from an encoded -// ULID's length. Invalid encodings produce undefined ULIDs. -func (id *ULID) UnmarshalText(v []byte) error { - return parse(v, false, id) -} - -// Time returns the Unix time in milliseconds encoded in the ULID. -// Use the top level Time function to convert the returned value to -// a time.Time. -func (id ULID) Time() uint64 { - return uint64(id[5]) | uint64(id[4])<<8 | - uint64(id[3])<<16 | uint64(id[2])<<24 | - uint64(id[1])<<32 | uint64(id[0])<<40 -} - -// maxTime is the maximum Unix time in milliseconds that can be -// represented in an ULID. -var maxTime = ULID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}.Time() - -// MaxTime returns the maximum Unix time in milliseconds that -// can be encoded in an ULID. -func MaxTime() uint64 { return maxTime } - -// Now is a convenience function that returns the current -// UTC time in Unix milliseconds. Equivalent to: -// Timestamp(time.Now().UTC()) -func Now() uint64 { return Timestamp(time.Now().UTC()) } - -// Timestamp converts a time.Time to Unix milliseconds. -// -// Because of the way ULID stores time, times from the year -// 10889 produces undefined results. -func Timestamp(t time.Time) uint64 { - return uint64(t.Unix())*1000 + - uint64(t.Nanosecond()/int(time.Millisecond)) -} - -// Time converts Unix milliseconds in the format -// returned by the Timestamp function to a time.Time. -func Time(ms uint64) time.Time { - s := int64(ms / 1e3) - ns := int64((ms % 1e3) * 1e6) - return time.Unix(s, ns) -} - -// SetTime sets the time component of the ULID to the given Unix time -// in milliseconds. 
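The time helpers above convert between `time.Time` and the 48-bit millisecond timestamp that `SetTime`, defined next, packs into the first six bytes. A sketch of the round trip and the documented ceiling (upstream module assumed):

```go
package main

import (
	"fmt"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	t := time.Unix(1000000, 0).UTC()

	// time.Time -> Unix milliseconds and back.
	ms := ulid.Timestamp(t)
	fmt.Println(ms)                  // 1000000000
	fmt.Println(ulid.Time(ms).UTC()) // 1970-01-12 13:46:40 +0000 UTC

	// 48 bits of milliseconds run out in the year 10889.
	fmt.Println(ulid.MaxTime()) // 281474976710655
}
```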
-func (id *ULID) SetTime(ms uint64) error { - if ms > maxTime { - return ErrBigTime - } - - (*id)[0] = byte(ms >> 40) - (*id)[1] = byte(ms >> 32) - (*id)[2] = byte(ms >> 24) - (*id)[3] = byte(ms >> 16) - (*id)[4] = byte(ms >> 8) - (*id)[5] = byte(ms) - - return nil -} - -// Entropy returns the entropy from the ULID. -func (id ULID) Entropy() []byte { - e := make([]byte, 10) - copy(e, id[6:]) - return e -} - -// SetEntropy sets the ULID entropy to the passed byte slice. -// ErrDataSize is returned if len(e) != 10. -func (id *ULID) SetEntropy(e []byte) error { - if len(e) != 10 { - return ErrDataSize - } - - copy((*id)[6:], e) - return nil -} - -// Compare returns an integer comparing id and other lexicographically. -// The result will be 0 if id==other, -1 if id < other, and +1 if id > other. -func (id ULID) Compare(other ULID) int { - return bytes.Compare(id[:], other[:]) -} - -// Scan implements the sql.Scanner interface. It supports scanning -// a string or byte slice. -func (id *ULID) Scan(src interface{}) error { - switch x := src.(type) { - case nil: - return nil - case string: - return id.UnmarshalText([]byte(x)) - case []byte: - return id.UnmarshalBinary(x) - } - - return ErrScanValue -} - -// Value implements the sql/driver.Valuer interface. This returns the value -// represented as a byte slice. If instead a string is desirable, a wrapper -// type can be created that calls String(). -// -// // stringValuer wraps a ULID as a string-based driver.Valuer. -// type stringValuer ULID -// -// func (id stringValuer) Value() (driver.Value, error) { -// return ULID(id).String(), nil -// } -// -// // Example usage. -// db.Exec("...", stringValuer(id)) -func (id ULID) Value() (driver.Value, error) { - return id.MarshalBinary() -} - -// Monotonic returns an entropy source that is guaranteed to yield -// strictly increasing entropy bytes for the same ULID timestamp. -// On conflicts, the previous ULID entropy is incremented with a -// random number between 1 and `inc` (inclusive). -// -// The provided entropy source must actually yield random bytes or else -// monotonic reads are not guaranteed to terminate, since there isn't -// enough randomness to compute an increment number. -// -// When `inc == 0`, it'll be set to a secure default of `math.MaxUint32`. -// The lower the value of `inc`, the easier the next ULID within the -// same millisecond is to guess. If your code depends on ULIDs having -// secure entropy bytes, then don't go under this default unless you know -// what you're doing. -// -// The returned io.Reader isn't safe for concurrent use. -func Monotonic(entropy io.Reader, inc uint64) io.Reader { - m := monotonic{ - Reader: bufio.NewReader(entropy), - inc: inc, - } - - if m.inc == 0 { - m.inc = math.MaxUint32 - } - - if rng, ok := entropy.(*rand.Rand); ok { - m.rng = rng - } - - return &m -} - -type monotonic struct { - io.Reader - ms uint64 - inc uint64 - entropy uint80 - rand [8]byte - rng *rand.Rand -} - -func (m *monotonic) MonotonicRead(ms uint64, entropy []byte) (err error) { - if !m.entropy.IsZero() && m.ms == ms { - err = m.increment() - m.entropy.AppendTo(entropy) - } else if _, err = io.ReadFull(m.Reader, entropy); err == nil { - m.ms = ms - m.entropy.SetBytes(entropy) - } - return err -} - -// increment the previous entropy number with a random number -// of up to m.inc (inclusive). 
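Because the timestamp occupies the most significant bytes, the `Compare` method documented above (a plain `bytes.Compare`) and ordinary string comparison both order ULIDs chronologically. A sketch (upstream module assumed):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	entropy := rand.New(rand.NewSource(1))

	older := ulid.MustNew(ulid.Timestamp(time.Unix(1000000, 0)), entropy)
	newer := ulid.MustNew(ulid.Timestamp(time.Unix(2000000, 0)), entropy)

	// Byte-wise order (and string order) matches chronological order.
	fmt.Println(older.Compare(newer))            // -1
	fmt.Println(older.String() < newer.String()) // true
}
```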
-func (m *monotonic) increment() error {
-	if inc, err := m.random(); err != nil {
-		return err
-	} else if m.entropy.Add(inc) {
-		return ErrMonotonicOverflow
-	}
-	return nil
-}
-
-// random returns a uniform random value in [1, m.inc), reading entropy
-// from m.Reader. When m.inc == 0 || m.inc == 1, it returns 1.
-// Adapted from: https://golang.org/pkg/crypto/rand/#Int
-func (m *monotonic) random() (inc uint64, err error) {
-	if m.inc <= 1 {
-		return 1, nil
-	}
-
-	// Fast path for using a underlying rand.Rand directly.
-	if m.rng != nil {
-		// Range: [1, m.inc)
-		return 1 + uint64(m.rng.Int63n(int64(m.inc))), nil
-	}
-
-	// bitLen is the maximum bit length needed to encode a value < m.inc.
-	bitLen := bits.Len64(m.inc)
-
-	// byteLen is the maximum byte length needed to encode a value < m.inc.
-	byteLen := uint(bitLen+7) / 8
-
-	// msbitLen is the number of bits in the most significant byte of m.inc-1.
-	msbitLen := uint(bitLen % 8)
-	if msbitLen == 0 {
-		msbitLen = 8
-	}
-
-	for inc == 0 || inc >= m.inc {
-		if _, err = io.ReadFull(m.Reader, m.rand[:byteLen]); err != nil {
-			return 0, err
-		}
-
-		// Clear bits in the first byte to increase the probability
-		// that the candidate is < m.inc.
-		m.rand[0] &= uint8(int(1<<msbitLen) - 1)
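This monotonic machinery is what backs `ulid.Monotonic`: within one millisecond, each new ULID's entropy is the previous entropy plus a random increment, so IDs minted in the same millisecond still sort in creation order. A sketch of the documented guarantee (upstream module assumed):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/oklog/ulid"
)

func main() {
	t := time.Unix(1000000, 0)
	// inc == 0 selects the secure default increment of math.MaxUint32.
	entropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)

	ms := ulid.Timestamp(t)
	first := ulid.MustNew(ms, entropy)
	second := ulid.MustNew(ms, entropy)

	// Same millisecond, yet the monotonic source guarantees first < second.
	fmt.Println(first.Compare(second)) // -1
}
```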
          diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index d027bdff93..7e165e4738 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -186,6 +186,20 @@ func GinkgoLabelFilter() string { return suiteConfig.LabelFilter } +/* +GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`. + +You can use this to manually check if a set of semantic version constraints would satisfy the filter via: + + if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) { + //... + } +*/ +func GinkgoSemVerFilter() string { + suiteConfig, _ := GinkgoConfiguration() + return suiteConfig.SemVerFilter +} + /* PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant when running in parallel and output to stdout/stderr is being intercepted. You generally @@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) var reporter reporters.Reporter if suiteConfig.ParallelTotal == 1 { @@ -297,7 +311,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) outputInterceptor.Shutdown() flagSet.ValidateDeprecations(deprecationTracker) @@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { return passed } -func extractSuiteConfiguration(args []any) Labels { +func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) { suiteLabels := Labels{} + suiteSemVerConstraints := SemVerConstraints{} + aroundNodes := types.AroundNodes{} configErrors := []error{} for _, arg := range args { switch arg := arg.(type) { @@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []any) Labels { reporterConfig = arg case Labels: suiteLabels = append(suiteLabels, arg...) + case SemVerConstraints: + suiteSemVerConstraints = append(suiteSemVerConstraints, arg...) 
+ case types.AroundNodeDecorator: + aroundNodes = append(aroundNodes, arg) default: configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) } @@ -342,7 +362,7 @@ func extractSuiteConfiguration(args []any) Labels { os.Exit(1) } - return suiteLabels + return suiteLabels, suiteSemVerConstraints, aroundNodes } func getwd() (string, error) { @@ -365,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1 defer func() { @@ -383,7 +403,7 @@ func PreviewSpecs(description string, args ...any) Report { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) return global.Suite.GetPreviewReport() } @@ -481,6 +501,38 @@ func pushNode(node internal.Node, errors []error) bool { return true } +// NodeArgsTransformer is a hook which is called by the test construction DSL methods +// before creating the new node. If it returns any error, the test suite +// prints those errors and exits. The text and arguments can be modified, +// which includes directly changing the args slice that is passed in. +// Arguments have been flattened already, i.e. none of the entries in args is another []any. +// The result may be nested. +// +// The node type is provided for information and remains the same. +// +// The offset is valid for calling NewLocation directly in the +// implementation of TransformNodeArgs to find the location where +// the Ginkgo DSL function is called. An additional offset supplied +// by the caller via args is already included. +// +// A NodeArgsTransformer can be registered with AddTreeConstructionNodeArgsTransformer. +type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) + +// AddTreeConstructionNodeArgsTransformer registers a NodeArgsTransformer. +// Only nodes which get created after registering a NodeArgsTransformer +// are transformed by it. The returned function can be called to +// unregister the transformer. +// +// Both may only be called during the construction phase. +// +// If there is more than one registered transformer, then the most +// recently added ones get called first. +func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() { + // This conversion could be avoided with a type alias, but type aliases make + // developer documentation less useful. + return internal.AddTreeConstructionNodeArgsTransformer(internal.NodeArgsTransformer(transformer)) +} + /* Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It). 
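The hunk above introduces `NodeArgsTransformer` and `AddTreeConstructionNodeArgsTransformer` in the vendored Ginkgo bump. The following is a hypothetical registration matching the signature shown, assuming the usual dot-import of the v2 DSL; the label name is illustrative only, not part of the API:

```go
package books_test

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

// Register during tree construction; the returned func would unregister.
var unregister = AddTreeConstructionNodeArgsTransformer(
	func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) {
		if nodeType == types.NodeTypeIt {
			// "transformed" is a made-up label for this sketch.
			args = append(args, Label("transformed"))
		}
		return text, args, nil
	},
)

var _ = Describe("a container", func() {
	// Created after registration, so the transformer sees it.
	It("gets the extra label", func() {})
})
```

Per the doc comment, only nodes constructed after registration are transformed, and the most recently added transformer runs first.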
@@ -492,7 +544,7 @@ You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-conta In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ func Describe(text string, args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...))) } /* @@ -500,7 +552,7 @@ FDescribe focuses specs within the Describe block. */ func FDescribe(text string, args ...any) bool { args = append(args, internal.Focus) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...))) } /* @@ -508,7 +560,7 @@ PDescribe marks specs within the Describe block as pending. */ func PDescribe(text string, args ...any) bool { args = append(args, internal.Pending) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...))) } /* @@ -521,21 +573,21 @@ var XDescribe = PDescribe /* Context is an alias for Describe - it generates the exact same kind of Container node */ var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe -/* When is an alias for Describe - it generates the exact same kind of Container node */ +/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */ func When(text string, args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...))) } -/* When is an alias for Describe - it generates the exact same kind of Container node */ +/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */ func FWhen(text string, args ...any) bool { args = append(args, internal.Focus) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...))) } /* When is an alias for Describe - it generates the exact same kind of Container node */ func PWhen(text string, args ...any) bool { args = append(args, internal.Pending) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...))) } var XWhen = PWhen @@ -551,7 +603,7 @@ You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. 
You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ func It(text string, args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...))) } /* @@ -559,7 +611,7 @@ FIt allows you to focus an individual It. */ func FIt(text string, args ...any) bool { args = append(args, internal.Focus) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...))) } /* @@ -567,7 +619,7 @@ PIt allows you to mark an individual It as pending. */ func PIt(text string, args ...any) bool { args = append(args, internal.Pending) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...))) } /* @@ -614,7 +666,7 @@ You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup- func BeforeSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))) } /* @@ -633,7 +685,7 @@ You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup- func AfterSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))) } /* @@ -671,7 +723,7 @@ func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) combinedArgs := []any{process1Body, allProcessBody} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))) } /* @@ -691,7 +743,7 @@ func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) b combinedArgs := []any{allProcessBody, process1Body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))) } /* @@ -704,7 +756,7 @@ You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. 
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ func BeforeEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeEach, "", args...))) } /* @@ -717,7 +769,7 @@ You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ func JustBeforeEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))) } /* @@ -732,7 +784,7 @@ You cannot nest any other Ginkgo nodes within an AfterEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ func AfterEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterEach, "", args...))) } /* @@ -744,7 +796,7 @@ You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ func JustAfterEach(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustAfterEach, "", args...))) } /* @@ -759,7 +811,7 @@ You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#o And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ func BeforeAll(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeAll, "", args...))) } /* @@ -776,7 +828,7 @@ You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#o And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall */ func AfterAll(args ...any) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterAll, "", args...))) } /* diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index c65af4ce1c..e331d7cf8c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -2,6 +2,7 @@ package ginkgo import ( "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels */ type Labels = internal.Labels +/* +SemVerConstraint decorates specs with SemVerConstraints. 
Multiple semantic version constraints can be passed to SemVerConstraint and these strings must follow the semantic version constraint rules. +SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node and a spec's semantic version constraints is the union of all semantic version constraints in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func SemVerConstraint(semVerConstraints ...string) SemVerConstraints { + return SemVerConstraints(semVerConstraints) +} + +/* +SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints. +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +*/ +type SemVerConstraints = internal.SemVerConstraints + /* PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. @@ -136,8 +154,40 @@ Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will pro */ type GracePeriod = internal.GracePeriod +/* +SpecPriority allows you to assign a priority to a spec or container. + +Specs with higher priority will be scheduled to run before specs with lower priority. The default priority is 0 and negative priorities are allowed. +*/ +type SpecPriority = internal.SpecPriority + /* SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. */ const SuppressProgressReporting = internal.SuppressProgressReporting + +/* +AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information. + +Allowed signatures: + +- AroundNode(func()) - func will be called before the node is run. +- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed in context and return a new one which will be passed on to the node. +- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node. + +Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied. + +Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread). + +Since AroundNode allows you to modify the context you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed in context. + +If applied to a container, AroundNode will run before every node in the container. Including setup nodes like BeforeEach and DeferCleanup. + +AroundNode can also be applied to RunSpecs to run before every node in the suite. 
+*/ +func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator { + return types.AroundNode(f, types.NewCodeLocation(1)) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go new file mode 100644 index 0000000000..ee6ac7b5f3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go @@ -0,0 +1,8 @@ +//go:build !go1.25 +// +build !go1.25 + +package main + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo/automaxprocs" +) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md new file mode 100644 index 0000000000..e249ebe8b3 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md @@ -0,0 +1,3 @@ +This entire directory is a lightly modified clone of https://github.com/uber-go/automaxprocs + +It will be removed when Go 1.26 ships and we no longer need to support Go 1.24 (which does not correctly autodetect maxprocs in containers). diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go new file mode 100644 index 0000000000..8a762b51d6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go @@ -0,0 +1,71 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to +// match the configured Linux CPU quota. Unlike the top-level automaxprocs +// package, it lets the caller configure logging and handle errors. +package automaxprocs + +import ( + "os" + "runtime" +) + +func init() { + Set() +} + +const _maxProcsKey = "GOMAXPROCS" + +type config struct { + procs func(int, func(v float64) int) (int, CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int +} + +// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning +// any error encountered and an undo function. +// +// Set is a no-op on non-Linux systems and in Linux environments without a +// configured CPU quota. +func Set() error { + cfg := &config{ + procs: CPUQuotaToGOMAXPROCS, + roundQuotaFunc: DefaultRoundFunc, + minGOMAXPROCS: 1, + } + + // Honor the GOMAXPROCS environment variable if present. 
Otherwise, amend + // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is + // Linux, and guarantee a minimum value of 1. The minimum guaranteed value + // can be overridden using `maxprocs.Min()`. + if _, exists := os.LookupEnv(_maxProcsKey); exists { + return nil + } + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) + if err != nil { + return err + } + if status == CPUQuotaUndefined { + return nil + } + runtime.GOMAXPROCS(maxProcs) + return nil +} diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go index fe4ecf561e..a4676933e8 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go index e89f543602..ed384891ef 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs const ( // _cgroupFSType is the Linux CGroup file system type used in diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go index 78556062fe..69a0be6b71 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go similarity index 91% rename from vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go index f9057fd273..2d83343bd9 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go @@ -21,12 +21,10 @@ //go:build linux // +build linux -package runtime +package automaxprocs import ( "errors" - - cg "go.uber.org/automaxprocs/internal/cgroups" ) // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process @@ -58,8 +56,8 @@ type queryer interface { } var ( - _newCgroups2 = cg.NewCGroups2ForCurrentProcess - _newCgroups = cg.NewCGroupsForCurrentProcess + _newCgroups2 = NewCGroups2ForCurrentProcess + _newCgroups = NewCGroupsForCurrentProcess _newQueryer = newQueryer ) @@ -68,7 +66,7 @@ func newQueryer() (queryer, error) { if err == nil { return cgroups, nil } - if errors.Is(err, cg.ErrNotV2) { + if errors.Is(err, ErrNotV2) { return _newCgroups() } return nil, err diff --git 
a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go similarity index 98% rename from vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go index e74701508e..d2d61e8941 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go @@ -21,7 +21,7 @@ //go:build !linux // +build !linux -package runtime +package automaxprocs // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process // to a valid GOMAXPROCS value. This is Linux-specific and not supported in the diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go similarity index 98% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go index 94ac75a46e..2e235d7d65 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import "fmt" diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go index f3877f78aa..7c3fa306ef 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go similarity index 98% rename from vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go index f8a2834ac0..b8ec7e502a 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package runtime +package automaxprocs import "math" diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go similarity index 99% rename from vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go rename to vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go index cddc3eaec3..881ebd5902 100644 --- a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go @@ -21,7 +21,7 @@ //go:build linux // +build linux -package cgroups +package automaxprocs import ( "bufio" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 2b36b2feb9..3021dfec2e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command { var errors []error cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) command.AbortIfErrors("Ginkgo detected configuration issues:", errors) - buildSpecs(args, cliConfig, goFlagsConfig) }, } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 8e16d2bb03..f3439a3f0c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -90,6 +90,9 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC if reporterConfig.JSONReport != "" { reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) } + if reporterConfig.GoJSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.GoJSONReport, GenerateFunc: reporters.GenerateGoTestJSONReport, MergeFunc: reporters.MergeAndCleanupGoTestJSONReports}) + } if reporterConfig.JUnitReport != "" { reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go index 41052ea19d..30d8096cd6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -107,6 +107,9 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t if reporterConfig.JSONReport != "" { reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) } + if reporterConfig.GoJSONReport != "" { + reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0) + } if reporterConfig.JUnitReport != "" { reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) } @@ -179,6 +182,9 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig if reporterConfig.JSONReport != "" { reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) } + if reporterConfig.GoJSONReport != "" { + reporterConfig.GoJSONReport = 
AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0) + } if reporterConfig.JUnitReport != "" { reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go index bd6b8fbff3..419589b48c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" "os" - _ "go.uber.org/automaxprocs" "github.com/onsi/ginkgo/v2/ginkgo/build" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/generators" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index a34d94354d..75cbdb4962 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -2,12 +2,9 @@ package watch import ( "go/build" - "regexp" + "strings" ) -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) -var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing - type Dependencies struct { deps map[string]int } @@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) { d.addDepIfNotPresent(pkg.Dir, depth) } } @@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) { d.deps[dep] = depth } } + +func matchesGinkgoOrGomega(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega") +} + +func matchesGinkgoIntegration(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 993279de29..40d1e1ab5c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -2,6 +2,7 @@ package ginkgo import ( "context" + "io" "testing" "github.com/onsi/ginkgo/v2/internal/testingtproxy" @@ -69,6 +70,8 @@ type GinkgoTInterface interface { Skipf(format string, args ...any) Skipped() bool TempDir() string + Attr(key, value string) + Output() io.Writer } /* @@ -187,3 +190,9 @@ func (g *GinkgoTBWrapper) Skipped() bool { func (g *GinkgoTBWrapper) TempDir() string { return g.GinkgoT.TempDir() } +func (g *GinkgoTBWrapper) Attr(key, value string) { + g.GinkgoT.Attr(key, value) +} +func (g *GinkgoTBWrapper) Output() io.Writer { + return g.GinkgoT.Output() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go new file mode 100644 index 0000000000..c965710205 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go @@ -0,0 +1,34 @@ +package internal + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +func ComputeAroundNodes(specs Specs) Specs { + out := Specs{} + for _, spec := range specs { + nodes := Nodes{} + currentNestingLevel := 0 + aroundNodes := types.AroundNodes{} + nestingLevelIndices := []int{} + 
for _, node := range spec.Nodes { + switch node.NodeType { + case types.NodeTypeContainer: + currentNestingLevel = node.NestingLevel + 1 + nestingLevelIndices = append(nestingLevelIndices, len(aroundNodes)) + aroundNodes = aroundNodes.Append(node.AroundNodes...) + nodes = append(nodes, node) + default: + if currentNestingLevel > node.NestingLevel { + currentNestingLevel = node.NestingLevel + aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]] + } + node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...) + nodes = append(nodes, node) + } + } + spec.Nodes = nodes + out = append(out, spec) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index e3da7d14dd..a39daf5a60 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic *Note:* specs with pending nodes are Skipped when created by NewSpec. */ -func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") @@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit }) } + if suiteConfig.SemVerFilter != "" { + semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints())) + }) + } + if len(suiteConfig.FocusFiles) > 0 { focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index 02c9fe4fcd..cc794903e7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -110,21 +110,53 @@ func newGroup(suite *Suite) *group { } } +// initialReportForSpec constructs a new SpecReport right before running the spec. 
func (g *group) initialReportForSpec(spec Spec) types.SpecReport { return types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: g.suite.config.ParallelProcess, - RunningInParallel: g.suite.isRunningInParallel(), - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), - MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(), + LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints), + ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + SpecPriority: spec.Nodes.GetSpecPriority(), + } +} + +// constructionNodeReportForTreeNode constructs a new SpecReport right before invoking the body +// of a container node during construction of the full tree. +func constructionNodeReportForTreeNode(node *TreeNode) *types.ConstructionNodeReport { + var report types.ConstructionNodeReport + // Walk up the tree and set attributes accordingly. + addNodeToReportForNode(&report, node) + return &report +} + +// addNodeToReportForNode is conceptually similar to initialReportForSpec and therefore placed here +// although it doesn't do anything with a group. +func addNodeToReportForNode(report *types.ConstructionNodeReport, node *TreeNode) { + if node.Parent != nil { + // First add the parent node, then the current one. 
+ addNodeToReportForNode(report, node.Parent) + } + report.ContainerHierarchyTexts = append(report.ContainerHierarchyTexts, node.Node.Text) + report.ContainerHierarchyLocations = append(report.ContainerHierarchyLocations, node.Node.CodeLocation) + report.ContainerHierarchyLabels = append(report.ContainerHierarchyLabels, node.Node.Labels) + report.ContainerHierarchySemVerConstraints = append(report.ContainerHierarchySemVerConstraints, node.Node.SemVerConstraints) + if node.Node.MarkedSerial { + report.IsSerial = true + } + if node.Node.MarkedOrdered { + report.IsInOrderedContainer = true } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 8096950b6c..2bccec2dbf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "reflect" + "slices" "sort" "sync" "time" @@ -46,20 +47,24 @@ type Node struct { ReportEachBody func(SpecContext, types.SpecReport) ReportSuiteBody func(SpecContext, types.Report) - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedContinueOnFailure bool - MarkedOncePerOrdered bool - FlakeAttempts int - MustPassRepeatedly int - Labels Labels - PollProgressAfter time.Duration - PollProgressInterval time.Duration - NodeTimeout time.Duration - SpecTimeout time.Duration - GracePeriod time.Duration + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedContinueOnFailure bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + SemVerConstraints SemVerConstraints + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration + AroundNodes types.AroundNodes + HasExplicitlySetSpecPriority bool + SpecPriority int NodeIDWhereCleanupWasGenerated uint } @@ -85,31 +90,47 @@ type FlakeAttempts uint type MustPassRepeatedly uint type Offset uint type Done chan<- any // Deprecated Done Channel for asynchronous testing -type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration type NodeTimeout time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration +type SpecPriority int + +type Labels []string func (l Labels) MatchesLabelFilter(query string) bool { return types.MustParseLabelFilter(query)(l) } -func UnionOfLabels(labels ...Labels) Labels { - out := Labels{} - seen := map[string]bool{} - for _, labelSet := range labels { - for _, label := range labelSet { - if !seen[label] { - seen[label] = true - out = append(out, label) +type SemVerConstraints []string + +func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool { + return types.MustParseSemVerFilter(version)(svc) +} + +func unionOf[S ~[]E, E comparable](slices ...S) S { + out := S{} + seen := map[E]bool{} + for _, slice := range slices { + for _, item := range slice { + if !seen[item] { + seen[item] = true + out = append(out, item) } } } return out } +func UnionOfLabels(labels ...Labels) Labels { + return unionOf(labels...) +} + +func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints { + return unionOf(semVerConstraints...) 
+} + func PartitionDecorations(args ...any) ([]any, []any) { decorations := []any{} remainingArgs := []any{} @@ -151,6 +172,8 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(SemVerConstraints{}): + return true case t == reflect.TypeOf(PollProgressInterval(0)): return true case t == reflect.TypeOf(PollProgressAfter(0)): @@ -161,6 +184,10 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(GracePeriod(0)): return true + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + return true + case t == reflect.TypeOf(SpecPriority(0)): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -191,6 +218,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy NodeType: nodeType, Text: text, Labels: Labels{}, + SemVerConstraints: SemVerConstraints{}, CodeLocation: types.NewCodeLocation(baseOffset), NestingLevel: -1, PollProgressAfter: -1, @@ -205,7 +233,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - args = unrollInterfaceSlice(args) + args = UnrollInterfaceSlice(args) remainingArgs := []any{} // First get the CodeLocation up-to-date @@ -221,6 +249,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } labelsSeen := map[string]bool{} + semVerConstraintsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs remainingArgs = []any{} @@ -299,6 +328,14 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) } + case t == reflect.TypeOf(SpecPriority(0)): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecPriority")) + } + node.SpecPriority = int(arg.(SpecPriority)) + node.HasExplicitlySetSpecPriority = true + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator)) case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -311,6 +348,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(err) } } + case t == reflect.TypeOf(SemVerConstraints{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint")) + } + for _, semVerConstraint := range arg.(SemVerConstraints) { + if !semVerConstraintsSeen[semVerConstraint] { + semVerConstraintsSeen[semVerConstraint] = true + semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation) + node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint) + appendError(err) + } + } case t.Kind() == reflect.Func: if nodeType.Is(types.NodeTypeContainer) { if node.Body != nil { @@ -599,7 +648,7 @@ func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(stri }) } - return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...) 
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs)
}

func (n Node) IsZero() bool {
@@ -824,6 +873,32 @@ func (n Nodes) UnionOfLabels() []string {
return out
}

+func (n Nodes) SemVerConstraints() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].SemVerConstraints == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].SemVerConstraints)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfSemVerConstraints() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, constraint := range n[i].SemVerConstraints {
+ if !seen[constraint] {
+ seen[constraint] = true
+ out = append(out, constraint)
+ }
+ }
+ }
+ return out
+}
+
func (n Nodes) CodeLocations() []types.CodeLocation {
out := make([]types.CodeLocation, len(n))
for i := range n {
@@ -920,7 +995,16 @@ func (n Nodes) GetMaxMustPassRepeatedly() int {
return maxMustPassRepeatedly
}

-func unrollInterfaceSlice(args any) []any {
+func (n Nodes) GetSpecPriority() int {
+ for i := len(n) - 1; i >= 0; i-- {
+ if n[i].HasExplicitlySetSpecPriority {
+ return n[i].SpecPriority
+ }
+ }
+ return 0
+}
+
+func UnrollInterfaceSlice(args any) []any {
 v := reflect.ValueOf(args)
 if v.Kind() != reflect.Slice {
 return []any{args}
@@ -928,11 +1012,67 @@ func unrollInterfaceSlice(args any) []any {
 out := []any{}
 for i := 0; i < v.Len(); i++ {
 el := reflect.ValueOf(v.Index(i).Interface())
- if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
- out = append(out, unrollInterfaceSlice(el.Interface())...)
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) {
+ out = append(out, UnrollInterfaceSlice(el.Interface())...)
 } else {
 out = append(out, v.Index(i).Interface())
 }
 }
 return out
}
+
+type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error)
+
+// AddTreeConstructionNodeArgsTransformer registers a NodeArgsTransformer and
+// returns a function that deregisters it again.
+func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() {
+ id := nodeArgsTransformerCounter
+ nodeArgsTransformerCounter++
+ nodeArgsTransformers = append(nodeArgsTransformers, registeredNodeArgsTransformer{id, transformer})
+ return func() {
+ nodeArgsTransformers = slices.DeleteFunc(nodeArgsTransformers, func(transformer registeredNodeArgsTransformer) bool {
+ return transformer.id == id
+ })
+ }
+}
+
+var (
+ nodeArgsTransformerCounter int64
+ nodeArgsTransformers []registeredNodeArgsTransformer
+)
+
+type registeredNodeArgsTransformer struct {
+ id int64
+ transformer NodeArgsTransformer
+}
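To make the registration API concrete, here is a hedged sketch (the wrapping test file and init placement are assumptions; the names come from this diff) of a transformer that prefixes the text of every node created while the tree is built:

```go
package internal_test // hypothetical placement; this is internal Ginkgo API

import (
	"github.com/onsi/ginkgo/v2/internal"
	"github.com/onsi/ginkgo/v2/types"
)

func init() {
	// Register a transformer; returning nil errors lets tree construction proceed.
	undo := internal.AddTreeConstructionNodeArgsTransformer(
		func(nodeType types.NodeType, offset internal.Offset, text string, args []any) (string, []any, []error) {
			return "[transformed] " + text, args, nil
		})
	_ = undo // call undo() later to deregister the transformer again
}
```

+// TransformNewNodeArgs is the helper for DSL functions which handles NodeArgsTransformers.
+//
+// Its return values are intentionally the same as the internal.NewNode parameters,
+// which makes it possible to chain the invocations:
+//
+//	NewNode(TransformNewNodeArgs(...))
+func TransformNewNodeArgs(exitIfErrors func([]error), deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (*types.DeprecationTracker, types.NodeType, string, []any) {
+ var errs []error
+
+ // Most recent first...
+ //
+ // This intentionally doesn't use slices.Backward because
+ // using iterators influences stack unwinding.
+ for i := len(nodeArgsTransformers) - 1; i >= 0; i-- {
+ transformer := nodeArgsTransformers[i].transformer
+ args = UnrollInterfaceSlice(args)
+
+ // We do not really need to recompute this on additional loop iterations,
+ // but it's fast and simpler this way.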
+ var offset Offset
+ for _, arg := range args {
+ if o, ok := arg.(Offset); ok {
+ offset = o
+ }
+ }
+ offset += 3 // The DSL function, this helper, and the NodeArgsTransformer invocation.
+
+ text, args, errs = transformer(nodeType, offset, text, args)
+ exitIfErrors(errs)
+ }
+ return deprecationTracker, nodeType, text, args
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
index 84eea0a59e..da58d54f95 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -125,7 +125,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 // pick out a representative spec
 representativeSpec := specs[executionGroups[groupID][0]]

- // and grab the node on the spec that will represent which shufflable group this execution group belongs tu
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
 shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)

 //add the execution group to its shufflable group
@@ -138,14 +138,35 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
 }
 }

+ // now, for each shufflable group, we compute the priority
+ shufflableGroupingIDPriorities := map[uint]int{}
+ for shufflableGroupingID, groupIDs := range shufflableGroupingIDToGroupIDs {
+ // the priority of a shufflable grouping is the max priority of any spec in any execution group in the shufflable grouping
+ maxPriority := -1 << 31 // min int32; low enough to act as -infinity for priorities
+ for _, groupID := range groupIDs {
+ for _, specIdx := range executionGroups[groupID] {
+ specPriority := specs[specIdx].Nodes.GetSpecPriority()
+ maxPriority = max(specPriority, maxPriority)
+ }
+ }
+ shufflableGroupingIDPriorities[shufflableGroupingID] = maxPriority
+ }
+
 // now we permute the sorted shufflable grouping IDs and build the ordered Groups
- orderedGroups := GroupedSpecIndices{}
 permutation := r.Perm(len(shufflableGroupingIDs))
- for _, j := range permutation {
- //let's get the execution group IDs for this shufflable group:
- executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
- // and we'll add their associated specindices to the orderedGroups slice:
- for _, executionGroupID := range executionGroupIDsForJ {
+ shuffledGroupingIds := make([]uint, len(shufflableGroupingIDs))
+ for i, j := range permutation {
+ shuffledGroupingIds[i] = shufflableGroupingIDs[j]
+ }
+ // now, we need to stable sort the shuffledGroupingIds by priority (higher priority first)
+ sort.SliceStable(shuffledGroupingIds, func(i, j int) bool {
+ return shufflableGroupingIDPriorities[shuffledGroupingIds[i]] > shufflableGroupingIDPriorities[shuffledGroupingIds[j]]
+ })
+
+ // we can now take these prioritized, shuffled, groupings and form the final set of ordered spec groups
+ orderedGroups := GroupedSpecIndices{}
+ for _, id := range shuffledGroupingIds {
+ for _, executionGroupID := range shufflableGroupingIDToGroupIDs[id] {
 orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
 }
 }
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
index 11269cf1f2..165cbc4b67 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine,
error) {
 }
 functionCall.Filename = line[:delimiterIdx]
 line = strings.Split(line[delimiterIdx+1:], " ")[0]
- lineNumber, err := strconv.ParseInt(line, 10, 64)
+ lineNumber, err := strconv.ParseInt(line, 10, 32)
 functionCall.Line = int(lineNumber)
 if err != nil {
 return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go
new file mode 100644
index 0000000000..8b7a9ceabf
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go
@@ -0,0 +1,158 @@
+package reporters
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/packages"
+)
+
+func ptr[T any](in T) *T {
+ return &in
+}
+
+type encoder interface {
+ Encode(v any) error
+}
+
+// gojsonEvent matches the format from go internals
+// https://github.com/golang/go/blob/master/src/cmd/internal/test2json/test2json.go#L31-L41
+// https://pkg.go.dev/cmd/test2json
+type gojsonEvent struct {
+ Time *time.Time `json:",omitempty"`
+ Action GoJSONAction
+ Package string `json:",omitempty"`
+ Test string `json:",omitempty"`
+ Elapsed *float64 `json:",omitempty"`
+ Output *string `json:",omitempty"`
+ FailedBuild string `json:",omitempty"`
+}
+
+type GoJSONAction string
+
+const (
+ // start - the test binary is about to be executed
+ GoJSONStart GoJSONAction = "start"
+ // run - the test has started running
+ GoJSONRun GoJSONAction = "run"
+ // pause - the test has been paused
+ GoJSONPause GoJSONAction = "pause"
+ // cont - the test has continued running
+ GoJSONCont GoJSONAction = "cont"
+ // pass - the test passed
+ GoJSONPass GoJSONAction = "pass"
+ // bench - the benchmark printed log output but did not fail
+ GoJSONBench GoJSONAction = "bench"
+ // fail - the test or benchmark failed
+ GoJSONFail GoJSONAction = "fail"
+ // output - the test printed output
+ GoJSONOutput GoJSONAction = "output"
+ // skip - the test was skipped or the package contained no tests
+ GoJSONSkip GoJSONAction = "skip"
+)
+
+func goJSONActionFromSpecState(state types.SpecState) GoJSONAction {
+ switch state {
+ case types.SpecStateInvalid:
+ return GoJSONFail
+ case types.SpecStatePending:
+ return GoJSONSkip
+ case types.SpecStateSkipped:
+ return GoJSONSkip
+ case types.SpecStatePassed:
+ return GoJSONPass
+ case types.SpecStateFailed:
+ return GoJSONFail
+ case types.SpecStateAborted:
+ return GoJSONFail
+ case types.SpecStatePanicked:
+ return GoJSONFail
+ case types.SpecStateInterrupted:
+ return GoJSONFail
+ case types.SpecStateTimedout:
+ return GoJSONFail
+ default:
+ panic(fmt.Sprintf("unexpected spec state %v: this should not happen", state))
+ }
+}
+
+// gojsonReport wraps types.Report and calculates extra fields required by gojson
+type gojsonReport struct {
+ o types.Report
+ // Extra calculated fields
+ goPkg string
+ elapsed float64
+}
+
+func newReport(in types.Report) *gojsonReport {
+ return &gojsonReport{
+ o: in,
+ }
+}
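The events above mirror test2json's line-delimited JSON. As a hedged illustration (standalone, re-declaring an equivalent struct since gojsonEvent is unexported; the package path and test name are made up), this is roughly the stream shape emitted for one passing spec:

```go
package main

import (
	"encoding/json"
	"os"
	"time"
)

// event mirrors the gojsonEvent fields above (names match cmd/test2json).
type event struct {
	Time    *time.Time `json:",omitempty"`
	Action  string
	Package string   `json:",omitempty"`
	Test    string   `json:",omitempty"`
	Elapsed *float64 `json:",omitempty"`
	Output  *string  `json:",omitempty"`
}

func main() {
	now := time.Now()
	elapsed := 0.42
	enc := json.NewEncoder(os.Stdout)
	// Emits, e.g.: {"Time":"...","Action":"run","Package":"example.com/pkg","Test":"[It] works"}
	enc.Encode(event{Time: &now, Action: "run", Package: "example.com/pkg", Test: "[It] works"})
	enc.Encode(event{Time: &now, Action: "pass", Package: "example.com/pkg", Test: "[It] works", Elapsed: &elapsed})
}
```

+func (r *gojsonReport) Fill() error {
+ // NOTE: could the types.Report include the go package name?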
+ goPkg, err := suitePathToPkg(r.o.SuitePath)
+ if err != nil {
+ return err
+ }
+ r.goPkg = goPkg
+ r.elapsed = r.o.RunTime.Seconds()
+ return nil
+}
+
+// gojsonSpecReport wraps types.SpecReport and calculates extra fields required by gojson
+type gojsonSpecReport struct {
+ o types.SpecReport
+ // extra calculated fields
+ testName string
+ elapsed float64
+ action GoJSONAction
+}
+
+func newSpecReport(in types.SpecReport) *gojsonSpecReport {
+ return &gojsonSpecReport{
+ o: in,
+ }
+}
+
+func (sr *gojsonSpecReport) Fill() error {
+ sr.elapsed = sr.o.RunTime.Seconds()
+ sr.testName = createTestName(sr.o)
+ sr.action = goJSONActionFromSpecState(sr.o.State)
+ return nil
+}
+
+func suitePathToPkg(dir string) (string, error) {
+ cfg := &packages.Config{
+ Mode: packages.NeedFiles | packages.NeedSyntax,
+ }
+ pkgs, err := packages.Load(cfg, dir)
+ if err != nil {
+ return "", err
+ }
+ if len(pkgs) != 1 {
+ return "", errors.New("expected to load exactly one package for the suite path")
+ }
+ return pkgs[0].ID, nil
+}
+
+func createTestName(spec types.SpecReport) string {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
+ name = strings.TrimSpace(name)
+ return name
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go
new file mode 100644
index 0000000000..ec5311d069
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go
@@ -0,0 +1,111 @@
+package reporters
+
+type GoJSONEventWriter struct {
+ enc encoder
+ specSystemErrFn specSystemExtractFn
+ specSystemOutFn specSystemExtractFn
+}
+
+func NewGoJSONEventWriter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONEventWriter {
+ return &GoJSONEventWriter{
+ enc: enc,
+ specSystemErrFn: errFn,
+ specSystemOutFn: outFn,
+ }
+}
+
+func (r *GoJSONEventWriter) writeEvent(e *gojsonEvent) error {
+ return r.enc.Encode(e)
+}
+
+func (r *GoJSONEventWriter) WriteSuiteStart(report *gojsonReport) error {
+ e := &gojsonEvent{
+ Time: &report.o.StartTime,
+ Action: GoJSONStart,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSuiteResult(report *gojsonReport) error {
+ var action GoJSONAction
+ switch {
+ case report.o.PreRunStats.SpecsThatWillRun == 0:
+ action = GoJSONSkip
+ case report.o.SuiteSucceeded:
+ action = GoJSONPass
+ default:
+ action = GoJSONFail
+ }
+ e := &gojsonEvent{
+ Time: &report.o.EndTime,
+ Action: action,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ Elapsed: ptr(report.elapsed),
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSpecStart(report *gojsonReport, specReport *gojsonSpecReport) error {
+ e := &gojsonEvent{
+ Time: &specReport.o.StartTime,
+ Action: GoJSONRun,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
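A hedged wiring sketch (within this internal package; the stdout destination and the two extractor closures are assumptions, the real CLI supplies its own): any value with an Encode(any) error method satisfies the unexported encoder interface, so a json.Encoder works directly.

```go
package reporters // hypothetical placement: encoder and the writer live in this internal package

import (
	"encoding/json"
	"os"

	"github.com/onsi/ginkgo/v2/types"
)

// newStdoutEventWriter is an illustrative helper, not part of the patch.
func newStdoutEventWriter() *GoJSONEventWriter {
	return NewGoJSONEventWriter(
		json.NewEncoder(os.Stdout), // any Encode(any) error implementation works
		func(spec types.SpecReport) string { return spec.CapturedStdOutErr },
		func(spec types.SpecReport) string { return spec.CapturedGinkgoWriterOutput },
	)
}
```

+func (r *GoJSONEventWriter) WriteSpecOut(report *gojsonReport, specReport *gojsonSpecReport) error {
+ events := []*gojsonEvent{}
+
+ stdErr := r.specSystemErrFn(specReport.o)
+ if stdErr != "" {
+ events = append(events, &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: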
GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdErr), + FailedBuild: "", + }) + } + stdOut := r.specSystemOutFn(specReport.o) + if stdOut != "" { + events = append(events, &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: GoJSONOutput, + Test: specReport.testName, + Package: report.goPkg, + Output: ptr(stdOut), + FailedBuild: "", + }) + } + + for _, ev := range events { + err := r.writeEvent(ev) + if err != nil { + return err + } + } + return nil +} + +func (r *GoJSONEventWriter) WriteSpecResult(report *gojsonReport, specReport *gojsonSpecReport) error { + e := &gojsonEvent{ + Time: &specReport.o.EndTime, + Action: specReport.action, + Test: specReport.testName, + Package: report.goPkg, + Elapsed: ptr(specReport.elapsed), + Output: nil, + FailedBuild: "", + } + return r.writeEvent(e) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go new file mode 100644 index 0000000000..633e49b88d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go @@ -0,0 +1,45 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +type GoJSONReporter struct { + ev *GoJSONEventWriter +} + +type specSystemExtractFn func (spec types.SpecReport) string + +func NewGoJSONReporter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONReporter { + return &GoJSONReporter{ + ev: NewGoJSONEventWriter(enc, errFn, outFn), + } +} + +func (r *GoJSONReporter) Write(originalReport types.Report) error { + // suite start events + report := newReport(originalReport) + err := report.Fill() + if err != nil { + return err + } + r.ev.WriteSuiteStart(report) + for _, originalSpecReport := range originalReport.SpecReports { + specReport := newSpecReport(originalSpecReport) + err := specReport.Fill() + if err != nil { + return err + } + if specReport.o.LeafNodeType == types.NodeTypeIt { + // handle any It leaf node as a spec + r.ev.WriteSpecStart(report, specReport) + r.ev.WriteSpecOut(report, specReport) + r.ev.WriteSpecResult(report, specReport) + } else { + // handle any other leaf node as generic output + r.ev.WriteSpecOut(report, specReport) + } + } + r.ev.WriteSuiteResult(report) + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2d2ea2fc35..99c9c5f5be 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -2,6 +2,7 @@ package internal import ( "context" + "reflect" "github.com/onsi/ginkgo/v2/types" ) @@ -11,6 +12,7 @@ type SpecContext interface { SpecReport() types.SpecReport AttachProgressReporter(func() string) func() + WrappedContext() context.Context } type specContext struct { @@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext { func (sc *specContext) SpecReport() types.SpecReport { return sc.suite.CurrentSpecReport() } + +func (sc *specContext) WrappedContext() context.Context { + return sc.Context +} + +/* +The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext. +We support this by taking their context.Context and returning a SpecContext that wraps it. 
+*/ +func wrapContextChain(ctx context.Context) SpecContext { + if ctx == nil { + return nil + } + if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) { + return ctx.(*specContext) + } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok { + return &specContext{ + Context: ctx, + ProgressReporterManager: sc.ProgressReporterManager, + cancel: sc.cancel, + suite: sc.suite, + } + } + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 3edf507765..ef76cd099e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -32,6 +32,7 @@ type Suite struct { suiteNodes Nodes cleanupNodes Nodes + aroundNodes types.AroundNodes failer *Failer reporter reporters.Reporter @@ -41,6 +42,8 @@ type Suite struct { config types.SuiteConfig deadline time.Time + currentConstructionNodeReport *types.ConstructionNodeReport + skipAll bool report types.Report currentSpecReport types.SpecReport @@ -87,6 +90,7 @@ func (suite *Suite) Clone() (*Suite, error) { ProgressReporterManager: NewProgressReporterManager(), topLevelContainers: suite.topLevelContainers.Clone(), suiteNodes: suite.suiteNodes.Clone(), + aroundNodes: suite.aroundNodes.Clone(), selectiveLock: &sync.Mutex{}, }, nil } @@ -104,13 +108,14 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) - specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig) + specs = ComputeAroundNodes(specs) suite.phase = PhaseRun suite.client = client @@ -120,6 +125,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.outputInterceptor = outputInterceptor suite.interruptHandler = interruptHandler suite.config = suiteConfig + suite.aroundNodes = suiteAroundNodes if suite.config.Timeout > 0 { suite.deadline = time.Now().Add(suite.config.Timeout) @@ -127,7 +133,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) - success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs) cancelProgressHandler() @@ -199,6 +205,14 @@ func (suite *Suite) 
PushNode(node Node) error { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() + + // Ensure that code running in the body of the container node + // has access to information about the current container node(s). + suite.currentConstructionNodeReport = constructionNodeReportForTreeNode(suite.tree) + defer func() { + suite.currentConstructionNodeReport = nil + }() + node.Body(nil) return err }() @@ -259,6 +273,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + node.AroundNodes = types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...) suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) suite.selectiveLock.Unlock() @@ -327,6 +342,16 @@ func (suite *Suite) By(text string, callback ...func()) error { return nil } +func (suite *Suite) CurrentConstructionNodeReport() types.ConstructionNodeReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + report := suite.currentConstructionNodeReport + if report == nil { + panic("CurrentConstructionNodeReport may only be called during construction of the spec tree") + } + return *report +} + /* Spec Running methods - used during PhaseRun */ @@ -428,13 +453,14 @@ func (suite *Suite) processCurrentSpecReport() { } } -func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { numSpecsThatWillBeRun := specs.CountWithoutSkip() suite.report = types.Report{ SuitePath: suitePath, SuiteDescription: description, SuiteLabels: suiteLabels, + SuiteSemVerConstraints: suiteSemVerConstraints, SuiteConfig: suite.config, SuiteHasProgrammaticFocus: hasProgrammaticFocus, PreRunStats: types.PreRunStats{ @@ -891,7 +917,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ failureC <- failureFromRun }() - node.Body(sc) + aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...) + if len(aroundNodes) > 0 { + i := 0 + var f func(context.Context) + f = func(c context.Context) { + sc := wrapContextChain(c) + if sc == nil { + suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. 
You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation) + return + } + i++ + if i < len(aroundNodes) { + aroundNodes[i].Body(sc, f) + } else { + node.Body(sc) + } + } + aroundNodes[0].Body(sc, f) + if i != len(aroundNodes) { + suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation) + } + } else { + node.Body(sc) + } finished = true }() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index b4ecc7cb83..9806e315a6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -229,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int { func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { return t.attachProgressReporter(f) } +func (t *ginkgoTestingTProxy) Output() io.Writer { + return t.writer +} +func (t *ginkgoTestingTProxy) Attr(key, value string) { + t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 74ad0768b7..026d9cf9b3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { if len(report.SuiteLabels) > 0 { r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) } + if len(report.SuiteSemVerConstraints) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", "))) + } r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) if report.SuiteConfig.ParallelTotal > 1 { r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) @@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { bannerWidth = len(labels) + 2 } } + if len(report.SuiteSemVerConstraints) > 0 { + semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints)) + if len(semVerConstraints)+2 > bannerWidth { + bannerWidth = len(semVerConstraints) + 2 + } + } r.emitBlock(strings.Repeat("=", bannerWidth)) out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) @@ -371,13 +381,22 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim cursor := 0 for _, entry := range timeline { tl := entry.GetTimelineLocation() - if tl.Offset < len(gw) { - r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) - cursor = tl.Offset - } else if cursor < len(gw) { + + end := tl.Offset + if end > len(gw) { + end = len(gw) + } + if end < cursor { + end = cursor + } + if cursor < end && cursor <= len(gw) && end <= len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:end])) + cursor = end + } else if cursor < len(gw) && end == len(gw) { r.emit(r.fi(indent, "%s", gw[cursor:])) cursor = len(gw) } + switch x := entry.(type) { case types.Failure: if isVeryVerbose { @@ -394,7 +413,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim case types.ReportEntry: r.emitReportEntry(indent, x) case types.ProgressReport: - r.emitProgressReport(indent, false, x) + r.emitProgressReport(indent, false, 
isVeryVerbose, x) case types.SpecEvent: if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { r.emitSpecEvent(indent, x, isVeryVerbose) @@ -448,7 +467,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur if !failure.ProgressReport.IsZero() { r.emitBlock("\n") - r.emitProgressReport(indent, false, failure.ProgressReport) + r.emitProgressReport(indent, false, false, failure.ProgressReport) } if failure.AdditionalFailure != nil && includeAdditionalFailure { @@ -464,11 +483,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) - r.emitProgressReport(1, shouldEmitGW, report) + r.emitProgressReport(1, shouldEmitGW, true, report) r.emitDelimiter(1) } -func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) { if report.Message != "" { r.emitBlock(r.fi(indent, report.Message+"\n")) indent += 1 @@ -504,6 +523,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::group::Progress Report")) + } + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) @@ -550,6 +573,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) r.emitGoroutines(indent, otherGoroutines...) } + + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::endgroup::")) + } } func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { @@ -698,8 +725,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { } func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { - texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} - texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) + texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{} + texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...) 
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
 texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
@@ -707,6 +734,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
 texts = append(texts, r.f(report.LeafNodeText))
 }
 labels = append(labels, report.LeafNodeLabels)
+ semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints)
 locations = append(locations, report.LeafNodeLocation)

 failureLocation := report.Failure.FailureNodeLocation
@@ -720,6 +748,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
 texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
 locations = append([]types.CodeLocation{failureLocation}, locations...)
 labels = append([][]string{{}}, labels...)
+ semVerConstraints = append([][]string{{}}, semVerConstraints...)
 highlightIndex = 0
 case types.FailureNodeInContainer:
 i := report.Failure.FailureNodeContainerIndex
@@ -747,6 +776,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
 if len(labels[i]) > 0 {
 out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
 }
+ if len(semVerConstraints[i]) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", "))
+ }
 out += "\n"
 out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
 }
@@ -770,6 +802,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
 if len(flattenedLabels) > 0 {
 out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
 }
+ flattenedSemVerConstraints := report.SemVerConstraints()
+ if len(flattenedSemVerConstraints) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", "))
+ }
 out += "\n"
 if usePreciseFailureLocation {
 out += r.f("{{gray}}%s{{/}}", failureLocation)
diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go
new file mode 100644
index 0000000000..d02fb7a1ae
--- /dev/null
+++ b/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go
@@ -0,0 +1,61 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/internal/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateGoTestJSONReport produces a JSON-formatted report in the test2json format used by `go test -json`
+func GenerateGoTestJSONReport(report types.Report, destination string) error {
+ // walk report and generate test2json-compatible objects
+ // JSON-encode the objects into filename
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ r := reporters.NewGoJSONReporter(
+ enc,
+ systemErrForUnstructuredReporters,
+ systemOutForUnstructuredReporters,
+ )
+ return r.Write(report)
+}
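As a hedged usage sketch (the suite file and destination path are assumptions; GenerateGoTestJSONReport is the function added just above), the same report can be produced programmatically from a ReportAfterSuite node, mirroring what the --gojson-report CLI flag automates:

```go
package suite_test

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

var _ = ginkgo.ReportAfterSuite("gojson report", func(report ginkgo.Report) {
	// "reports/suite.gojson.json" is an arbitrary example destination.
	if err := reporters.GenerateGoTestJSONReport(report, "reports/suite.gojson.json"); err != nil {
		ginkgo.Fail(fmt.Sprintf("failed to generate go test JSON report: %s", err))
	}
})
```

+// MergeAndCleanupGoTestJSONReports produces a single JSON-formatted report at the passed-in destination by merging the JSON-formatted reports provided in sources
+// It skips over reports that fail to be read but reports on them via the returned messages []string
+func MergeAndCleanupGoTestJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ defer f.Close()
+
+ for _, source := range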
sources { + data, err := os.ReadFile(source) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) + continue + } + _, err = f.Write(data) + if err != nil { + messages = append(messages, fmt.Sprintf("Could not write to %s:\n%s", destination, err.Error())) + continue + } + os.Remove(source) + } + return messages, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62ba..828f893fb8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool + // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name + OmitSpecSemVerConstraints bool + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool @@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"SemVerFilter", report.SuiteConfig.SemVerFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, @@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit owner = matches[1] } } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = strings.TrimSpace(name) test := JUnitTestCase{ diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82e1..55e1d1f4f7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error { name := report.SuiteDescription labels := report.SuiteLabels + semVerConstraints := report.SuiteSemVerConstraints if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) @@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error { if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = tcEscape(name) 
fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index 5bf2e62e90..4e86dba84d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -27,6 +27,8 @@ CurrentSpecReport returns information about the current running spec. The returned object is a types.SpecReport which includes helper methods to make extracting information about the spec easier. +During construction of the test tree the result is empty. + You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec */ @@ -34,6 +36,31 @@ func CurrentSpecReport() SpecReport { return global.Suite.CurrentSpecReport() } +/* +ConstructionNodeReport describes the container nodes during construction of +the spec tree. It provides a subset of the information that is provided +by SpecReport at runtime. + +It is documented here: [types.ConstructionNodeReport] +*/ +type ConstructionNodeReport = types.ConstructionNodeReport + +/* +CurrentConstructionNodeReport returns information about the current container nodes +that are leading to the current path in the spec tree. +The returned object is a types.ConstructionNodeReport which includes helper methods +to make extracting information about the spec easier. + +May only be called during construction of the spec tree. It panics when +called while tests are running. Use CurrentSpecReport instead in that +phase. + +You can learn more about ConstructionNodeReport here: [types.ConstructionNodeReport] +*/ +func CurrentTreeConstructionNodeReport() ConstructionNodeReport { + return global.Suite.CurrentConstructionNodeReport() +} + /* ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter @@ -92,7 +119,7 @@ func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))) } /* @@ -116,7 +143,7 @@ func ReportAfterEach(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))) } /* @@ -145,7 +172,7 @@ You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spe func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))) } /* @@ -165,7 +192,7 @@ ReportAfterSuite nodes must be created at the top-level (i.e. 
not nested in a Co When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes -In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. +In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, GoJSON, JUnit, and Teamcity formatted reports using the --json-report, --gojson-report, --junit-report, and --teamcity-report ginkgo CLI flags. You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically @@ -177,7 +204,7 @@ You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spe func ReportAfterSuite(text string, body any, args ...any) bool { combinedArgs := []any{body} combinedArgs = append(combinedArgs, args...) - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) + return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))) } func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { @@ -188,6 +215,12 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error())) } } + if reporterConfig.GoJSONReport != "" { + err := reporters.GenerateGoTestJSONReport(report, reporterConfig.GoJSONReport) + if err != nil { + Fail(fmt.Sprintf("Failed to generate Go JSON report:\n%s", err.Error())) + } + } if reporterConfig.JUnitReport != "" { err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport) if err != nil { @@ -206,6 +239,9 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re if reporterConfig.JSONReport != "" { flags = append(flags, "--json-report") } + if reporterConfig.GoJSONReport != "" { + flags = append(flags, "--gojson-report") + } if reporterConfig.JUnitReport != "" { flags = append(flags, "--junit-report") } @@ -213,9 +249,11 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re flags = append(flags, "--teamcity-report") } pushNode(internal.NewNode( - deprecationTracker, types.NodeTypeReportAfterSuite, - fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), - body, - types.NewCustomCodeLocation("autogenerated by Ginkgo"), + internal.TransformNewNodeArgs( + exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, + fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), + body, + types.NewCustomCodeLocation("autogenerated by Ginkgo"), + ), )) } diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index b9e0ca9ef7..1031aa8554 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -309,11 +309,11 @@ func generateTable(description string, isSubtree bool, args ...any) { internalNodeType = types.NodeTypeContainer } - pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...)) + pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, 
deprecationTracker, internalNodeType, description, internalNodeArgs...))) } }) - pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)) + pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))) } func invokeFunction(function any, parameters []any) []reflect.Value { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go new file mode 100644 index 0000000000..a069e0623d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go @@ -0,0 +1,56 @@ +package types + +import ( + "context" +) + +type AroundNodeAllowedFuncs interface { + ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func() +} +type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context)) + +func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator { + if f == nil { + panic("BuildAroundNode cannot be called with a nil function.") + } + var aroundNodeFunc func(context.Context, func(context.Context)) + switch x := any(f).(type) { + case func(context.Context, func(context.Context)): + aroundNodeFunc = x + case func(context.Context) context.Context: + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + ctx = x(ctx) + body(ctx) + } + case func(): + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + x() + body(ctx) + } + } + + return AroundNodeDecorator{ + Body: aroundNodeFunc, + CodeLocation: cl, + } +} + +type AroundNodeDecorator struct { + Body AroundNodeFunc + CodeLocation CodeLocation +} + +type AroundNodes []AroundNodeDecorator + +func (an AroundNodes) Clone() AroundNodes { + out := make(AroundNodes, len(an)) + copy(out, an) + return out +} + +func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes { + out := make(AroundNodes, len(an)+len(other)) + copy(out, an) + copy(out[len(an):], other) + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 2e827efe30..f847036046 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -24,6 +24,7 @@ type SuiteConfig struct { FocusFiles []string SkipFiles []string LabelFilter string + SemVerFilter string FailOnPending bool FailOnEmpty bool FailFast bool @@ -95,6 +96,7 @@ type ReporterConfig struct { ForceNewlines bool JSONReport string + GoJSONReport string JUnitReport string TeamcityReport string } @@ -111,7 +113,7 @@ func (rc ReporterConfig) Verbosity() VerbosityLevel { } func (rc ReporterConfig) WillGenerateReport() bool { - return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" + return rc.JSONReport != "" || rc.GoJSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" } func NewDefaultReporterConfig() ReporterConfig { @@ -308,6 +310,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. 
'(cat || dog) && !fruit'"}, + {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version", + Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -356,6 +360,8 @@ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, + {KeyPath: "R.GoJSONReport", Name: "gojson-report", UsageArgument: "filename.json", SectionKey: "output", + Usage: "If set, Ginkgo will generate a Go JSON-formatted test report at the specified location."}, {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", @@ -443,6 +449,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re } } + if suiteConfig.SemVerFilter != "" { + _, err := ParseSemVerFilter(suiteConfig.SemVerFilter) + if err != nil { + errors = append(errors, err) + } + } + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: @@ -573,6 +586,9 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, +} + +var GoBuildOFlags = GinkgoFlags{ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } @@ -673,7 +689,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( - GoBuildFlags, + GoBuildFlags.CopyAppend(GoBuildOFlags...), map[string]any{ "Go": &goFlagsConfig, }, @@ -763,6 +779,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoBuildOFlags...) 
bindings := map[string]any{ "C": cliConfig, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c2796b5490..59313238cf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { } } +func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid SemVerConstraint", + Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg), + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + +func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty SemVerConstraint", + Message: "SemVerConstraint cannot be empty", + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go new file mode 100644 index 0000000000..3fc2ed144b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +type SemVerFilter func([]string) bool + +func MustParseSemVerFilter(input string) SemVerFilter { + filter, err := ParseSemVerFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) { + if filterVersion == "" { + return func(_ []string) bool { return true }, nil + } + + targetVersion, err := semver.NewVersion(filterVersion) + if err != nil { + return nil, fmt.Errorf("invalid filter version: %w", err) + } + + return func(constraints []string) bool { + // unconstrained specs always run + if len(constraints) == 0 { + return true + } + + for _, constraintStr := range constraints { + constraint, err := semver.NewConstraint(constraintStr) + if err != nil { + return false + } + + if !constraint.Check(targetVersion) { + return false + } + } + + return true + }, nil +} + +func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) { + if len(semVerConstraint) == 0 { + return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl) + } + _, err := semver.NewConstraint(semVerConstraint) + if err != nil { + return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl) + } + + return semVerConstraint, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1ba8..9981a0dd68 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "sort" "strings" "time" @@ -19,6 +20,57 @@ func init() { } } +// ConstructionNodeReport captures information about a Ginkgo spec. +type ConstructionNodeReport struct { + // ContainerHierarchyTexts is a slice containing the text strings of + // all Describe/Context/When containers in this spec's hierarchy. + ContainerHierarchyTexts []string + + // ContainerHierarchyLocations is a slice containing the CodeLocations of + // all Describe/Context/When containers in this spec's hierarchy. 
+ ContainerHierarchyLocations []CodeLocation + + // ContainerHierarchyLabels is a slice containing the labels of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchyLabels [][]string + + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // IsSerial captures whether the any container has the Serial decorator + IsSerial bool + + // IsInOrderedContainer captures whether any container is an Ordered container + IsInOrderedContainer bool +} + +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +func (report ConstructionNodeReport) FullText() string { + texts := []string{} + texts = append(texts, report.ContainerHierarchyTexts...) + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) + return strings.Join(texts, " ") +} + +// Labels returns a deduped set of all the spec's Labels. +func (report ConstructionNodeReport) Labels() []string { + out := []string{} + seen := map[string]bool{} + for _, labels := range report.ContainerHierarchyLabels { + for _, label := range labels { + if !seen[label] { + seen[label] = true + out = append(out, label) + } + } + } + + return out +} + // Report captures information about a Ginkgo test run type Report struct { //SuitePath captures the absolute path to the test suite @@ -30,6 +82,9 @@ type Report struct { //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function SuiteLabels []string + //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function + SuiteSemVerConstraints []string + //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. //If false, the test run is considered unsuccessful @@ -129,13 +184,21 @@ type SpecReport struct { // all Describe/Context/When containers in this spec's hierarchy ContainerHierarchyLabels [][]string - // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be // one of the NodeTypesForSuiteLevelNodes node types) - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + + // Captures the Spec Priority + SpecPriority int // State captures whether the spec has passed, failed, etc. 
State SpecState @@ -198,48 +261,52 @@ type SpecReport struct { func (report SpecReport) MarshalJSON() ([]byte, error) { //All this to avoid emitting an empty Failure struct in the JSON out := struct { - ContainerHierarchyTexts []string - ContainerHierarchyLocations []CodeLocation - ContainerHierarchyLabels [][]string - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string - State SpecState - StartTime time.Time - EndTime time.Time - RunTime time.Duration - ParallelProcess int - Failure *Failure `json:",omitempty"` - NumAttempts int - MaxFlakeAttempts int - MaxMustPassRepeatedly int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` - ProgressReports []ProgressReport `json:",omitempty"` - AdditionalFailures []AdditionalFailure `json:",omitempty"` - SpecEvents SpecEvents `json:",omitempty"` + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + ContainerHierarchySemVerConstraints [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ - ContainerHierarchyTexts: report.ContainerHierarchyTexts, - ContainerHierarchyLocations: report.ContainerHierarchyLocations, - ContainerHierarchyLabels: report.ContainerHierarchyLabels, - LeafNodeType: report.LeafNodeType, - LeafNodeLocation: report.LeafNodeLocation, - LeafNodeLabels: report.LeafNodeLabels, - LeafNodeText: report.LeafNodeText, - State: report.State, - StartTime: report.StartTime, - EndTime: report.EndTime, - RunTime: report.RunTime, - ParallelProcess: report.ParallelProcess, - Failure: nil, - ReportEntries: nil, - NumAttempts: report.NumAttempts, - MaxFlakeAttempts: report.MaxFlakeAttempts, - MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, - CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, - CapturedStdOutErr: report.CapturedStdOutErr, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, } if !report.Failure.IsZero() { @@ -287,6 +354,9 @@ 
func (report SpecReport) FullText() string { if report.LeafNodeText != "" { texts = append(texts, report.LeafNodeText) } + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) return strings.Join(texts, " ") } @@ -312,6 +382,28 @@ func (report SpecReport) Labels() []string { return out } +// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints. +func (report SpecReport) SemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints { + for _, semVerConstraint := range semVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + } + for _, semVerConstraint := range report.LeafNodeSemVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + + return out +} + // MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) @@ -321,6 +413,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } +// MatchesSemVerFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) { + filter, err := ParseSemVerFilter(version) + if err != nil { + return false, err + } + return filter(report.SemVerConstraints()), nil +} + // FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 158ac2fd89..b9c1ea9856 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.4" +const VERSION = "2.27.2" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 890d892228..b7d7309f3f 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,32 @@ +## 1.38.2 + +- roll back to go 1.23.0 [c404969] + +## 1.38.1 + +### Fixes + +Numerous minor fixes and dependency bumps + +## 1.38.0 + +### Features +- gstruct handles extra unexported fields [4ee7ed0] + +### Fixes +- support [] in IgnoringTopFunction function signatures (#851) [36bbf72] + +### Maintenance +- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408] +- Fix typo [acd1f55] +- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0] +- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f] +- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812] +- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9] +- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729] +- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1] +- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6] + ## 1.37.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index a491a64be7..fdba34ee9d 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.37.0" 
+const GOMEGA_VERSION = "1.38.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() { // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: // diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index a3a646e4ad..4121505b62 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -452,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } else { var fgErr formattedGomegaError - if errors.As(actualErr, &fgErr) { + if errors.As(matcherErr, &fgErr) { message += fgErr.FormattedGomegaError() + "\n" } else { message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 532fc37449..ce74eee4c7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -2,6 +2,7 @@ package matchers import ( "bytes" + "errors" "fmt" "github.com/google/go-cmp/cmp" @@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr if err, ok := r.(error); ok { matchErr = err } else if errMsg, ok := r.(string); ok { - matchErr = fmt.Errorf(errMsg) + matchErr = errors.New(errMsg) } } }() diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 95057c26cc..c3da9bd48b 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type MatchYAMLMatcher struct { diff --git a/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go b/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go index cb44bdc6d1..3b223c969f 100644 --- a/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go +++ b/vendor/github.com/operator-framework/ansible-operator-plugins/internal/version/version.go @@ -28,5 +28,5 @@ var ( // and release process, this variable will be removed. // TODO: find a way to make this automated. For now manually update this before releases. 
- ImageVersion = "v1.38.1" + ImageVersion = "v1.42.0" ) diff --git a/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go b/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go index 94f14a22ac..397190a6a1 100644 --- a/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go +++ b/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go @@ -209,7 +209,7 @@ func loadBundle(csvName string, dir string) (*Bundle, error) { } defer fileReader.Close() - decoder := yaml.NewYAMLOrJSONDecoder(fileReader, 30) + decoder := yaml.NewYAMLToJSONDecoder(fileReader) obj := &unstructured.Unstructured{} if err = decoder.Decode(obj); err != nil { errs = append(errs, fmt.Errorf("unable to decode object: %s", err)) diff --git a/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go b/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go index a533ffe989..7207400e65 100644 --- a/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go +++ b/vendor/github.com/operator-framework/api/pkg/validation/internal/multiarch.go @@ -513,7 +513,7 @@ func (data *multiArchValidator) checkNodeAffinity(images map[string][]platform) if !imagePlatformDataValid { // Node affinity info is missing from CSV (or invalid) data.warns = append(data.warns, - fmt.Errorf("check if the CSV is missing a node affinity configuration for the image: %q. "+ + fmt.Errorf("check if the CSV is missing a node affinity configuration for the image: %q. ", image, )) } diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go index 293d9363b2..6a0451a261 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/declcfg/write.go @@ -5,8 +5,10 @@ import ( "encoding/json" "fmt" "io" + "maps" "os" "path/filepath" + "slices" "sort" "strings" @@ -20,6 +22,7 @@ import ( type MermaidWriter struct { MinEdgeName string SpecifiedPackageName string + DrawV0Semantics bool } type MermaidOption func(*MermaidWriter) @@ -32,6 +35,7 @@ func NewMermaidWriter(opts ...MermaidOption) *MermaidWriter { m := &MermaidWriter{ MinEdgeName: minEdgeName, SpecifiedPackageName: specifiedPackageName, + DrawV0Semantics: true, } for _, opt := range opts { @@ -52,6 +56,12 @@ func WithSpecifiedPackageName(specifiedPackageName string) MermaidOption { } } +func WithV0Semantics(drawV0Semantics bool) MermaidOption { + return func(o *MermaidWriter) { + o.DrawV0Semantics = drawV0Semantics + } +} + // writes out the channel edges of the declarative config graph in a mermaid format capable of being pasted into // mermaid renderers like github, mermaid.live, etc. 
// output is sorted lexicographically by package name, and then by channel name @@ -124,7 +134,10 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } var deprecatedPackage string - deprecatedChannels := []string{} + deprecatedChannelIDs := []string{} + decoratedBundleIDs := map[string][]string{"deprecated": {}, "skipped": {}, "deprecatedskipped": {}} + linkID := 0 + skippedLinkIDs := []string{} for _, c := range cfg.Channels { filteredChannel := writer.filterChannel(&c, versionMap, minVersion, minEdgePackage) @@ -137,58 +150,102 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) } channelID := fmt.Sprintf("%s-%s", filteredChannel.Package, filteredChannel.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %%%% channel %q\n", filteredChannel.Name)) - pkgBuilder.WriteString(fmt.Sprintf(" subgraph %s[%q]\n", channelID, filteredChannel.Name)) + fmt.Fprintf(pkgBuilder, " %%%% channel %q\n", filteredChannel.Name) + fmt.Fprintf(pkgBuilder, " subgraph %s[%q]\n", channelID, filteredChannel.Name) if depByPackage.Has(filteredChannel.Package) { deprecatedPackage = filteredChannel.Package } if depByChannel.Has(filteredChannel.Name) { - deprecatedChannels = append(deprecatedChannels, channelID) + deprecatedChannelIDs = append(deprecatedChannelIDs, channelID) } - for _, ce := range filteredChannel.Entries { - if versionMap[ce.Name].GE(minVersion) { - bundleDeprecation := "" - if depByBundle.Has(ce.Name) { - bundleDeprecation = ":::deprecated" + // sort edges by decreasing version + sortedEntries := make([]*ChannelEntry, 0, len(filteredChannel.Entries)) + for i := range filteredChannel.Entries { + sortedEntries = append(sortedEntries, &filteredChannel.Entries[i]) + } + sort.Slice(sortedEntries, func(i, j int) bool { + // Sort by decreasing version: greater version comes first + return versionMap[sortedEntries[i].Name].GT(versionMap[sortedEntries[j].Name]) + }) + + skippedEntities := sets.Set[string]{} + + const ( + captureNewEntry = true + processExisting = false + ) + handleSemantics := func(edge string, linkID int, captureNew bool) { + if writer.DrawV0Semantics { + if captureNew { + if skippedEntities.Has(edge) { + skippedLinkIDs = append(skippedLinkIDs, fmt.Sprintf("%d", linkID)) + } else { + skippedEntities.Insert(edge) + } + } else { + if skippedEntities.Has(edge) { + skippedLinkIDs = append(skippedLinkIDs, fmt.Sprintf("%d", linkID)) + } } + } + } - entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]%s\n", entryID, ce.Name, bundleDeprecation)) + for _, ce := range sortedEntries { + entryID := fmt.Sprintf("%s-%s", channelID, ce.Name) + fmt.Fprintf(pkgBuilder, " %s[%q]\n", entryID, ce.Name) + + // mermaid allows specification of only a single decoration class, so any combinations must be independently represented + switch { + case depByBundle.Has(ce.Name) && skippedEntities.Has(ce.Name): + decoratedBundleIDs["deprecatedskipped"] = append(decoratedBundleIDs["deprecatedskipped"], entryID) + case depByBundle.Has(ce.Name): + decoratedBundleIDs["deprecated"] = append(decoratedBundleIDs["deprecated"], entryID) + case skippedEntities.Has(ce.Name): + decoratedBundleIDs["skipped"] = append(decoratedBundleIDs["skipped"], entryID) + } - if len(ce.Replaces) > 0 { - replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name)) - } - if len(ce.Skips) > 0 { - for _, s := range ce.Skips { - skipsID 
:= fmt.Sprintf("%s-%s", channelID, s) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name)) - } + if len(ce.Skips) > 0 { + for _, s := range ce.Skips { + skipsID := fmt.Sprintf("%s-%s", channelID, s) + fmt.Fprintf(pkgBuilder, " %s[%q]-- %s --> %s[%q]\n", skipsID, s, "skip", entryID, ce.Name) + handleSemantics(s, linkID, captureNewEntry) + linkID++ } - if len(ce.SkipRange) > 0 { - skipRange, err := semver.ParseRange(ce.SkipRange) - if err == nil { - for _, edgeName := range filteredChannel.Entries { - if skipRange(versionMap[edgeName.Name]) { - skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) - pkgBuilder.WriteString(fmt.Sprintf(" %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name)) - } + } + if len(ce.SkipRange) > 0 { + skipRange, err := semver.ParseRange(ce.SkipRange) + if err == nil { + for _, edgeName := range filteredChannel.Entries { + if skipRange(versionMap[edgeName.Name]) { + skipRangeID := fmt.Sprintf("%s-%s", channelID, edgeName.Name) + fmt.Fprintf(pkgBuilder, " %s[%q]-- \"%s(%s)\" --> %s[%q]\n", skipRangeID, edgeName.Name, "skipRange", ce.SkipRange, entryID, ce.Name) + handleSemantics(ce.Name, linkID, processExisting) + linkID++ } - } else { - fmt.Fprintf(os.Stderr, "warning: ignoring invalid SkipRange for package/edge %q/%q: %v\n", c.Package, ce.Name, err) } + } else { + fmt.Fprintf(os.Stderr, "warning: ignoring invalid SkipRange for package/edge %q/%q: %v\n", c.Package, ce.Name, err) } } + // have to process replaces last, because applicablity can be impacted by skips + if len(ce.Replaces) > 0 { + replacesID := fmt.Sprintf("%s-%s", channelID, ce.Replaces) + fmt.Fprintf(pkgBuilder, " %s[%q]-- %s --> %s[%q]\n", replacesID, ce.Replaces, "replace", entryID, ce.Name) + handleSemantics(ce.Name, linkID, processExisting) + linkID++ + } } - pkgBuilder.WriteString(" end\n") + fmt.Fprintf(pkgBuilder, " end\n") } } _, _ = out.Write([]byte("graph LR\n")) _, _ = out.Write([]byte(" classDef deprecated fill:#E8960F\n")) + _, _ = out.Write([]byte(" classDef skipped stroke:#FF0000,stroke-width:4px\n")) + _, _ = out.Write([]byte(" classDef deprecatedskipped fill:#E8960F,stroke:#FF0000,stroke-width:4px\n")) pkgNames := []string{} for pname := range pkgs { pkgNames = append(pkgNames, pname) @@ -197,22 +254,36 @@ func (writer *MermaidWriter) WriteChannels(cfg DeclarativeConfig, out io.Writer) return pkgNames[i] < pkgNames[j] }) for _, pkgName := range pkgNames { - _, _ = out.Write([]byte(fmt.Sprintf(" %%%% package %q\n", pkgName))) - _, _ = out.Write([]byte(fmt.Sprintf(" subgraph %q\n", pkgName))) + _, _ = fmt.Fprintf(out, " %%%% package %q\n", pkgName) + _, _ = fmt.Fprintf(out, " subgraph %q\n", pkgName) _, _ = out.Write([]byte(pkgs[pkgName].String())) _, _ = out.Write([]byte(" end\n")) } if deprecatedPackage != "" { - _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#989695\n", deprecatedPackage))) + _, _ = fmt.Fprintf(out, "style %s fill:#989695\n", deprecatedPackage) + } + + if len(deprecatedChannelIDs) > 0 { + for _, deprecatedChannel := range deprecatedChannelIDs { + _, _ = fmt.Fprintf(out, "style %s fill:#DCD0FF\n", deprecatedChannel) + } } - if len(deprecatedChannels) > 0 { - for _, deprecatedChannel := range deprecatedChannels { - _, _ = out.Write([]byte(fmt.Sprintf("style %s fill:#DCD0FF\n", deprecatedChannel))) + // express the decoration classes + sortedKeys := slices.Sorted(maps.Keys(decoratedBundleIDs)) + for _, key := range sortedKeys { + if 
len(decoratedBundleIDs[key]) > 0 { + b := slices.Clone(decoratedBundleIDs[key]) + slices.Sort(b) + _, _ = fmt.Fprintf(out, "class %s %s\n", strings.Join(b, ","), key) } } + if len(skippedLinkIDs) > 0 { + _, _ = fmt.Fprintf(out, "linkStyle %s %s\n", strings.Join(skippedLinkIDs, ","), "stroke:#FF0000,stroke-width:3px,stroke-dasharray:5;") + } + return nil } @@ -537,7 +608,9 @@ func writeFile(cfg DeclarativeConfig, filename string, writeFunc WriteFunc) erro if err := writeFunc(cfg, buf); err != nil { return fmt.Errorf("write to buffer for %q: %v", filename, err) } - if err := os.WriteFile(filename, buf.Bytes(), 0600); err != nil { + // we explicitly want to generate content from this function which is limited only by the user's umask (G306) + // nolint:gosec + if err := os.WriteFile(filename, buf.Bytes(), 0666); err != nil { return fmt.Errorf("write file %q: %v", filename, err) } return nil diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go b/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go index 9b4e3ae858..af6c391e6a 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go @@ -288,6 +288,10 @@ func (c *Channel) validateReplacesChain() error { if _, ok := chainFrom[cur.Name]; !ok { chainFrom[cur.Name] = []string{cur.Name} } + // if the replaces edge is known to be skipped, disregard it + if skippedBundles.Has(cur.Replaces) { + break + } for k := range chainFrom { chainFrom[k] = append(chainFrom[k], cur.Replaces) } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go index 9c421dc682..9d9575942d 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go @@ -19,9 +19,9 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/errdefs" - "github.com/containers/image/v5/docker/reference" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/image/v5/docker/reference" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go index 95e343f4c0..5d0366f81d 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/resolver.go @@ -7,10 +7,10 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containers/common/pkg/auth" - "github.com/containers/image/v5/pkg/docker/config" - "github.com/containers/image/v5/types" dockerconfig "github.com/docker/cli/cli/config" + "go.podman.io/common/pkg/auth" + "go.podman.io/image/v5/pkg/docker/config" + "go.podman.io/image/v5/types" ) func NewResolver(client *http.Client, configDir string, plainHTTP bool, repo string) (remotes.Resolver, error) { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go 
b/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go index 9527dad0b7..ec6e920d34 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/containersimageregistry/registry.go @@ -8,17 +8,17 @@ import ( "path/filepath" "github.com/containerd/containerd/archive" - "github.com/containers/common/pkg/auth" - "github.com/containers/image/v5/copy" - "github.com/containers/image/v5/docker" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/oci/layout" - "github.com/containers/image/v5/pkg/compression" - "github.com/containers/image/v5/pkg/docker/config" - "github.com/containers/image/v5/signature" - "github.com/containers/image/v5/types" dockerconfig "github.com/docker/cli/cli/config" + "go.podman.io/common/pkg/auth" + "go.podman.io/image/v5/copy" + "go.podman.io/image/v5/docker" + "go.podman.io/image/v5/docker/reference" + "go.podman.io/image/v5/image" + "go.podman.io/image/v5/oci/layout" + "go.podman.io/image/v5/pkg/compression" + "go.podman.io/image/v5/pkg/docker/config" + "go.podman.io/image/v5/signature" + "go.podman.io/image/v5/types" "oras.land/oras-go/v2/content/oci" orimage "github.com/operator-framework/operator-registry/pkg/image" diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index ec52857a3e..47f0f59142 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -113,7 +113,7 @@ dockers: checksum: name_template: 'sha256sums.txt' snapshot: - name_template: "{{ incpatch .Version }}-next" + version_template: "{{ incpatch .Version }}-next" release: github: owner: pelletier diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index c3df8bee1c..189be525e1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -59,7 +59,7 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // // With this feature enabled, types implementing the unstable/Unmarshaler // interface can be decoded from any structure of the document. It allows types -// that don't have a straightfoward TOML representation to provide their own +// that don't have a straightforward TOML representation to provide their own // decoding logic. // // Currently, types can only decode from a single value. 
Tables and array tables diff --git a/vendor/github.com/proglottis/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go index 15af69c865..62a095c128 100644 --- a/vendor/github.com/proglottis/gpgme/gpgme.go +++ b/vendor/github.com/proglottis/gpgme/gpgme.go @@ -877,6 +877,12 @@ func (k *Key) KeyListMode() KeyListMode { return res } +func (k *Key) Fingerprint() string { + res := C.GoString(k.k.fpr) + runtime.KeepAlive(k) + return res +} + type SubKey struct { k C.gpgme_subkey_t parent *Key // make sure the key is not released when we have a reference to a subkey diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index ad347113c0..2331b8b4f3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const help: help, variableLabels: variableLabels.compile(), } - if !model.IsValidMetricName(model.LabelValue(fqName)) { + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + if !model.NameValidationScheme.IsValidMetricName(fqName) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index 8b016355ad..7bac0da33d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') { groups = append(groups, group) } return groups @@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) + _, err := fmt.Fprintf(buf, format, args...) return err } ws := func(s string) error { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index f7f97ef926..d273b6640e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) } // Our current conversion moves to legacy naming, so use legacy validation. 
- valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) + valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c21911f292..5fe8d3b4d2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error { } func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) + //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme. + return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 592eec3e24..76e59f1288 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error { case pb.Counter != nil: pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] case pb.Histogram != nil: + h := pb.Histogram for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. - i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) && + e.GetTimestamp() != nil { + h.Exemplars = append(h.Exemplars, e) + if len(h.Bucket) == 0 { + // Don't proceed to classic buckets if there are none. + continue + } + } + // h.Bucket are sorted by UpperBound. + i := sort.Search(len(h.Bucket), func(i int) bool { + return h.Bucket[i].GetUpperBound() >= e.GetValue() }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e + if i < len(h.Bucket) { + h.Bucket[i].Exemplar = e } else { // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + CumulativeCount: proto.Uint64(h.GetSampleCount()), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e, } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + h.Bucket = append(h.Bucket, b) } } default: @@ -227,6 +237,7 @@ type Exemplar struct { // Only last applicable exemplar is injected from the list. // For example for Counter it means last exemplar is injected. // For Histogram, it means last applicable exemplar for each bucket is injected. +// For a Native Histogram, all valid exemplars are injected. // // NewMetricWithExemplars works best with MustNewConstMetric and // MustNewConstHistogram, see example. 
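
The exemplar hunk above changes withExemplarsMetric.Write so that native-histogram exemplars (zero threshold, zero count, or spans set, and a timestamp present) are accumulated on h.Exemplars, while classic-bucket exemplars still land in the first bucket whose upper bound covers the exemplar value. A minimal sketch of the public API this affects, assuming only client_golang's documented NewDesc, MustNewConstHistogram, and MustNewMetricWithExemplars; the metric name, bucket bounds, and trace_id label are illustrative and not taken from this patch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	desc := prometheus.NewDesc("http_request_duration_seconds", "Request latency.", nil, nil)

	// A pre-aggregated classic histogram: 10 observations summing to 4.2s,
	// with cumulative counts keyed by bucket upper bound.
	h := prometheus.MustNewConstHistogram(desc, 10, 4.2,
		map[float64]uint64{0.1: 3, 0.5: 7, 1.0: 10})

	// Attach an exemplar; per the hunk above it is placed in the first
	// classic bucket whose upper bound is >= the exemplar value (0.5 here).
	hx := prometheus.MustNewMetricWithExemplars(h, prometheus.Exemplar{
		Value:     0.42,
		Labels:    prometheus.Labels{"trace_id": "abc123"},
		Timestamp: time.Now(),
	})

	var pb dto.Metric
	if err := hx.Write(&pb); err != nil {
		panic(err)
	}
	// Buckets are sorted by upper bound, so index 1 is the 0.5 bucket.
	fmt.Println(pb.Histogram.Bucket[1].GetExemplar().GetValue()) // 0.42
}
```

With a native histogram (non-zero ZeroThreshold/ZeroCount or spans), the same call would instead append every timestamped exemplar to h.Exemplars, only falling through to classic buckets when any exist.
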
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go index 0a61b98461..b32c95fa3f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -25,9 +25,9 @@ import ( "golang.org/x/sys/unix" ) -// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// errNotImplemented is returned by stub functions that replace cgo functions, when cgo // isn't available. -var notImplementedErr = errors.New("not implemented") +var errNotImplemented = errors.New("not implemented") type memoryInfo struct { vsize uint64 // Virtual memory size in bytes @@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if memInfo, err := getMemory(); err == nil { ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) - } else if !errors.Is(err, notImplementedErr) { + } else if !errors.Is(err, errNotImplemented) { // Don't report an error when support is not compiled in. c.reportError(ch, c.rss, err) c.reportError(ch, c.vsize, err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go index 8ddb0995d6..378865129b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -16,7 +16,7 @@ package prometheus func getMemory() (*memoryInfo, error) { - return nil, notImplementedErr + return nil, errNotImplemented } // describe returns all descriptions of the collector for Darwin. 
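
The rename from notImplementedErr to errNotImplemented above follows the Go convention that sentinel error variables carry an err prefix, and processCollect matches against it with errors.Is so that builds without cgo simply skip the memory metrics instead of reporting an error. A self-contained sketch of that sentinel pattern, assuming nothing beyond the standard library; getMemory and memoryInfo here are stand-ins mirroring the unexported symbols in these hunks:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotImplemented mirrors the collector's sentinel: stubs return it when a
// capability (here, cgo-backed memory stats on darwin) was compiled out.
var errNotImplemented = errors.New("not implemented")

type memoryInfo struct {
	vsize uint64 // virtual memory size in bytes
	rss   uint64 // resident memory size in bytes
}

// getMemory stands in for the nocgo stub shown above.
func getMemory() (*memoryInfo, error) {
	return nil, errNotImplemented
}

func main() {
	if mem, err := getMemory(); err == nil {
		fmt.Println("rss:", mem.rss, "vsize:", mem.vsize)
	} else if !errors.Is(err, errNotImplemented) {
		// Real failures surface; missing support is skipped silently,
		// matching the branch structure in processCollect above.
		fmt.Println("error:", err)
	}
}
```
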
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 9f4b130bef..8074f70f5d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) { if netstat, err := p.Netstat(); err == nil { var inOctets, outOctets float64 - if netstat.IpExt.InOctets != nil { - inOctets = *netstat.IpExt.InOctets + if netstat.InOctets != nil { + inOctets = *netstat.InOctets } - if netstat.IpExt.OutOctets != nil { - outOctets = *netstat.IpExt.OutOctets + if netstat.OutOctets != nil { + outOctets = *netstat.OutOctets } ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 356edb7868..9332b0249a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { labels := prometheus.Labels{} - if !(code || method) { + if !code && !method { return labels } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 2c808eece0..487b466563 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { return false } - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) + return m.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those @@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool { return false } - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) + return m.deleteByHashWithLabels(h, labels, m.curry) } // DeletePartialMatch deletes all metrics where the variable labels contain all of those @@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int { labels, closer := constrainLabels(m.desc, labels) defer closer() - return m.metricMap.deleteByLabels(labels, m.curry) + return m.deleteByLabels(labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't @@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil + return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names @@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { return nil, err } - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil + return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { diff --git 
a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 25da157f15..2ed1285068 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
 // metric names that are standardized across applications, as that would break
 // horizontal monitoring, for example the metrics provided by the Go collector
 // (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
+// fact, those metrics are already prefixed with "go_" or "process_",
 // respectively.)
 //
 // Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
 	}
 }
 
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that, is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector effectively un-registering the
+// metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		labels:           labels,
+	}
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		prefix:           prefix,
+	}
+}
+
 type wrappingRegisterer struct {
 	wrappedRegisterer Registerer
 	prefix            string
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 1448439b7f..7b762370e2 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format {
 	return FmtUnknown
 }
 
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
 func NewDecoder(r io.Reader, format Format) Decoder {
+	scheme := model.LegacyValidation
+	if format.ToEscapingScheme() == model.NoEscaping {
+		scheme = model.UTF8Validation
+	}
 	switch format.FormatType() {
 	case TypeProtoDelim:
-		return &protoDecoder{r: bufio.NewReader(r)}
+		return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+	case TypeProtoText, TypeProtoCompact:
+		return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
 	}
-	return &textDecoder{r: r}
+	return &textDecoder{r: r, s: scheme}
 }
 
 // protoDecoder implements the Decoder interface for protocol buffers.
 type protoDecoder struct {
 	r protodelim.Reader
+	s model.ValidationScheme
 }
 
 // Decode implements the Decoder interface.
@@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
 	if err := opts.UnmarshalFrom(d.r, v); err != nil {
 		return err
 	}
-	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+	if !d.s.IsValidMetricName(v.GetName()) {
 		return fmt.Errorf("invalid metric name %q", v.GetName())
 	}
 	for _, m := range v.GetMetric() {
@@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
 			if !model.LabelValue(l.GetValue()).IsValid() {
 				return fmt.Errorf("invalid label value %q", l.GetValue())
 			}
-			if !model.LabelName(l.GetName()).IsValid() {
+			if !d.s.IsValidLabelName(l.GetName()) {
 				return fmt.Errorf("invalid label name %q", l.GetName())
 			}
 		}
@@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
 	return nil
 }
 
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+	err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+	return d.err
+}
+
 // textDecoder implements the Decoder interface for the text protocol.
 type textDecoder struct {
 	r    io.Reader
 	fams map[string]*dto.MetricFamily
+	s    model.ValidationScheme
 	err  error
 }
 
@@ -126,7 +151,7 @@ type textDecoder struct {
 func (d *textDecoder) Decode(v *dto.MetricFamily) error {
 	if d.err == nil {
 		// Read all metrics in one shot.
-		var p TextParser
+		p := NewTextParser(d.s)
 		d.fams, d.err = p.TextToMetricFamilies(d.r)
 		// If we don't get an error, store io.EOF for the end.
 		if d.err == nil {
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index d7f3d76f55..73c24dfbc9 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,14 +18,12 @@ import (
 	"io"
 	"net/http"
 
+	"github.com/munnerz/goautoneg"
+
 	dto "github.com/prometheus/client_model/go"
 	"google.golang.org/protobuf/encoding/protodelim"
 	"google.golang.org/protobuf/encoding/prototext"
 
 	"github.com/prometheus/common/model"
-
-	"github.com/munnerz/goautoneg"
-
-	dto "github.com/prometheus/client_model/go"
 )
 
 // Encoder types encode metric families into an underlying wire protocol.
@@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error {
 // appropriate accepted type is found, FmtText is returned (which is the
 // Prometheus text format). This function will never negotiate FmtOpenMetrics,
 // as the support is still experimental.
To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. +// FmtOpenMetrics, use NegotiateIncludingOpenMetrics. func Negotiate(h http.Header) Format { escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { @@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := protodelim.MarshalTo(w, v) + _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index b26886560d..c34c7de432 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -36,9 +36,11 @@ const ( ProtoType = `application/vnd.google.protobuf` ProtoProtocol = `io.prometheus.client.MetricFamily` // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + //nolint:revive // Allow for underscores. OpenMetricsVersion_0_0_1 = "0.0.1" + //nolint:revive // Allow for underscores. OpenMetricsVersion_1_0_0 = "1.0.0" // The Content-Type values for the different wire protocols. Do not do direct @@ -54,8 +56,10 @@ const ( // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + //nolint:revive // Allow for underscores. FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) @@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType { // Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid // "escaping" term exists, that will be used. Otherwise, the global default will // be returned. -func (format Format) ToEscapingScheme() model.EscapingScheme { - for _, p := range strings.Split(string(format), ";") { +func (f Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(f), ";") { toks := strings.Split(p, "=") if len(toks) != 2 { continue diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dfac962a4e..0290f6abc4 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -17,7 +17,11 @@ package expfmt -import "bytes" +import ( + "bytes" + + "github.com/prometheus/common/model" +) // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // @@ -26,9 +30,8 @@ import "bytes" // // Further input samples should go in the folder fuzz/corpus. 
func Fuzz(in []byte) int { - parser := TextParser{} + parser := NewTextParser(model.UTF8Validation) _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - if err != nil { return 0 } diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index a21ed4ec1f..8dbf6d04ed 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,10 @@ import ( "strconv" "strings" + dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/types/known/timestamppb" "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" ) type encoderOption struct { @@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E // Finally the samples, one line for each. if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" + compliantName += "_total" } for _, metric := range in.Metric { switch metricType { @@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - err = (*e).Timestamp.CheckValid() + err = e.Timestamp.CheckValid() if err != nil { return written, err } - ts := (*e).Timestamp.AsTime() + ts := e.Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 4b86434b33..c4e9c1bbc3 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" ) // enhancedWriter has all the enhanced write functions needed here. bufio.Writer @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(name) { + if !model.LegacyValidation.IsValidMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(name) { + if model.LegacyValidation.IsValidMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b4607fe4d2..8f2edde324 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -78,6 +78,14 @@ type TextParser struct { // These indicate if the metric name from the current line being parsed is inside // braces and if that metric name was found respectively. currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool + // scheme sets the desired ValidationScheme for names. Defaults to the invalid + // UnsetValidation. + scheme model.ValidationScheme +} + +// NewTextParser returns a new TextParser with the provided nameValidationScheme. +func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser { + return TextParser{scheme: nameValidationScheme} } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF func (p *TextParser) reset(in io.Reader) { p.metricFamiliesByName = map[string]*dto.MetricFamily{} + p.currentLabelPairs = nil if p.buf == nil { p.buf = bufio.NewReader(in) } else { @@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn { return nil } p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } // Now is the time to fix the type if it hasn't happened yet. if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() @@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn { switch p.currentByte { case ',': p.setOrCreateCurrentMF() + if p.err != nil { + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn { return p.startLabelName case '}': p.setOrCreateCurrentMF() + if p.err != nil { + p.currentLabelPairs = nil + return nil + } if p.currentMF.Type == nil { p.currentMF.Type = dto.MetricType_UNTYPED.Enum() } @@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn { p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + p.currentLabelPairs = nil + return nil + } + if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) { + p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName())) + p.currentLabelPairs = nil return nil } // Special summary/histogram treatment. Don't add 'quantile' and 'le' // labels to 'real' labels. 
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) && + (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) { p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) for _, l := range p.currentLabelPairs { lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { + if _, exists := labels[lName]; exists { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) p.currentLabelPairs = nil return nil } + labels[lName] = struct{}{} } return p.startLabelValue } @@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn { // When we are here, we have read all the labels, so for the // special case of a summary/histogram, we can finally find out // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { + switch p.currentMF.GetType() { + case dto.MetricType_SUMMARY: signature := model.LabelsToSignature(p.currentLabels) if summary := p.summaries[signature]; summary != nil { p.currentMetric = summary @@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + case dto.MetricType_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn { p.histograms[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - } else { + default: p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } if p.readTokenUntilWhitespace(); p.err != nil { @@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() { p.currentIsHistogramCount = false p.currentIsHistogramSum = false name := p.currentToken.String() + if !p.scheme.IsValidMetricName(name) { + p.parseError(fmt.Sprintf("invalid metric name %q", name)) + return + } if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { return } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..dfeb34be5f 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. 
+ // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -32,6 +32,12 @@ const ( // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" + // MetricTypeLabel is the label name indicating the metric type of + // timeseries as per the PROM-39 proposal. + MetricTypeLabel = "__type__" + // MetricUnitLabel is the label name indicating the metric unit of + // timeseries as per the PROM-39 proposal. + MetricUnitLabel = "__unit__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. @@ -100,33 +106,21 @@ type LabelName string // IsValid returns true iff the name matches the pattern of LabelNameRE when // NameValidationScheme is set to LegacyValidation, or valid UTF-8 if // NameValidationScheme is set to UTF8Validation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [ValidationScheme.IsValidLabelName] instead. func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - switch NameValidationScheme { - case LegacyValidation: - return ln.IsValidLegacy() - case UTF8Validation: - return utf8.ValidString(string(ln)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidLabelName(string(ln)) } // IsValidLegacy returns true iff name matches the pattern of LabelNameRE for // legacy names. It does not use LabelNameRE for the check but a much faster // hardcoded implementation. +// +// Deprecated: This method should not be used and may be removed in the future. +// Use [LegacyValidation.IsValidLabelName] instead. func (ln LabelName) IsValidLegacy() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true + return LegacyValidation.IsValidLabelName(string(ln)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index d0ad88da33..9de47b2568 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet { } // Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) +func (ls LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(ls)) - for k, v := range l { + for k, v := range ls { result[k] = v } @@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint { } // UnmarshalJSON implements the json.Unmarshaler interface. 
-func (l *LabelSet) UnmarshalJSON(b []byte) error { +func (ls *LabelSet) UnmarshalJSON(b []byte) error { var m map[LabelName]LabelValue if err := json.Unmarshal(b, &m); err != nil { return err @@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error { return fmt.Errorf("%q is not a valid label name", ln) } } - *l = LabelSet(m) + *ls = LabelSet(m) return nil } diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 5766107cf9..3feebf328a 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,6 +14,7 @@ package model import ( + "encoding/json" "errors" "fmt" "regexp" @@ -23,17 +24,30 @@ import ( "unicode/utf8" dto "github.com/prometheus/client_model/go" + "go.yaml.in/yaml/v2" "google.golang.org/protobuf/proto" ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 - // mode in isolation from other components that don't support UTF-8 may result - // in bugs or other undefined behavior. This value can be set to - // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To - // avoid need for locking, this value should be set once, ideally in an - // init(), before multiple goroutines are started. + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when @@ -50,16 +64,151 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // UnsetValidation represents an undefined ValidationScheme. + // Should not be used in practice. + UnsetValidation ValidationScheme = iota + + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. - LegacyValidation ValidationScheme = iota + LegacyValidation // UTF8Validation only requires that metric and label names be valid UTF-8 // strings. 
UTF8Validation ) +var _ interface { + yaml.Marshaler + yaml.Unmarshaler + json.Marshaler + json.Unmarshaler + fmt.Stringer +} = new(ValidationScheme) + +// String returns the string representation of s. +func (s ValidationScheme) String() string { + switch s { + case UnsetValidation: + return "unset" + case LegacyValidation: + return "legacy" + case UTF8Validation: + return "utf8" + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (s ValidationScheme) MarshalYAML() (any, error) { + switch s { + case UnsetValidation: + return "", nil + case LegacyValidation, UTF8Validation: + return s.String(), nil + default: + panic(fmt.Errorf("unhandled ValidationScheme: %d", s)) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error { + var scheme string + if err := unmarshal(&scheme); err != nil { + return err + } + return s.Set(scheme) +} + +// MarshalJSON implements the json.Marshaler interface. +func (s ValidationScheme) MarshalJSON() ([]byte, error) { + switch s { + case UnsetValidation: + return json.Marshal("") + case UTF8Validation, LegacyValidation: + return json.Marshal(s.String()) + default: + return nil, fmt.Errorf("unhandled ValidationScheme: %d", s) + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error { + var repr string + if err := json.Unmarshal(bytes, &repr); err != nil { + return err + } + return s.Set(repr) +} + +// Set implements the pflag.Value interface. +func (s *ValidationScheme) Set(text string) error { + switch text { + case "": + // Don't change the value. + case LegacyValidation.String(): + *s = LegacyValidation + case UTF8Validation.String(): + *s = UTF8Validation + default: + return fmt.Errorf("unrecognized ValidationScheme: %q", text) + } + return nil +} + +// IsValidMetricName returns whether metricName is valid according to s. +func (s ValidationScheme) IsValidMetricName(metricName string) bool { + switch s { + case LegacyValidation: + if len(metricName) == 0 { + return false + } + for i, b := range metricName { + if !isValidLegacyRune(b, i) { + return false + } + } + return true + case UTF8Validation: + if len(metricName) == 0 { + return false + } + return utf8.ValidString(metricName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String())) + } +} + +// IsValidLabelName returns whether labelName is valid according to s. +func (s ValidationScheme) IsValidLabelName(labelName string) bool { + switch s { + case LegacyValidation: + if len(labelName) == 0 { + return false + } + for i, b := range labelName { + // TODO: Apply De Morgan's law. Make sure there are tests for this. + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck + return false + } + } + return true + case UTF8Validation: + if len(labelName) == 0 { + return false + } + return utf8.ValidString(labelName) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s)) + } +} + +// Type implements the pflag.Value interface. +func (ValidationScheme) Type() string { + return "validationScheme" +} + type EscapingScheme int const ( @@ -89,7 +238,7 @@ const ( // Accept header, the default NameEscapingScheme will be used. EscapingKey = "escaping" - // Possible values for Escaping Key: + // Possible values for Escaping Key. 
AllowUTF8 = "allow-utf-8" // No escaping required. EscapeUnderscores = "underscores" EscapeDots = "dots" @@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint { // IsValidMetricName returns true iff name matches the pattern of MetricNameRE // for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is // selected. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [ValidationScheme.IsValidMetricName] instead. func IsValidMetricName(n LabelValue) bool { - switch NameValidationScheme { - case LegacyValidation: - return IsValidLegacyMetricName(string(n)) - case UTF8Validation: - if len(n) == 0 { - return false - } - return utf8.ValidString(string(n)) - default: - panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) - } + return NameValidationScheme.IsValidMetricName(string(n)) } // IsValidLegacyMetricName is similar to IsValidMetricName but always uses the // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. +// +// Deprecated: This function should not be used and might be removed in the future. +// Use [LegacyValidation.IsValidMetricName] instead. func IsValidLegacyMetricName(n string) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !isValidLegacyRune(b, i) { - return false - } - } - return true + return LegacyValidation.IsValidMetricName(n) } // EscapeMetricFamily escapes the given metric names and labels with the given @@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string { case DotsEscaping: // Do not early return for legacy valid names, we still escape underscores. for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if b == '.' { + case b == '.': escaped.WriteString("_dot_") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else { + default: escaped.WriteString("__") } } @@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if b == '_' { + switch { + case b == '_': escaped.WriteString("__") - } else if isValidLegacyRune(b, i) { + case isValidLegacyRune(b, i): escaped.WriteRune(b) - } else if !utf8.ValidRune(b) { + case !utf8.ValidRune(b): escaped.WriteString("_FFFD_") - } else { + default: escaped.WriteRune('_') escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') @@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } } -// lower function taken from strconv.atoi +// lower function taken from strconv.atoi. 
func lower(c byte) byte { return c | ('x' - 'X') } @@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string { } r := lower(escapedName[i]) utf8Val *= 16 - if r >= '0' && r <= '9' { + switch { + case r >= '0' && r <= '9': utf8Val += uint(r) - '0' - } else if r >= 'a' && r <= 'f' { + case r >= 'a' && r <= 'f': utf8Val += uint(r) - 'a' + 10 - } else { + default: return name } i++ diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 5727452c1e..1730b0fdc1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error { p := strings.Split(string(b), ".") switch len(p) { case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } *t = Time(v * second) case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) + v, err := strconv.ParseInt(p[0], 10, 64) if err != nil { return err } @@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { if prec < 0 { p[1] = p[1][:dotPrecision] } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) + p[1] += strings.Repeat("0", prec) } va, err := strconv.ParseInt(p[1], 10, 32) @@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error { // This type should not propagate beyond the scope of input/output processing. type Duration time.Duration -// Set implements pflag/flag.Value +// Set implements pflag/flag.Value. func (d *Duration) Set(s string) error { var err error *d, err = ParseDuration(s) return err } -// Type implements pflag.Value -func (d *Duration) Type() string { +// Type implements pflag.Value. +func (*Duration) Type() string { return "duration" } @@ -201,6 +201,7 @@ var unitMap = map[string]struct { // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. +// Negative durations are not supported. func ParseDuration(s string) (Duration, error) { switch s { case "0": @@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) { return 0, errors.New("duration out of range") } } + return Duration(dur), nil } +// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations. +func ParseDurationAllowNegative(s string) (Duration, error) { + if s == "" || s[0] != '-' { + return ParseDuration(s) + } + + d, err := ParseDuration(s[1:]) + + return -d, err +} + func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + r = "" + sign = "" ) + if ms == 0 { return "0s" } + if ms < 0 { + sign, ms = "-", -ms + } + f := func(unit string, mult int64, exact bool) { if exact && ms%mult != 0 { return @@ -286,7 +305,7 @@ func (d Duration) String() string { f("s", 1000, false) f("ms", 1, false) - return r + return sign + r } // MarshalJSON implements the json.Marshaler interface. 
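The model/time.go hunk above introduces ParseDurationAllowNegative and teaches Duration.String to re-attach the sign. A short usage sketch, assuming the vendored prometheus/common/model at this revision:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration still rejects a leading minus sign.
	if _, err := model.ParseDuration("-1h30m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}

	// The new helper strips the sign, parses the remainder with
	// ParseDuration, and negates the result.
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // "-1h30m": String() now re-attaches the sign.
}
```
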
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 8050637d82..a9995a37ee 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -191,7 +191,8 @@ func (ss SampleStream) String() string { } func (ss SampleStream) MarshalJSON() ([]byte, error) { - if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + switch { + case len(ss.Histograms) > 0 && len(ss.Values) > 0: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else if len(ss.Histograms) > 0 { + case len(ss.Histograms) > 0: v := struct { Metric Metric `json:"metric"` Histograms []SampleHistogramPair `json:"histograms"` @@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) { Histograms: ss.Histograms, } return json.Marshal(&v) - } else { + default: v := struct { Metric Metric `json:"metric"` Values []SamplePair `json:"values"` @@ -258,7 +259,7 @@ func (s Scalar) String() string { // MarshalJSON implements json.Marshaler. func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) + return json.Marshal([...]interface{}{s.Timestamp, v}) } // UnmarshalJSON implements json.Unmarshaler. @@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) +func (m Matrix) String() string { + matCp := make(Matrix, len(m)) + copy(matCp, m) sort.Sort(matCp) strs := make([]string, len(matCp)) diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 895e6a3e83..91ce5b7a45 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool { return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) } -func (b HistogramBucket) String() string { +func (s HistogramBucket) String() string { var sb strings.Builder - lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 - upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3 + upperInclusive := s.Boundaries == 0 || s.Boundaries == 3 if lowerInclusive { sb.WriteRune('[') } else { sb.WriteRune('(') } - fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper) if upperInclusive { sb.WriteRune(']') } else { sb.WriteRune(')') } - fmt.Fprintf(&sb, ":%v", b.Count) + fmt.Fprintf(&sb, ":%v", s.Count) return sb.String() } diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go index 726c50ee63..078910f46b 100644 --- a/vendor/github.com/prometheus/common/model/value_type.go +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error { return nil } -func (e ValueType) String() string { - switch e { +func (et ValueType) String() string { + switch et { case ValNone: 
return "" case ValScalar: diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 126df9e67a..3c3bf910fd 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,22 +1,45 @@ ---- +version: "2" linters: enable: - - errcheck - - godot - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - testifylint - - unused - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' - misspell: - locale: US + - forbidigo + - godot + - misspell + - revive + - testifylint + settings: + forbidigo: + forbid: + - pattern: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + exclude: + # Ignore "See: URL". + - 'See:' + capital: true + misspell: + locale: US + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/prometheus/procfs + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 1617292350..0ed55c2ba2 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.0 +GOLANGCI_LINT_VERSION ?= v2.0.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -275,3 +275,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 1224816c2a..0718239cf1 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. +ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. 
```bash rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using `git diff testdata/fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index cdcc8a7ccc..2e53344151 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -23,9 +23,9 @@ import ( // Learned from include/uapi/linux/if_arp.h. const ( - // completed entry (ha valid). + // Completed entry (ha valid). ATFComplete = 0x02 - // permanent entry. + // Permanent entry. ATFPermanent = 0x04 // Publish entry. ATFPublish = 0x08 diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 4980c875bf..9bdaccc7c8 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -24,8 +24,14 @@ type FS struct { isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 134767d69a..1b5bdbdf84 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -17,7 +17,7 @@ package procfs // isRealProc returns true on architectures that don't have a Type argument -// in their Statfs_t struct -func isRealProc(mountPoint string) (bool, error) { +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { return true, nil } diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index cf2e3eaa03..7db8633077 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3c18c7610e..3a43e83915 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -28,6 +28,9 @@ const ( // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 14272dc788..5a7d2df06a 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,6 +14,7 @@ package util import ( + "errors" "os" "strconv" "strings" @@ -110,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index 1ab875ceec..d5404a6d72 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -20,6 +20,8 @@ package util import ( "bytes" "os" + "strconv" + "strings" "syscall" ) @@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) { return string(bytes.TrimSpace(b[:n])), nil } + +// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it. 
+func SysReadUintFromFile(path string) (uint64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) +} + +// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it. +func SysReadIntFromFile(path string) (int64, error) { + data, err := SysReadFile(path) + if err != nil { + return 0, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 75a3b6c810..50caa73274 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -45,11 +45,11 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 - // kernel version >= 4.14 MaxLen + // Kernel version >= 4.14 MaxLen // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 fieldTransport11RDMAMaxLen = 28 - // kernel version <= 4.2 MinLen + // Kernel version <= 4.2 MinLen // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 fieldTransport11RDMAMinLen = 20 ) @@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats switch statVersion { case statVersion10: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport10UDPLen - } else { + default: return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { @@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } case statVersion11: var expectedLength int - if protocol == "tcp" { + switch protocol { + case "tcp": expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { + case "udp": expectedLength = fieldTransport11UDPLen - } else if protocol == "rdma" { + case "rdma": expectedLength = fieldTransport11RDMAMinLen - } else { + default: return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || @@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. - if protocol == "udp" { + switch protocol { + case "udp": ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } else if protocol == "tcp" { + case "tcp": ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) - } else if protocol == "rdma" { + case "rdma": ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 0000000000..f50b38e352 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index b70f1fc7a4..19e3378f72 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -50,12 +50,12 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 - // Drops shows the total number of dropped packets of all UPD sockets. + // Drops shows the total number of dropped packets of all UDP sockets. 
Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. netIPSocketLine struct { diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index b6c77b709f..8d4b1ac05b 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro if err != nil { return nil, err } - if fields[4] == enabled { + switch fields[4] { + case enabled: line.Pressure = 1 - } else if fields[4] == disabled { + case disabled: line.Pressure = 0 - } else { + default: line.Pressure = -1 } line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } - if fields[6] == enabled { + switch fields[6] { + case enabled: line.Slab = true - } else if fields[6] == disabled { + case disabled: line.Slab = false - } else { + default: return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { + switch capabilities[i] { + case "y": *capabilityFields[i] = true - } else if capabilities[i] == "n" { + case "n": *capabilityFields[i] = false - } else { + default: return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 5277629557..0396d72015 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. 
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d868cebdaa..d7e0cacb4c 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 142796368f..368187fa88 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -37,9 +37,9 @@ type Proc struct { type Procs []Proc var ( - ErrFileParse = errors.New("Error Parsing File") - ErrFileRead = errors.New("Error Reading File") - ErrMountPoint = errors.New("Error Accessing Mount point") + ErrFileParse = errors.New("error parsing file") + ErrFileRead = errors.New("error reading file") + ErrMountPoint = errors.New("error accessing mount point") ) func (p Procs) Len() int { return len(p) } @@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) { if err != nil { return Proc{}, err } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) + pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), "")) if err != nil { return Proc{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index daeed7f571..4a64347c03 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -24,7 +24,7 @@ import ( ) // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). 
By prefixing this path with the mount point of diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index 776f349717..d15b66ddb6 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 8e3ff4d794..4248c1716e 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value + procNetstat.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value + procNetstat.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value + procNetstat.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value + procNetstat.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value + procNetstat.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value + procNetstat.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value + procNetstat.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value + procNetstat.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value + procNetstat.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value + procNetstat.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = &value + procNetstat.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value + procNetstat.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = &value + procNetstat.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value + procNetstat.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value + procNetstat.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value + procNetstat.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value + procNetstat.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value + procNetstat.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value + procNetstat.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value + procNetstat.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value + procNetstat.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value + procNetstat.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value + procNetstat.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value + procNetstat.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = &value + procNetstat.TCPSackRecovery = 
&value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value + procNetstat.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value + procNetstat.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value + procNetstat.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value + procNetstat.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value + procNetstat.TCPFullUndo = &value case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value + procNetstat.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value + procNetstat.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value + procNetstat.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value + procNetstat.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value + procNetstat.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value + procNetstat.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value + procNetstat.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value + procNetstat.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value + procNetstat.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value + procNetstat.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value + procNetstat.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value + procNetstat.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value + procNetstat.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value + procNetstat.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value + procNetstat.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value + procNetstat.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value + procNetstat.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value + procNetstat.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value + procNetstat.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value + procNetstat.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value + procNetstat.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value + procNetstat.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value + procNetstat.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value + procNetstat.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value + procNetstat.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value + procNetstat.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value + procNetstat.TCPRetransFail = &value case "TCPRcvCoalesce": - 
procNetstat.TcpExt.TCPRcvCoalesce = &value + procNetstat.TCPRcvCoalesce = &value case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value + procNetstat.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value + procNetstat.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value + procNetstat.TCPOFODrop = &value case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value + procNetstat.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value + procNetstat.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value + procNetstat.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value + procNetstat.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value + procNetstat.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value + procNetstat.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + procNetstat.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + procNetstat.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + procNetstat.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value + procNetstat.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + procNetstat.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value + procNetstat.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value + procNetstat.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + procNetstat.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value + procNetstat.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + procNetstat.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value + procNetstat.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value + procNetstat.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value + procNetstat.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value + procNetstat.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value + procNetstat.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value + procNetstat.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + procNetstat.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value + procNetstat.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value + procNetstat.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + procNetstat.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value 
+ procNetstat.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value + procNetstat.TCPACKSkippedChallenge = &value case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value + procNetstat.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value + procNetstat.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value + procNetstat.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value + procNetstat.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value + procNetstat.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value + procNetstat.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value + procNetstat.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value + procNetstat.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value + procNetstat.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value + procNetstat.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value + procNetstat.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = &value + procNetstat.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = &value + procNetstat.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value + procNetstat.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value + procNetstat.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value + procNetstat.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value + procNetstat.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value + procNetstat.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value + procNetstat.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value + procNetstat.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value + procNetstat.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value + procNetstat.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value + procNetstat.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 09060e8208..9a297afcf8 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -19,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -29,7 +28,7 @@ import ( ) var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. + // Match the header line before each mapped zone in `/proc/pid/smaps`. 
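+	// For example: "00400000-004ef000 r-xp 00000000 fd:01 131132   /usr/bin/cat".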
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) @@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index b9d2cf642a..4bdc90b07e 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = &value + procSnmp.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value + procSnmp.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = &value + procSnmp.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value + procSnmp.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value + procSnmp.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value + procSnmp.ForwDatagrams = &value case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value + procSnmp.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = &value + procSnmp.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = &value + procSnmp.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = &value + procSnmp.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = &value + procSnmp.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value + procSnmp.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value + procSnmp.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value + procSnmp.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value + procSnmp.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = &value + procSnmp.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = &value + procSnmp.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = &value + procSnmp.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = &value + procSnmp.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = &value + procSnmp.InMsgs = &value case "InErrors": procSnmp.Icmp.InErrors = &value case "InCsumErrors": procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value + procSnmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value + procSnmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value + procSnmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value + procSnmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = &value + procSnmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = &value + procSnmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value + procSnmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value + procSnmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value + procSnmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value + procSnmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value + procSnmp.InAddrMaskReps = &value case 
"OutMsgs": - procSnmp.Icmp.OutMsgs = &value + procSnmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = &value + procSnmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value + procSnmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value + procSnmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value + procSnmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value + procSnmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value + procSnmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = &value + procSnmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value + procSnmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value + procSnmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value + procSnmp.OutTimestampReps = &value case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value + procSnmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value + procSnmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = &value + procSnmp.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value + procSnmp.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value + procSnmp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = &value + procSnmp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = &value + procSnmp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = &value + procSnmp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = &value + procSnmp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value + procSnmp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value + procSnmp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = &value + procSnmp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value + procSnmp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = &value + procSnmp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = &value + procSnmp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value + procSnmp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = &value + procSnmp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = &value + procSnmp.OutRsts = &value case "InCsumErrors": procSnmp.Tcp.InCsumErrors = &value } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index 3059cc6a13..fb7fd3995b 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = &value + procSnmp6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value + procSnmp6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value + procSnmp6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value + procSnmp6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value + procSnmp6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value + procSnmp6.InUnknownProtos = &value case "InTruncatedPkts": 
- procSnmp6.Ip6.InTruncatedPkts = &value + procSnmp6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = &value + procSnmp6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = &value + procSnmp6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value + procSnmp6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = &value + procSnmp6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value + procSnmp6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value + procSnmp6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value + procSnmp6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value + procSnmp6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value + procSnmp6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value + procSnmp6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = &value + procSnmp6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = &value + procSnmp6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = &value + procSnmp6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value + procSnmp6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value + procSnmp6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = &value + procSnmp6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = &value + procSnmp6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value + procSnmp6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value + procSnmp6.OutMcastOctets = &value case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value + procSnmp6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value + procSnmp6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value + procSnmp6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value + procSnmp6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value + procSnmp6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value + procSnmp6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value + procSnmp6.InMsgs = &value case "InErrors": procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value + procSnmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value + procSnmp6.OutErrors = &value case "InCsumErrors": procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value + procSnmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value + procSnmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value + procSnmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value + procSnmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = &value + procSnmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value + procSnmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value + procSnmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value + procSnmp6.InGroupMembResponses = &value case "InGroupMembReductions": - 
procSnmp6.Icmp6.InGroupMembReductions = &value + procSnmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value + procSnmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value + procSnmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value + procSnmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value + procSnmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value + procSnmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value + procSnmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value + procSnmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value + procSnmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value + procSnmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value + procSnmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value + procSnmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value + procSnmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value + procSnmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value + procSnmp6.OutGroupMembResponses = &value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value + procSnmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value + procSnmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value + procSnmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value + procSnmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value + procSnmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value + procSnmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value + procSnmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = &value + procSnmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = &value + procSnmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = &value + procSnmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = &value + procSnmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = &value + procSnmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = &value + procSnmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = &value + procSnmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = &value + procSnmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = &value + procSnmp6.OutType143 = &value } case "Udp6": switch key { @@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "InCsumErrors": procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value + procSnmp6.IgnoredMulti = &value } case "UdpLite6": switch key { diff --git a/vendor/github.com/prometheus/procfs/proc_status.go 
b/vendor/github.com/prometheus/procfs/proc_status.go index a055197c63..dd8aa56885 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt } } case "NSpid": - s.NSpids = calcNSPidsList(vString) + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 { return g } -func calcNSPidsList(nspidsString string) []uint64 { - s := strings.Split(nspidsString, " ") +func calcNSPidsList(nspidsString string) ([]uint64, error) { + s := strings.Split(nspidsString, "\t") var nspids []uint64 for _, nspid := range s { - nspid, _ := strconv.ParseUint(nspid, 10, 64) - if nspid == 0 { - continue + nspid, err := strconv.ParseUint(nspid, 10, 64) + if err != nil { + return nil, err } nspids = append(nspids, nspid) } - return nspids + return nspids, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 5eefbe2ef8..3810d1ac99 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -21,7 +21,7 @@ import ( ) func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) + return strings.ReplaceAll(sysctl, ".", "/") } func (fs FS) SysctlStrings(sysctl string) ([]string, error) { diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 28708e0745..403e6ae708 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { if len(parts) < 2 { continue } - switch { - case parts[0] == "HI:": + switch parts[0] { + case "HI:": perCPU := parts[1:] softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TIMER:": + case "TIMER:": perCPU := parts[1:] softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_TX:": + case "NET_TX:": perCPU := parts[1:] softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "NET_RX:": + case "NET_RX:": perCPU := parts[1:] softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "BLOCK:": + case "BLOCK:": perCPU := parts[1:] softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "IRQ_POLL:": + case 
"IRQ_POLL:": perCPU := parts[1:] softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "TASKLET:": + case "TASKLET:": perCPU := parts[1:] softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "SCHED:": + case "SCHED:": perCPU := parts[1:] softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "HRTIMER:": + case "HRTIMER:": perCPU := parts[1:] softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { @@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } - case parts[0] == "RCU:": + case "RCU:": perCPU := parts[1:] softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { diff --git a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go index c97689f95c..6423b6abdb 100644 --- a/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go +++ b/vendor/github.com/redis/go-redis/extra/rediscmd/v9/rediscmd.go @@ -17,7 +17,6 @@ func CmdString(cmd redis.Cmder) string { } func CmdsString(cmds []redis.Cmder) (string, string) { - const numCmdLimit = 100 const numNameLimit = 10 seen := make(map[string]struct{}, numNameLimit) @@ -26,10 +25,6 @@ func CmdsString(cmds []redis.Cmder) (string, string) { b := make([]byte, 0, 32*len(cmds)) for i, cmd := range cmds { - if i > numCmdLimit { - break - } - if i > 0 { b = append(b, '\n') } @@ -51,12 +46,7 @@ func CmdsString(cmds []redis.Cmder) (string, string) { } func AppendCmd(b []byte, cmd redis.Cmder) []byte { - const numArgLimit = 32 - for i, arg := range cmd.Args() { - if i > numArgLimit { - break - } if i > 0 { b = append(b, ' ') } @@ -72,20 +62,12 @@ func AppendCmd(b []byte, cmd redis.Cmder) []byte { } func appendArg(b []byte, v interface{}) []byte { - const argLenLimit = 64 - switch v := v.(type) { case nil: return append(b, ""...) 
case string: - if len(v) > argLenLimit { - v = v[:argLenLimit] - } return appendUTF8String(b, Bytes(v)) case []byte: - if len(v) > argLenLimit { - v = v[:argLenLimit] - } return appendUTF8String(b, v) case int: return strconv.AppendInt(b, int64(v), 10) diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go index c24f896729..c02ee0b312 100644 --- a/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/config.go @@ -4,7 +4,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" "go.opentelemetry.io/otel/trace" ) diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go index 695c7ee3ef..4974f4e8de 100644 --- a/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/metrics.go @@ -6,10 +6,11 @@ import ( "net" "time" - "github.com/redis/go-redis/v9" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + + "github.com/redis/go-redis/v9" ) // InstrumentMetrics starts reporting OpenTelemetry Metrics. @@ -126,6 +127,22 @@ func reportPoolStats(rdb *redis.Client, conf *config) error { return err } + hits, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.hits", + metric.WithDescription("The number of times free connection was found in the pool"), + ) + if err != nil { + return err + } + + misses, err := conf.meter.Int64ObservableUpDownCounter( + "db.client.connections.misses", + metric.WithDescription("The number of times free connection was not found in the pool"), + ) + if err != nil { + return err + } + redisConf := rdb.Options() _, err = conf.meter.RegisterCallback( func(ctx context.Context, o metric.Observer) error { @@ -139,6 +156,8 @@ func reportPoolStats(rdb *redis.Client, conf *config) error { o.ObserveInt64(usage, int64(stats.TotalConns-stats.IdleConns), metric.WithAttributes(usedAttrs...)) o.ObserveInt64(timeouts, int64(stats.Timeouts), metric.WithAttributes(labels...)) + o.ObserveInt64(hits, int64(stats.Hits), metric.WithAttributes(labels...)) + o.ObserveInt64(misses, int64(stats.Misses), metric.WithAttributes(labels...)) return nil }, idleMax, @@ -146,6 +165,8 @@ func reportPoolStats(rdb *redis.Client, conf *config) error { connsMax, usage, timeouts, + hits, + misses, ) return err @@ -192,11 +213,13 @@ func (mh *metricsHook) DialHook(hook redis.DialHook) redis.DialHook { conn, err := hook(ctx, network, addr) + dur := time.Since(start) + attrs := make([]attribute.KeyValue, 0, len(mh.attrs)+1) attrs = append(attrs, mh.attrs...) 
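+		// statusAttr converts err into a status attribute, so successful and
+		// failed dials are recorded under separate label sets.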
attrs = append(attrs, statusAttr(err)) - mh.createTime.Record(ctx, milliseconds(time.Since(start)), metric.WithAttributes(attrs...)) + mh.createTime.Record(ctx, milliseconds(dur), metric.WithAttributes(attrs...)) return conn, err } } diff --git a/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go b/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go index 0bbf692adf..33b7abac18 100644 --- a/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go +++ b/vendor/github.com/redis/go-redis/extra/redisotel/v9/tracing.go @@ -5,11 +5,12 @@ import ( "fmt" "net" "runtime" + "strconv" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.10.0" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" "go.opentelemetry.io/otel/trace" "github.com/redis/go-redis/extra/rediscmd/v9" @@ -25,22 +26,21 @@ func InstrumentTracing(rdb redis.UniversalClient, opts ...TracingOption) error { case *redis.Client: opt := rdb.Options() connString := formatDBConnString(opt.Network, opt.Addr) + opts = addServerAttributes(opts, opt.Addr) rdb.AddHook(newTracingHook(connString, opts...)) return nil case *redis.ClusterClient: - rdb.AddHook(newTracingHook("", opts...)) - rdb.OnNewNode(func(rdb *redis.Client) { opt := rdb.Options() + opts = addServerAttributes(opts, opt.Addr) connString := formatDBConnString(opt.Network, opt.Addr) rdb.AddHook(newTracingHook(connString, opts...)) }) return nil case *redis.Ring: - rdb.AddHook(newTracingHook("", opts...)) - rdb.OnNewNode(func(rdb *redis.Client) { opt := rdb.Options() + opts = addServerAttributes(opts, opt.Addr) connString := formatDBConnString(opt.Network, opt.Addr) rdb.AddHook(newTracingHook(connString, opts...)) }) @@ -72,7 +72,7 @@ func newTracingHook(connString string, opts ...TracingOption) *tracingHook { ) } if connString != "" { - conf.attrs = append(conf.attrs, semconv.DBConnectionStringKey.String(connString)) + conf.attrs = append(conf.attrs, semconv.DBConnectionString(connString)) } return &tracingHook{ @@ -87,10 +87,6 @@ func newTracingHook(connString string, opts ...TracingOption) *tracingHook { func (th *tracingHook) DialHook(hook redis.DialHook) redis.DialHook { return func(ctx context.Context, network, addr string) (net.Conn, error) { - if !trace.SpanFromContext(ctx).IsRecording() { - return hook(ctx, network, addr) - } - ctx, span := th.conf.tracer.Start(ctx, "redis.dial", th.spanOpts...) 
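+		// With the IsRecording short-circuit removed, every dial produces a span.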
defer span.End() @@ -105,22 +101,18 @@ func (th *tracingHook) DialHook(hook redis.DialHook) redis.DialHook { func (th *tracingHook) ProcessHook(hook redis.ProcessHook) redis.ProcessHook { return func(ctx context.Context, cmd redis.Cmder) error { - if !trace.SpanFromContext(ctx).IsRecording() { - return hook(ctx, cmd) - } - fn, file, line := funcFileLine("github.com/redis/go-redis") attrs := make([]attribute.KeyValue, 0, 8) attrs = append(attrs, - semconv.CodeFunctionKey.String(fn), - semconv.CodeFilepathKey.String(file), - semconv.CodeLineNumberKey.Int(line), + semconv.CodeFunction(fn), + semconv.CodeFilepath(file), + semconv.CodeLineNumber(line), ) if th.conf.dbStmtEnabled { cmdString := rediscmd.CmdString(cmd) - attrs = append(attrs, semconv.DBStatementKey.String(cmdString)) + attrs = append(attrs, semconv.DBStatement(cmdString)) } opts := th.spanOpts @@ -141,23 +133,19 @@ func (th *tracingHook) ProcessPipelineHook( hook redis.ProcessPipelineHook, ) redis.ProcessPipelineHook { return func(ctx context.Context, cmds []redis.Cmder) error { - if !trace.SpanFromContext(ctx).IsRecording() { - return hook(ctx, cmds) - } - fn, file, line := funcFileLine("github.com/redis/go-redis") attrs := make([]attribute.KeyValue, 0, 8) attrs = append(attrs, - semconv.CodeFunctionKey.String(fn), - semconv.CodeFilepathKey.String(file), - semconv.CodeLineNumberKey.Int(line), + semconv.CodeFunction(fn), + semconv.CodeFilepath(file), + semconv.CodeLineNumber(line), attribute.Int("db.redis.num_cmd", len(cmds)), ) summary, cmdsString := rediscmd.CmdsString(cmds) if th.conf.dbStmtEnabled { - attrs = append(attrs, semconv.DBStatementKey.String(cmdsString)) + attrs = append(attrs, semconv.DBStatement(cmdsString)) } opts := th.spanOpts @@ -213,3 +201,28 @@ func funcFileLine(pkg string) (string, string, int) { return fn, file, line } + +// Database span attributes semantic conventions recommended server address and port +// https://opentelemetry.io/docs/specs/semconv/database/database-spans/#connection-level-attributes +func addServerAttributes(opts []TracingOption, addr string) []TracingOption { + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return opts + } + + opts = append(opts, WithAttributes( + semconv.ServerAddress(host), + )) + + // Parse the port string to an integer + port, err := strconv.Atoi(portString) + if err != nil { + return opts + } + + opts = append(opts, WithAttributes( + semconv.ServerPort(port), + )) + + return opts +} diff --git a/vendor/github.com/redis/go-redis/v9/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore index 6f868895ba..0d99709e34 100644 --- a/vendor/github.com/redis/go-redis/v9/.gitignore +++ b/vendor/github.com/redis/go-redis/v9/.gitignore @@ -3,4 +3,9 @@ testdata/* .idea/ .DS_Store *.tar.gz -*.dic \ No newline at end of file +*.dic +redis8tests.sh +coverage.txt +**/coverage.txt +.vscode +tmp/* diff --git a/vendor/github.com/redis/go-redis/v9/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml index 285aca6b3a..872454ff7f 100644 --- a/vendor/github.com/redis/go-redis/v9/.golangci.yml +++ b/vendor/github.com/redis/go-redis/v9/.golangci.yml @@ -1,3 +1,34 @@ +version: "2" run: timeout: 5m tests: false +linters: + settings: + staticcheck: + checks: + - all + # Incorrect or missing package comment. + # https://staticcheck.dev/docs/checks/#ST1000 + - -ST1000 + # Omit embedded fields from selector expression. 
+      # https://staticcheck.dev/docs/checks/#QF1008
+      - -QF1008
+      - -ST1003
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
index 90030b89f6..7228a4a060 100644
--- a/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
+++ b/vendor/github.com/redis/go-redis/v9/CONTRIBUTING.md
@@ -32,20 +32,33 @@ Here's how to get started with your code contribution:
 
 1. Create your own fork of go-redis
 2. Do the changes in your fork
-3. If you need a development environment, run `make test`. Note: this clones and builds the latest release of [redis](https://redis.io). You also need a redis-stack-server docker, in order to run the capabilities tests. This can be started by running:
-   ```docker run -p 6379:6379 -it redis/redis-stack-server:edge```
-4. While developing, make sure the tests pass by running `make tests`
+3. If you need a development environment, run `make docker.start`.
+
+> Note: this clones and builds the docker containers specified in `docker-compose.yml`, to understand more about
+> the infrastructure that will be started you can check the `docker-compose.yml`. You also have the possibility
+> to specify the redis image that will be pulled with the env variable `CLIENT_LIBS_TEST_IMAGE`.
+> By default the docker image that will be pulled and started is `redislabs/client-libs-test:rs-7.4.0-v2`.
+> If you want to test with a newer Redis version, using a newer version of `redislabs/client-libs-test` should work out of the box.
+
+4. While developing, make sure the tests pass by running `make test` (if you have the docker containers running, `make test.ci` may be sufficient).
+> Note: `make test` will try to start all containers, run the tests with `make test.ci` and then stop all containers.
 5. If you like the change and think the project could use it, send a pull request
 
 To see what else is part of the automation, run `invoke -l`
 
+
 ## Testing
 
-Call `make test` to run all tests, including linters.
+### Setting up Docker
+To run the tests, you need to have Docker installed and running. If you are using a host OS that does not support
+docker host networks out of the box (e.g. Windows, OSX), you need to set up Docker Desktop and enable docker host networks.
+
+### Running tests
+Call `make test` to run all tests.
 
 Continuous Integration uses these same wrappers to run all of these
-tests against multiple versions of python. Feel free to test your
+tests against multiple versions of redis. Feel free to test your
 changes against all the go versions supported, as declared by the
 [build.yml](./.github/workflows/build.yml) file.
 
@@ -99,3 +112,7 @@ The core team regularly looks at pull requests. We will provide
 feedback as soon as possible. After receiving our feedback, please
 respond within two weeks. After that time, we may close your
 PR if it isn't showing any activity.
+
+## Support
+
+Maintainers can provide limited support to contributors on discord: https://discord.gg/W4txy5AeKM
diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile
index 1a6bd17862..655f16f44f 100644
--- a/vendor/github.com/redis/go-redis/v9/Makefile
+++ b/vendor/github.com/redis/go-redis/v9/Makefile
@@ -1,42 +1,59 @@
 GO_MOD_DIRS := $(shell find .
-type f -name 'go.mod' -exec dirname {} \; | sort) -test: testdeps - $(eval GO_VERSION := $(shell go version | cut -d " " -f 3 | cut -d. -f2)) +docker.start: + docker compose --profile all up -d --quiet-pull + +docker.stop: + docker compose --profile all down + +test: + $(MAKE) docker.start + @if [ -z "$(REDIS_VERSION)" ]; then \ + echo "REDIS_VERSION not set, running all tests"; \ + $(MAKE) test.ci; \ + else \ + MAJOR_VERSION=$$(echo "$(REDIS_VERSION)" | cut -d. -f1); \ + if [ "$$MAJOR_VERSION" -ge 8 ]; then \ + echo "REDIS_VERSION $(REDIS_VERSION) >= 8, running all tests"; \ + $(MAKE) test.ci; \ + else \ + echo "REDIS_VERSION $(REDIS_VERSION) < 8, skipping vector_sets tests"; \ + $(MAKE) test.ci.skip-vectorsets; \ + fi; \ + fi + $(MAKE) docker.stop + +test.ci: set -e; for dir in $(GO_MOD_DIRS); do \ - if echo "$${dir}" | grep -q "./example" && [ "$(GO_VERSION)" = "19" ]; then \ - echo "Skipping go test in $${dir} due to Go version 1.19 and dir contains ./example"; \ - continue; \ - fi; \ echo "go test in $${dir}"; \ (cd "$${dir}" && \ go mod tidy -compat=1.18 && \ - go test && \ - go test ./... -short -race && \ - go test ./... -run=NONE -bench=. -benchmem && \ - env GOOS=linux GOARCH=386 go test && \ - go test -coverprofile=coverage.txt -covermode=atomic ./... && \ - go vet); \ + go vet && \ + go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race -skip Example); \ done cd internal/customvet && go build . go vet -vettool ./internal/customvet/customvet -testdeps: testdata/redis/src/redis-server +test.ci.skip-vectorsets: + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go test in $${dir} (skipping vector sets)"; \ + (cd "$${dir}" && \ + go mod tidy -compat=1.18 && \ + go vet && \ + go test -v -coverprofile=coverage.txt -covermode=atomic ./... -race \ + -run '^(?!.*(?:VectorSet|vectorset|ExampleClient_vectorset)).*$$' -skip Example); \ + done + cd internal/customvet && go build . + go vet -vettool ./internal/customvet/customvet -bench: testdeps - go test ./... -test.run=NONE -test.bench=. -test.benchmem +bench: + go test ./... -test.run=NONE -test.bench=. -test.benchmem -skip Example -.PHONY: all test testdeps bench fmt +.PHONY: all test test.ci test.ci.skip-vectorsets bench fmt build: go build . 
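+# The locally built Redis in testdata/ is no longer needed; test servers are
+# provided by the docker compose profiles driven by docker.start/docker.stop.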
-testdata/redis: - mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-7.4-rc2.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - cd $< && make all - fmt: gofumpt -w ./ goimports -w -local github.com/redis/go-redis ./ diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md index 9395c652f1..c37a52ec70 100644 --- a/vendor/github.com/redis/go-redis/v9/README.md +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -3,16 +3,30 @@ [![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc) [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) +[![Go Report Card](https://goreportcard.com/badge/github.com/redis/go-redis/v9)](https://goreportcard.com/report/github.com/redis/go-redis/v9) [![codecov](https://codecov.io/github/redis/go-redis/graph/badge.svg?token=tsrCZKuSSw)](https://codecov.io/github/redis/go-redis) -[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) -> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). -> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can -> use it to monitor applications and set up automatic alerts to receive notifications via email, -> Slack, Telegram, and others. -> -> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which -> demonstrates how you can use Uptrace to monitor go-redis. +[![Discord](https://img.shields.io/discord/697882427875393627.svg?style=social&logo=discord)](https://discord.gg/W4txy5AeKM) +[![Twitch](https://img.shields.io/twitch/status/redisinc?style=social)](https://www.twitch.tv/redisinc) +[![YouTube](https://img.shields.io/youtube/channel/views/UCD78lHSwYqMlyetR0_P4Vig?style=social)](https://www.youtube.com/redisinc) +[![Twitter](https://img.shields.io/twitter/follow/redisinc?style=social)](https://twitter.com/redisinc) +[![Stack Exchange questions](https://img.shields.io/stackexchange/stackoverflow/t/go-redis?style=social&logo=stackoverflow&label=Stackoverflow)](https://stackoverflow.com/questions/tagged/go-redis) + +> go-redis is the official Redis client library for the Go programming language. It offers a straightforward interface for interacting with Redis servers. + +## Supported versions + +In `go-redis` we are aiming to support the last three releases of Redis. Currently, this means we do support: +- [Redis 7.2](https://raw.githubusercontent.com/redis/redis/7.2/00-RELEASENOTES) - using Redis Stack 7.2 for modules support +- [Redis 7.4](https://raw.githubusercontent.com/redis/redis/7.4/00-RELEASENOTES) - using Redis Stack 7.4 for modules support +- [Redis 8.0](https://raw.githubusercontent.com/redis/redis/8.0/00-RELEASENOTES) - using Redis CE 8.0 where modules are included + +Although the `go.mod` states it requires at minimum `go 1.18`, our CI is configured to run the tests against all three +versions of Redis and latest two versions of Go ([1.23](https://go.dev/doc/devel/release#go1.23.0), +[1.24](https://go.dev/doc/devel/release#go1.24.0)). We observe that some modules related test may not pass with +Redis Stack 7.2 and some commands are changed with Redis CE 8.0. 
+Please do refer to the documentation and the tests if you experience any issues. We do plan to update the go version +in the `go.mod` to `go 1.24` in one of the next releases. ## How do I Redis? @@ -36,7 +50,7 @@ ## Resources - [Discussions](https://github.com/redis/go-redis/discussions) -- [Chat](https://discord.gg/rWtp5Aj) +- [Chat](https://discord.gg/W4txy5AeKM) - [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9) - [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples) @@ -54,6 +68,7 @@ key value NoSQL database that uses RocksDB as storage engine and is compatible w - Redis commands except QUIT and SYNC. - Automatic connection pooling. +- [StreamingCredentialsProvider (e.g. entra id, oauth)](#1-streaming-credentials-provider-highest-priority) (experimental) - [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html). - [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html). - [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html). @@ -122,17 +137,121 @@ func ExampleClient() { } ``` -The above can be modified to specify the version of the RESP protocol by adding the `protocol` -option to the `Options` struct: +### Authentication + +The Redis client supports multiple ways to provide authentication credentials, with a clear priority order. Here are the available options: + +#### 1. Streaming Credentials Provider (Highest Priority) - Experimental feature + +The streaming credentials provider allows for dynamic credential updates during the connection lifetime. This is particularly useful for managed identity services and token-based authentication. ```go - rdb := redis.NewClient(&redis.Options{ - Addr: "localhost:6379", - Password: "", // no password set - DB: 0, // use default DB - Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3 - }) +type StreamingCredentialsProvider interface { + Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) +} + +type CredentialsListener interface { + OnNext(credentials Credentials) // Called when credentials are updated + OnError(err error) // Called when an error occurs +} + +type Credentials interface { + BasicAuth() (username string, password string) + RawCredentials() string +} +``` + +Example usage: +```go +rdb := redis.NewClient(&redis.Options{ + Addr: "localhost:6379", + StreamingCredentialsProvider: &MyCredentialsProvider{}, +}) +``` + +**Note:** The streaming credentials provider can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) to enable Entra ID (formerly Azure AD) authentication. This allows for seamless integration with Azure's managed identity services and token-based authentication. + +Example with Entra ID: +```go +import ( + "github.com/redis/go-redis/v9" + "github.com/redis/go-redis-entraid" +) + +// Create an Entra ID credentials provider +provider := entraid.NewDefaultAzureIdentityProvider() + +// Configure Redis client with Entra ID authentication +rdb := redis.NewClient(&redis.Options{ + Addr: "your-redis-server.redis.cache.windows.net:6380", + StreamingCredentialsProvider: provider, + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, +}) +``` + +#### 2. Context-based Credentials Provider + +The context-based provider allows credentials to be determined at the time of each operation, using the context. 
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr: "localhost:6379",
+    CredentialsProviderContext: func(ctx context.Context) (string, string, error) {
+        // Return username, password, and any error
+        return "user", "pass", nil
+    },
+})
+```
+
+#### 3. Regular Credentials Provider
+
+A simple function-based provider that returns static credentials.
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr: "localhost:6379",
+    CredentialsProvider: func() (string, string) {
+        // Return username and password
+        return "user", "pass"
+    },
+})
+```
+
+#### 4. Username/Password Fields (Lowest Priority)
+
+The most basic way to provide credentials is through the `Username` and `Password` fields in the options.
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr: "localhost:6379",
+    Username: "user",
+    Password: "pass",
+})
+```
+
+#### Priority Order
+
+The client will use credentials in the following priority order:
+1. Streaming Credentials Provider (if set)
+2. Context-based Credentials Provider (if set)
+3. Regular Credentials Provider (if set)
+4. Username/Password fields (if set)
+
+If none of these are set, the client will attempt to connect without authentication.
+
+### Protocol Version
+
+The client supports both RESP2 and RESP3 protocols. You can specify the protocol version in the options:
+
+```go
+rdb := redis.NewClient(&redis.Options{
+    Addr: "localhost:6379",
+    Password: "", // no password set
+    DB: 0,  // use default DB
+    Protocol: 3, // specify 2 for RESP 2 or 3 for RESP 3
+})
 ```
 
 ### Connecting via a redis url
@@ -159,6 +278,24 @@ func ExampleClient() *redis.Client {
 
 ```
 
+### Instrument with OpenTelemetry
+
+```go
+import (
+    "github.com/redis/go-redis/v9"
+    "github.com/redis/go-redis/extra/redisotel/v9"
+    "errors"
+    "log"
+)
+
+func main() {
+    ...
+    rdb := redis.NewClient(&redis.Options{...})
+
+    if err := errors.Join(redisotel.InstrumentTracing(rdb), redisotel.InstrumentMetrics(rdb)); err != nil {
+        log.Fatal(err)
+    }
+}
+```
+
 
 ### Advanced Configuration
 
@@ -203,9 +340,30 @@ res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptio
 val1 := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawVal()
 ```
 
-## Contributing
+#### Redis-Search Default Dialect
 
-Please see [out contributing guidelines](CONTRIBUTING.md) to help us improve this library!
+In the Redis-Search module, **the default dialect is 2**. If needed, you can explicitly specify a different dialect using the appropriate configuration in your queries.
+
+**Important**: Be aware that the query dialect may impact the results returned. If needed, you can switch to a different dialect version by passing the desired dialect in the arguments of the command you want to execute.
+For example:
+```
+	res2, err := rdb.FTSearchWithArgs(ctx,
+		"idx:bicycle",
+		"@pickup_zone:[CONTAINS $bike]",
+		&redis.FTSearchOptions{
+			Params: map[string]interface{}{
+				"bike": "POINT(-0.1278 51.5074)",
+			},
+			DialectVersion: 3,
+		},
+	).Result()
+```
+You can find further details in the [query dialect documentation](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/dialects/).
+
+## Contributing
+
+We welcome contributions to the go-redis library! If you have a bug fix, feature request, or improvement, please open an issue or pull request on GitHub.
+We appreciate your help in making go-redis better for everyone.
+If you are interested in contributing to the go-redis library, please check out our [contributing guidelines](CONTRIBUTING.md) for more information on how to get started. ## Look and feel @@ -285,6 +443,14 @@ REDIS_PORT=9999 go test ## Contributors +> The go-redis project was originally initiated by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can +> use it to monitor applications and set up automatic alerts to receive notifications via email, +> Slack, Telegram, and others. +> +> See [OpenTelemetry](https://github.com/redis/go-redis/tree/master/example/otel) example which +> demonstrates how you can use Uptrace to monitor go-redis. + Thanks to all the people who already contributed! diff --git a/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md new file mode 100644 index 0000000000..f6a4abb921 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/RELEASE-NOTES.md @@ -0,0 +1,163 @@ +# Release Notes + +# 9.10.0 (2025-06-06) + +## 🚀 Highlights + +`go-redis` now supports [vector sets](https://redis.io/docs/latest/develop/data-types/vector-sets/). This data type is marked +as "in preview" in Redis and its support in `go-redis` is marked as experimental. You can find examples in the documentation and +in the `doctests` folder. + +# Changes + +## 🚀 New Features + +- feat: support vectorset ([#3375](https://github.com/redis/go-redis/pull/3375)) + +## 🧰 Maintenance + +- Add the missing NewFloatSliceResult for testing ([#3393](https://github.com/redis/go-redis/pull/3393)) +- DOC-5078 vector set examples ([#3394](https://github.com/redis/go-redis/pull/3394)) + +## Contributors +We'd like to thank all the contributors who worked on this release! 
+ +[@AndBobsYourUncle](https://github.com/AndBobsYourUncle), [@andy-stark-redis](https://github.com/andy-stark-redis), [@fukua95](https://github.com/fukua95) and [@ndyakov](https://github.com/ndyakov) + + + +# 9.9.0 (2025-05-27) + +## 🚀 Highlights +- **Token-based Authentication**: Added `StreamingCredentialsProvider` for dynamic credential updates (experimental) + - Can be used with [go-redis-entraid](https://github.com/redis/go-redis-entraid) for Azure AD authentication +- **Connection Statistics**: Added connection waiting statistics for better monitoring +- **Failover Improvements**: Added `ParseFailoverURL` for easier failover configuration +- **Ring Client Enhancements**: Added shard access methods for better Pub/Sub management + +## ✨ New Features +- Added `StreamingCredentialsProvider` for token-based authentication ([#3320](https://github.com/redis/go-redis/pull/3320)) + - Supports dynamic credential updates + - Includes connection close hooks + - Note: Currently marked as experimental +- Added `ParseFailoverURL` for parsing failover URLs ([#3362](https://github.com/redis/go-redis/pull/3362)) +- Added connection waiting statistics ([#2804](https://github.com/redis/go-redis/pull/2804)) +- Added new utility functions: + - `ParseFloat` and `MustParseFloat` in public utils package ([#3371](https://github.com/redis/go-redis/pull/3371)) + - Unit tests for `Atoi`, `ParseInt`, `ParseUint`, and `ParseFloat` ([#3377](https://github.com/redis/go-redis/pull/3377)) +- Added Ring client shard access methods: + - `GetShardClients()` to retrieve all active shard clients + - `GetShardClientForKey(key string)` to get the shard client for a specific key ([#3388](https://github.com/redis/go-redis/pull/3388)) + +## 🐛 Bug Fixes +- Fixed routing reads to loading slave nodes ([#3370](https://github.com/redis/go-redis/pull/3370)) +- Added support for nil lag in XINFO GROUPS ([#3369](https://github.com/redis/go-redis/pull/3369)) +- Fixed pool acquisition timeout issues ([#3381](https://github.com/redis/go-redis/pull/3381)) +- Optimized unnecessary copy operations ([#3376](https://github.com/redis/go-redis/pull/3376)) + +## 📚 Documentation +- Updated documentation for XINFO GROUPS with nil lag support ([#3369](https://github.com/redis/go-redis/pull/3369)) +- Added package-level comments for new features + +## ⚡ Performance and Reliability +- Optimized `ReplaceSpaces` function ([#3383](https://github.com/redis/go-redis/pull/3383)) +- Set default value for `Options.Protocol` in `init()` ([#3387](https://github.com/redis/go-redis/pull/3387)) +- Exported pool errors for public consumption ([#3380](https://github.com/redis/go-redis/pull/3380)) + +## 🔧 Dependencies and Infrastructure +- Updated Redis CI to version 8.0.1 ([#3372](https://github.com/redis/go-redis/pull/3372)) +- Updated spellcheck GitHub Actions ([#3389](https://github.com/redis/go-redis/pull/3389)) +- Removed unused parameters ([#3382](https://github.com/redis/go-redis/pull/3382), [#3384](https://github.com/redis/go-redis/pull/3384)) + +## 🧪 Testing +- Added unit tests for pool acquisition timeout ([#3381](https://github.com/redis/go-redis/pull/3381)) +- Added unit tests for utility functions ([#3377](https://github.com/redis/go-redis/pull/3377)) + +## 👥 Contributors + +We would like to thank all the contributors who made this release possible: + +[@ndyakov](https://github.com/ndyakov), [@ofekshenawa](https://github.com/ofekshenawa), [@LINKIWI](https://github.com/LINKIWI), [@iamamirsalehi](https://github.com/iamamirsalehi), 
[@fukua95](https://github.com/fukua95), [@lzakharov](https://github.com/lzakharov), [@DengY11](https://github.com/DengY11) + +## 📝 Changelog + +For a complete list of changes, see the [full changelog](https://github.com/redis/go-redis/compare/v9.8.0...v9.9.0). + +# 9.8.0 (2025-04-30) + +## 🚀 Highlights +- **Redis 8 Support**: Full compatibility with Redis 8.0, including testing and CI integration +- **Enhanced Hash Operations**: Added support for new hash commands (`HGETDEL`, `HGETEX`, `HSETEX`) and `HSTRLEN` command +- **Search Improvements**: Enabled Search DIALECT 2 by default and added `CountOnly` argument for `FT.Search` + +## ✨ New Features +- Added support for new hash commands: `HGETDEL`, `HGETEX`, `HSETEX` ([#3305](https://github.com/redis/go-redis/pull/3305)) +- Added `HSTRLEN` command for hash operations ([#2843](https://github.com/redis/go-redis/pull/2843)) +- Added `Do` method for raw query by single connection from `pool.Conn()` ([#3182](https://github.com/redis/go-redis/pull/3182)) +- Prevent false-positive marshaling by treating zero time.Time as empty in isEmptyValue ([#3273](https://github.com/redis/go-redis/pull/3273)) +- Added FailoverClusterClient support for Universal client ([#2794](https://github.com/redis/go-redis/pull/2794)) +- Added support for cluster mode with `IsClusterMode` config parameter ([#3255](https://github.com/redis/go-redis/pull/3255)) +- Added client name support in `HELLO` RESP handshake ([#3294](https://github.com/redis/go-redis/pull/3294)) +- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) +- Added read-only option for failover configurations ([#3281](https://github.com/redis/go-redis/pull/3281)) +- Added `CountOnly` argument for `FT.Search` to use `LIMIT 0 0` ([#3338](https://github.com/redis/go-redis/pull/3338)) +- Added `DB` option support in `NewFailoverClusterClient` ([#3342](https://github.com/redis/go-redis/pull/3342)) +- Added `nil` check for the options when creating a client ([#3363](https://github.com/redis/go-redis/pull/3363)) + +## 🐛 Bug Fixes +- Fixed `PubSub` concurrency safety issues ([#3360](https://github.com/redis/go-redis/pull/3360)) +- Fixed panic caused when argument is `nil` ([#3353](https://github.com/redis/go-redis/pull/3353)) +- Improved error handling when fetching master node from sentinels ([#3349](https://github.com/redis/go-redis/pull/3349)) +- Fixed connection pool timeout issues and increased retries ([#3298](https://github.com/redis/go-redis/pull/3298)) +- Fixed context cancellation error leading to connection spikes on Primary instances ([#3190](https://github.com/redis/go-redis/pull/3190)) +- Fixed RedisCluster client to consider `MASTERDOWN` a retriable error ([#3164](https://github.com/redis/go-redis/pull/3164)) +- Fixed tracing to show complete commands instead of truncated versions ([#3290](https://github.com/redis/go-redis/pull/3290)) +- Fixed OpenTelemetry instrumentation to prevent multiple span reporting ([#3168](https://github.com/redis/go-redis/pull/3168)) +- Fixed `FT.Search` Limit argument and added `CountOnly` argument for limit 0 0 ([#3338](https://github.com/redis/go-redis/pull/3338)) +- Fixed missing command in interface ([#3344](https://github.com/redis/go-redis/pull/3344)) +- Fixed slot calculation for `COUNTKEYSINSLOT` command ([#3327](https://github.com/redis/go-redis/pull/3327)) +- Updated PubSub implementation with correct context ([#3329](https://github.com/redis/go-redis/pull/3329)) + +## 📚 Documentation +- Added hash search examples 
([#3357](https://github.com/redis/go-redis/pull/3357)) +- Fixed documentation comments ([#3351](https://github.com/redis/go-redis/pull/3351)) +- Added `CountOnly` search example ([#3345](https://github.com/redis/go-redis/pull/3345)) +- Added examples for list commands: `LLEN`, `LPOP`, `LPUSH`, `LRANGE`, `RPOP`, `RPUSH` ([#3234](https://github.com/redis/go-redis/pull/3234)) +- Added `SADD` and `SMEMBERS` command examples ([#3242](https://github.com/redis/go-redis/pull/3242)) +- Updated `README.md` to use Redis Discord guild ([#3331](https://github.com/redis/go-redis/pull/3331)) +- Updated `HExpire` command documentation ([#3355](https://github.com/redis/go-redis/pull/3355)) +- Featured OpenTelemetry instrumentation more prominently ([#3316](https://github.com/redis/go-redis/pull/3316)) +- Updated `README.md` with additional information ([#310ce55](https://github.com/redis/go-redis/commit/310ce55)) + +## ⚡ Performance and Reliability +- Bound connection pool background dials to configured dial timeout ([#3089](https://github.com/redis/go-redis/pull/3089)) +- Ensured context isn't exhausted via concurrent query ([#3334](https://github.com/redis/go-redis/pull/3334)) + +## 🔧 Dependencies and Infrastructure +- Updated testing image to Redis 8.0-RC2 ([#3361](https://github.com/redis/go-redis/pull/3361)) +- Enabled CI for Redis CE 8.0 ([#3274](https://github.com/redis/go-redis/pull/3274)) +- Updated various dependencies: + - Bumped golangci/golangci-lint-action from 6.5.0 to 7.0.0 ([#3354](https://github.com/redis/go-redis/pull/3354)) + - Bumped rojopolis/spellcheck-github-actions ([#3336](https://github.com/redis/go-redis/pull/3336)) + - Bumped golang.org/x/net in example/otel ([#3308](https://github.com/redis/go-redis/pull/3308)) +- Migrated golangci-lint configuration to v2 format ([#3354](https://github.com/redis/go-redis/pull/3354)) + +## ⚠️ Breaking Changes +- **Enabled Search DIALECT 2 by default** ([#3213](https://github.com/redis/go-redis/pull/3213)) +- Dropped RedisGears (Triggers and Functions) support ([#3321](https://github.com/redis/go-redis/pull/3321)) +- Dropped FT.PROFILE command that was never enabled ([#3323](https://github.com/redis/go-redis/pull/3323)) + +## 🔒 Security +- Fixed network error handling on SETINFO (CVE-2025-29923) ([#3295](https://github.com/redis/go-redis/pull/3295)) + +## 🧪 Testing +- Added integration tests for Redis 8 behavior changes in Redis Search ([#3337](https://github.com/redis/go-redis/pull/3337)) +- Added vector types INT8 and UINT8 tests ([#3299](https://github.com/redis/go-redis/pull/3299)) +- Added test codes for search_commands.go ([#3285](https://github.com/redis/go-redis/pull/3285)) +- Fixed example test sorting ([#3292](https://github.com/redis/go-redis/pull/3292)) + +## 👥 Contributors + +We would like to thank all the contributors who made this release possible: + +[@alexander-menshchikov](https://github.com/alexander-menshchikov), [@EXPEbdodla](https://github.com/EXPEbdodla), [@afti](https://github.com/afti), [@dmaier-redislabs](https://github.com/dmaier-redislabs), [@four_leaf_clover](https://github.com/four_leaf_clover), [@alohaglenn](https://github.com/alohaglenn), [@gh73962](https://github.com/gh73962), [@justinmir](https://github.com/justinmir), [@LINKIWI](https://github.com/LINKIWI), [@liushuangbill](https://github.com/liushuangbill), [@golang88](https://github.com/golang88), [@gnpaone](https://github.com/gnpaone), [@ndyakov](https://github.com/ndyakov), [@nikolaydubina](https://github.com/nikolaydubina), 
[@oleglacto](https://github.com/oleglacto), [@andy-stark-redis](https://github.com/andy-stark-redis), [@rodneyosodo](https://github.com/rodneyosodo), [@dependabot](https://github.com/dependabot), [@rfyiamcool](https://github.com/rfyiamcool), [@frankxjkuang](https://github.com/frankxjkuang), [@fukua95](https://github.com/fukua95), [@soleymani-milad](https://github.com/soleymani-milad), [@ofekshenawa](https://github.com/ofekshenawa), [@khasanovbi](https://github.com/khasanovbi)
diff --git a/vendor/github.com/redis/go-redis/v9/acl_commands.go b/vendor/github.com/redis/go-redis/v9/acl_commands.go
index 06847be2ed..9cb800bb3b 100644
--- a/vendor/github.com/redis/go-redis/v9/acl_commands.go
+++ b/vendor/github.com/redis/go-redis/v9/acl_commands.go
@@ -4,8 +4,20 @@ import "context"
 
 type ACLCmdable interface {
 	ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd
+
 	ACLLog(ctx context.Context, count int64) *ACLLogCmd
 	ACLLogReset(ctx context.Context) *StatusCmd
+
+	ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd
+	ACLDelUser(ctx context.Context, username string) *IntCmd
+	ACLList(ctx context.Context) *StringSliceCmd
+
+	ACLCat(ctx context.Context) *StringSliceCmd
+	ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd
+}
+
+type ACLCatArgs struct {
+	Category string
 }
 
 func (c cmdable) ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd {
@@ -33,3 +45,45 @@ func (c cmdable) ACLLogReset(ctx context.Context) *StatusCmd {
 	_ = c(ctx, cmd)
 	return cmd
 }
+
+func (c cmdable) ACLDelUser(ctx context.Context, username string) *IntCmd {
+	cmd := NewIntCmd(ctx, "acl", "deluser", username)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ACLSetUser(ctx context.Context, username string, rules ...string) *StatusCmd {
+	args := make([]interface{}, 3+len(rules))
+	args[0] = "acl"
+	args[1] = "setuser"
+	args[2] = username
+	for i, rule := range rules {
+		args[i+3] = rule
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ACLList(ctx context.Context) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "acl", "list")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ACLCat(ctx context.Context) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "acl", "cat")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ACLCatArgs(ctx context.Context, options *ACLCatArgs) *StringSliceCmd {
+	// if a category is passed, build a new command; otherwise fall back to the ACLCat method
+	if options != nil && options.Category != "" {
+		cmd := NewStringSliceCmd(ctx, "acl", "cat", options.Category)
+		_ = c(ctx, cmd)
+		return cmd
+	}
+
+	return c.ACLCat(ctx)
+}
diff --git a/vendor/github.com/redis/go-redis/v9/auth/auth.go b/vendor/github.com/redis/go-redis/v9/auth/auth.go
new file mode 100644
index 0000000000..1f5c802248
--- /dev/null
+++ b/vendor/github.com/redis/go-redis/v9/auth/auth.go
@@ -0,0 +1,61 @@
+// Package auth provides authentication-related interfaces and types.
+// It also includes a basic implementation of credentials using username and password.
+package auth
+
+// StreamingCredentialsProvider is an interface that defines the methods for a streaming credentials provider.
+// It is used to provide credentials for authentication.
+// The CredentialsListener is used to receive updates when the credentials change.
+type StreamingCredentialsProvider interface {
+	// Subscribe subscribes to the credentials provider for updates.
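+	// The same listener is then notified of every subsequent credentials refresh.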
+ // It returns the current credentials, a cancel function to unsubscribe from the provider, + // and an error if any. + // TODO(ndyakov): Should we add context to the Subscribe method? + Subscribe(listener CredentialsListener) (Credentials, UnsubscribeFunc, error) +} + +// UnsubscribeFunc is a function that is used to cancel the subscription to the credentials provider. +// It is used to unsubscribe from the provider when the credentials are no longer needed. +type UnsubscribeFunc func() error + +// CredentialsListener is an interface that defines the methods for a credentials listener. +// It is used to receive updates when the credentials change. +// The OnNext method is called when the credentials change. +// The OnError method is called when an error occurs while requesting the credentials. +type CredentialsListener interface { + OnNext(credentials Credentials) + OnError(err error) +} + +// Credentials is an interface that defines the methods for credentials. +// It is used to provide the credentials for authentication. +type Credentials interface { + // BasicAuth returns the username and password for basic authentication. + BasicAuth() (username string, password string) + // RawCredentials returns the raw credentials as a string. + // This can be used to extract the username and password from the raw credentials or + // additional information if present in the token. + RawCredentials() string +} + +type basicAuth struct { + username string + password string +} + +// RawCredentials returns the raw credentials as a string. +func (b *basicAuth) RawCredentials() string { + return b.username + ":" + b.password +} + +// BasicAuth returns the username and password for basic authentication. +func (b *basicAuth) BasicAuth() (username string, password string) { + return b.username, b.password +} + +// NewBasicCredentials creates a new Credentials object from the given username and password. +func NewBasicCredentials(username, password string) Credentials { + return &basicAuth{ + username: username, + password: password, + } +} diff --git a/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go new file mode 100644 index 0000000000..40076a0b13 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/auth/reauth_credentials_listener.go @@ -0,0 +1,47 @@ +package auth + +// ReAuthCredentialsListener is a struct that implements the CredentialsListener interface. +// It is used to re-authenticate the credentials when they are updated. +// It contains: +// - reAuth: a function that takes the new credentials and returns an error if any. +// - onErr: a function that takes an error and handles it. +type ReAuthCredentialsListener struct { + reAuth func(credentials Credentials) error + onErr func(err error) +} + +// OnNext is called when the credentials are updated. +// It calls the reAuth function with the new credentials. +// If the reAuth function returns an error, it calls the onErr function with the error. +func (c *ReAuthCredentialsListener) OnNext(credentials Credentials) { + if c.reAuth == nil { + return + } + + err := c.reAuth(credentials) + if err != nil { + c.OnError(err) + } +} + +// OnError is called when an error occurs. +// It can be called from both the credentials provider and the reAuth function. +func (c *ReAuthCredentialsListener) OnError(err error) { + if c.onErr == nil { + return + } + + c.onErr(err) +} + +// NewReAuthCredentialsListener creates a new ReAuthCredentialsListener. 
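+// The reAuth callback receives each new set of credentials; onErr handles any error raised while re-authenticating.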
+// Implements the auth.CredentialsListener interface. +func NewReAuthCredentialsListener(reAuth func(credentials Credentials) error, onErr func(err error)) *ReAuthCredentialsListener { + return &ReAuthCredentialsListener{ + reAuth: reAuth, + onErr: onErr, + } +} + +// Ensure ReAuthCredentialsListener implements the CredentialsListener interface. +var _ CredentialsListener = (*ReAuthCredentialsListener)(nil) diff --git a/vendor/github.com/redis/go-redis/v9/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go index 0caf0977a7..4857b01eaa 100644 --- a/vendor/github.com/redis/go-redis/v9/cluster_commands.go +++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go @@ -4,6 +4,7 @@ import "context" type ClusterCmdable interface { ClusterMyShardID(ctx context.Context) *StringCmd + ClusterMyID(ctx context.Context) *StringCmd ClusterSlots(ctx context.Context) *ClusterSlotsCmd ClusterShards(ctx context.Context) *ClusterShardsCmd ClusterLinks(ctx context.Context) *ClusterLinksCmd @@ -35,6 +36,12 @@ func (c cmdable) ClusterMyShardID(ctx context.Context) *StringCmd { return cmd } +func (c cmdable) ClusterMyID(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "cluster", "myid") + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd { cmd := NewClusterSlotsCmd(ctx, "cluster", "slots") _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go index f3d0e49b79..56b2257214 100644 --- a/vendor/github.com/redis/go-redis/v9/command.go +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -1405,27 +1405,64 @@ func (cmd *MapStringSliceInterfaceCmd) Val() map[string][]interface{} { } func (cmd *MapStringSliceInterfaceCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadMapLen() + readType, err := rd.PeekReplyType() if err != nil { return err } - cmd.val = make(map[string][]interface{}, n) - for i := 0; i < n; i++ { - k, err := rd.ReadString() + + cmd.val = make(map[string][]interface{}) + + switch readType { + case proto.RespMap: + n, err := rd.ReadMapLen() if err != nil { return err } - nn, err := rd.ReadArrayLen() + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[k] = make([]interface{}, nn) + for j := 0; j < nn; j++ { + value, err := rd.ReadReply() + if err != nil { + return err + } + cmd.val[k][j] = value + } + } + case proto.RespArray: + // RESP2 response + n, err := rd.ReadArrayLen() if err != nil { return err } - cmd.val[k] = make([]interface{}, nn) - for j := 0; j < nn; j++ { - value, err := rd.ReadReply() + + for i := 0; i < n; i++ { + // Each entry in this array is itself an array with key details + itemLen, err := rd.ReadArrayLen() if err != nil { return err } - cmd.val[k][j] = value + + key, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[key] = make([]interface{}, 0, itemLen-1) + for j := 1; j < itemLen; j++ { + // Read the inner array for timestamp-value pairs + data, err := rd.ReadReply() + if err != nil { + return err + } + cmd.val[key] = append(cmd.val[key], data) + } } } @@ -2067,7 +2104,9 @@ type XInfoGroup struct { Pending int64 LastDeliveredID string EntriesRead int64 - Lag int64 + // Lag represents the number of pending messages in the stream not yet + // delivered to this consumer group. Returns -1 when the lag cannot be determined. 
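+	// (Redis reports a nil lag when trimmed or deleted entries make the exact count impossible to compute.)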
+ Lag int64 } var _ Cmder = (*XInfoGroupsCmd)(nil) @@ -2150,8 +2189,11 @@ func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { // lag: the number of entries in the stream that are still waiting to be delivered // to the group's consumers, or a NULL(Nil) when that number can't be determined. + // In that case, we return -1. if err != nil && err != Nil { return err + } else if err == Nil { + group.Lag = -1 } default: return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) @@ -3795,7 +3837,8 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { } // ----------------------------------------------------------------------- -// MapStringInterfaceCmd represents a command that returns a map of strings to interface{}. + +// MapMapStringInterfaceCmd represents a command that returns a map of strings to interface{}. type MapMapStringInterfaceCmd struct { baseCmd val map[string]interface{} @@ -3826,30 +3869,48 @@ func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} { return cmd.val } +// readReply will try to parse the reply from the proto.Reader for both resp2 and resp3 func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadArrayLen() + data, err := rd.ReadReply() if err != nil { return err } + resultMap := map[string]interface{}{} - data := make(map[string]interface{}, n/2) - for i := 0; i < n; i += 2 { - _, err := rd.ReadArrayLen() - if err != nil { - cmd.err = err - } - key, err := rd.ReadString() - if err != nil { - cmd.err = err - } - value, err := rd.ReadString() - if err != nil { - cmd.err = err + switch midResponse := data.(type) { + case map[interface{}]interface{}: // resp3 will return map + for k, v := range midResponse { + stringKey, ok := k.(string) + if !ok { + return fmt.Errorf("redis: invalid map key %#v", k) + } + resultMap[stringKey] = v + } + case []interface{}: // resp2 will return array of arrays + n := len(midResponse) + for i := 0; i < n; i++ { + finalArr, ok := midResponse[i].([]interface{}) // final array that we need to transform to map + if !ok { + return fmt.Errorf("redis: unexpected response %#v", data) + } + m := len(finalArr) + if m%2 != 0 { // since this should be map, keys should be even number + return fmt.Errorf("redis: unexpected response %#v", data) + } + + for j := 0; j < m; j += 2 { + stringKey, ok := finalArr[j].(string) // the first one + if !ok { + return fmt.Errorf("redis: invalid map key %#v", finalArr[i]) + } + resultMap[stringKey] = finalArr[j+1] // second one is value + } } - data[key] = value + default: + return fmt.Errorf("redis: unexpected response %#v", data) } - cmd.val = data + cmd.val = resultMap return nil } @@ -5078,6 +5139,7 @@ type ClientInfo struct { OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) OutputMemory int // omem, output buffer memory usage TotalMemory int // tot-mem, total memory consumed by this client in its various buffers + IoThread int // io-thread id Events string // file descriptor events (see below) LastCmd string // cmd, last command played User string // the authenticated username of the client @@ -5256,6 +5318,8 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.LibName = val case "lib-ver": info.LibVer = val + case "io-thread": + info.IoThread, err = strconv.Atoi(val) default: return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) } @@ -5435,8 +5499,6 @@ func (cmd *InfoCmd) readReply(rd *proto.Reader) error { section := "" 
scanner := bufio.NewScanner(strings.NewReader(val)) - moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) - for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "#") { @@ -5447,6 +5509,7 @@ func (cmd *InfoCmd) readReply(rd *proto.Reader) error { cmd.val[section] = make(map[string]string) } else if line != "" { if section == "Modules" { + moduleRe := regexp.MustCompile(`module:name=(.+?),(.+)$`) kv := moduleRe.FindStringSubmatch(line) if len(kv) == 3 { cmd.val[section][kv[1]] = kv[2] @@ -5557,3 +5620,59 @@ func (cmd *MonitorCmd) Stop() { defer cmd.mu.Unlock() cmd.status = monitorStatusStop } + +type VectorScoreSliceCmd struct { + baseCmd + + val []VectorScore +} + +var _ Cmder = (*VectorScoreSliceCmd)(nil) + +func NewVectorInfoSliceCmd(ctx context.Context, args ...any) *VectorScoreSliceCmd { + return &VectorScoreSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *VectorScoreSliceCmd) SetVal(val []VectorScore) { + cmd.val = val +} + +func (cmd *VectorScoreSliceCmd) Val() []VectorScore { + return cmd.val +} + +func (cmd *VectorScoreSliceCmd) Result() ([]VectorScore, error) { + return cmd.val, cmd.err +} + +func (cmd *VectorScoreSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *VectorScoreSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make([]VectorScore, n) + for i := 0; i < n; i++ { + name, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[i].Name = name + + score, err := rd.ReadFloat() + if err != nil { + return err + } + cmd.val[i].Score = score + } + return nil +} diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go index 034daa2350..c0358001d1 100644 --- a/vendor/github.com/redis/go-redis/v9/commands.go +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -81,6 +81,8 @@ func appendArg(dst []interface{}, arg interface{}) []interface{} { return dst case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: return append(dst, arg) + case nil: + return dst default: // scan struct field v := reflect.ValueOf(arg) @@ -153,6 +155,12 @@ func isEmptyValue(v reflect.Value) bool { return v.Float() == 0 case reflect.Interface, reflect.Pointer: return v.IsNil() + case reflect.Struct: + if v.Type() == reflect.TypeOf(time.Time{}) { + return v.IsZero() + } + // Only supports the struct time.Time, + // subsequent iterations will follow the func Scan support decoder. } return false } @@ -211,7 +219,6 @@ type Cmdable interface { ACLCmdable BitMapCmdable ClusterCmdable - GearsCmdable GenericCmdable GeoCmdable HashCmdable @@ -227,6 +234,7 @@ type Cmdable interface { StreamCmdable TimeseriesCmdable JSONCmdable + VectorSetCmdable } type StatefulCmdable interface { @@ -331,7 +339,7 @@ func (info LibraryInfo) Validate() error { return nil } -// Hello Set the resp protocol used. +// Hello sets the resp protocol used. func (c statefulCmdable) Hello(ctx context.Context, ver int, username, password, clientName string, ) *MapStringInterfaceCmd { @@ -423,6 +431,12 @@ func (c cmdable) Ping(ctx context.Context) *StatusCmd { return cmd } +func (c cmdable) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) 
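+	// run the command through the client's processing pipeline; the reply (or error) is recorded on cmd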
+ _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Quit(_ context.Context) *StatusCmd { panic("not implemented") } diff --git a/vendor/github.com/redis/go-redis/v9/docker-compose.yml b/vendor/github.com/redis/go-redis/v9/docker-compose.yml new file mode 100644 index 0000000000..3d4347bf21 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/docker-compose.yml @@ -0,0 +1,106 @@ +--- + +services: + redis: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} + platform: linux/amd64 + container_name: redis-standalone + environment: + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=6379 + - TLS_PORT=6666 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6379:6379 + - 6666:6666 # TLS port + volumes: + - "./dockers/standalone:/redis/work" + profiles: + - standalone + - sentinel + - all-stack + - all + + osscluster: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} + platform: linux/amd64 + container_name: redis-osscluster + environment: + - NODES=6 + - PORT=16600 + command: "--cluster-enabled yes" + ports: + - "16600-16605:16600-16605" + volumes: + - "./dockers/osscluster:/redis/work" + profiles: + - cluster + - all-stack + - all + + sentinel-cluster: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} + platform: linux/amd64 + container_name: redis-sentinel-cluster + network_mode: "host" + environment: + - NODES=3 + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=9121 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + #ports: + # - "9121-9123:9121-9123" + volumes: + - "./dockers/sentinel-cluster:/redis/work" + profiles: + - sentinel + - all-stack + - all + + sentinel: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} + platform: linux/amd64 + container_name: redis-sentinel + depends_on: + - sentinel-cluster + environment: + - NODES=3 + - REDIS_CLUSTER=no + - PORT=26379 + command: ${REDIS_EXTRA_ARGS:---sentinel} + network_mode: "host" + #ports: + # - 26379:26379 + # - 26380:26380 + # - 26381:26381 + volumes: + - "./dockers/sentinel.conf:/redis/config-default/redis.conf" + - "./dockers/sentinel:/redis/work" + profiles: + - sentinel + - all-stack + - all + + ring-cluster: + image: ${CLIENT_LIBS_TEST_IMAGE:-redislabs/client-libs-test:rs-7.4.0-v2} + platform: linux/amd64 + container_name: redis-ring-cluster + environment: + - NODES=3 + - TLS_ENABLED=yes + - REDIS_CLUSTER=no + - PORT=6390 + command: ${REDIS_EXTRA_ARGS:---enable-debug-command yes --enable-module-command yes --tls-auth-clients optional --save ""} + ports: + - 6390:6390 + - 6391:6391 + - 6392:6392 + volumes: + - "./dockers/ring:/redis/work" + profiles: + - ring + - cluster + - all-stack + - all diff --git a/vendor/github.com/redis/go-redis/v9/error.go b/vendor/github.com/redis/go-redis/v9/error.go index 9b348193a4..8c811966fb 100644 --- a/vendor/github.com/redis/go-redis/v9/error.go +++ b/vendor/github.com/redis/go-redis/v9/error.go @@ -15,6 +15,13 @@ import ( // ErrClosed performs any operation on the closed client will return this error. var ErrClosed = pool.ErrClosed +// ErrPoolExhausted is returned from a pool connection method +// when the maximum number of database connections in the pool has been reached. +var ErrPoolExhausted = pool.ErrPoolExhausted + +// ErrPoolTimeout timed out waiting to get a connection from the connection pool. 
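+// It can be matched with errors.Is when a command fails while the pool is saturated.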
+var ErrPoolTimeout = pool.ErrPoolTimeout + // HasErrorPrefix checks if the err is a Redis error and the message contains a prefix. func HasErrorPrefix(err error, prefix string) bool { var rErr Error @@ -38,12 +45,24 @@ type Error interface { var _ Error = proto.RedisError("") +func isContextError(err error) bool { + switch err { + case context.Canceled, context.DeadlineExceeded: + return true + default: + return false + } +} + func shouldRetry(err error, retryTimeout bool) bool { switch err { case io.EOF, io.ErrUnexpectedEOF: return true case nil, context.Canceled, context.DeadlineExceeded: return false + case pool.ErrPoolTimeout: + // connection pool timeout, increase retries. #3289 + return true } if v, ok := err.(timeoutError); ok { @@ -63,6 +82,9 @@ func shouldRetry(err error, retryTimeout bool) bool { if strings.HasPrefix(s, "READONLY ") { return true } + if strings.HasPrefix(s, "MASTERDOWN ") { + return true + } if strings.HasPrefix(s, "CLUSTERDOWN ") { return true } diff --git a/vendor/github.com/redis/go-redis/v9/gears_commands.go b/vendor/github.com/redis/go-redis/v9/gears_commands.go deleted file mode 100644 index e0d49a6b78..0000000000 --- a/vendor/github.com/redis/go-redis/v9/gears_commands.go +++ /dev/null @@ -1,149 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "strings" -) - -type GearsCmdable interface { - TFunctionLoad(ctx context.Context, lib string) *StatusCmd - TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd - TFunctionDelete(ctx context.Context, libName string) *StatusCmd - TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd - TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd - TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd - TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd - TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd - TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd -} - -type TFunctionLoadOptions struct { - Replace bool - Config string -} - -type TFunctionListOptions struct { - Withcode bool - Verbose int - Library string -} - -type TFCallOptions struct { - Keys []string - Arguments []string -} - -// TFunctionLoad - load a new JavaScript library into Redis. -// For more information - https://redis.io/commands/tfunction-load/ -func (c cmdable) TFunctionLoad(ctx context.Context, lib string) *StatusCmd { - args := []interface{}{"TFUNCTION", "LOAD", lib} - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFunctionLoadArgs(ctx context.Context, lib string, options *TFunctionLoadOptions) *StatusCmd { - args := []interface{}{"TFUNCTION", "LOAD"} - if options != nil { - if options.Replace { - args = append(args, "REPLACE") - } - if options.Config != "" { - args = append(args, "CONFIG", options.Config) - } - } - args = append(args, lib) - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFunctionDelete - delete a JavaScript library from Redis. -// For more information - https://redis.io/commands/tfunction-delete/ -func (c cmdable) TFunctionDelete(ctx context.Context, libName string) *StatusCmd { - args := []interface{}{"TFUNCTION", "DELETE", libName} - cmd := NewStatusCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFunctionList - list the functions with additional information about each function. 
-// For more information - https://redis.io/commands/tfunction-list/ -func (c cmdable) TFunctionList(ctx context.Context) *MapStringInterfaceSliceCmd { - args := []interface{}{"TFUNCTION", "LIST"} - cmd := NewMapStringInterfaceSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFunctionListArgs(ctx context.Context, options *TFunctionListOptions) *MapStringInterfaceSliceCmd { - args := []interface{}{"TFUNCTION", "LIST"} - if options != nil { - if options.Withcode { - args = append(args, "WITHCODE") - } - if options.Verbose != 0 { - v := strings.Repeat("v", options.Verbose) - args = append(args, v) - } - if options.Library != "" { - args = append(args, "LIBRARY", options.Library) - } - } - cmd := NewMapStringInterfaceSliceCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFCall - invoke a function. -// For more information - https://redis.io/commands/tfcall/ -func (c cmdable) TFCall(ctx context.Context, libName string, funcName string, numKeys int) *Cmd { - lf := libName + "." + funcName - args := []interface{}{"TFCALL", lf, numKeys} - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFCallArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd { - lf := libName + "." + funcName - args := []interface{}{"TFCALL", lf, numKeys} - if options != nil { - for _, key := range options.Keys { - args = append(args, key) - } - for _, key := range options.Arguments { - args = append(args, key) - } - } - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -// TFCallASYNC - invoke an asynchronous JavaScript function (coroutine). -// For more information - https://redis.io/commands/TFCallASYNC/ -func (c cmdable) TFCallASYNC(ctx context.Context, libName string, funcName string, numKeys int) *Cmd { - lf := fmt.Sprintf("%s.%s", libName, funcName) - args := []interface{}{"TFCALLASYNC", lf, numKeys} - cmd := NewCmd(ctx, args...) - _ = c(ctx, cmd) - return cmd -} - -func (c cmdable) TFCallASYNCArgs(ctx context.Context, libName string, funcName string, numKeys int, options *TFCallOptions) *Cmd { - lf := fmt.Sprintf("%s.%s", libName, funcName) - args := []interface{}{"TFCALLASYNC", lf, numKeys} - if options != nil { - for _, key := range options.Keys { - args = append(args, key) - } - for _, key := range options.Arguments { - args = append(args, key) - } - } - cmd := NewCmd(ctx, args...) 
- _ = c(ctx, cmd) - return cmd -} diff --git a/vendor/github.com/redis/go-redis/v9/hash_commands.go b/vendor/github.com/redis/go-redis/v9/hash_commands.go index 6596c6f5f7..98a361b3ef 100644 --- a/vendor/github.com/redis/go-redis/v9/hash_commands.go +++ b/vendor/github.com/redis/go-redis/v9/hash_commands.go @@ -10,6 +10,9 @@ type HashCmdable interface { HExists(ctx context.Context, key, field string) *BoolCmd HGet(ctx context.Context, key, field string) *StringCmd HGetAll(ctx context.Context, key string) *MapStringStringCmd + HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd + HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd + HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd HKeys(ctx context.Context, key string) *StringSliceCmd @@ -17,12 +20,15 @@ type HashCmdable interface { HMGet(ctx context.Context, key string, fields ...string) *SliceCmd HSet(ctx context.Context, key string, values ...interface{}) *IntCmd HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd + HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HVals(ctx context.Context, key string) *StringSliceCmd HRandField(ctx context.Context, key string, count int) *StringSliceCmd HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd + HStrLen(ctx context.Context, key, field string) *IntCmd HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd @@ -190,6 +196,11 @@ func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match str return cmd } +func (c cmdable) HStrLen(ctx context.Context, key, field string) *IntCmd { + cmd := NewIntCmd(ctx, "hstrlen", key, field) + _ = c(ctx, cmd) + return cmd +} func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { args := []interface{}{"hscan", key, cursor} if match != "" { @@ -213,7 +224,10 @@ type HExpireArgs struct { // HExpire - Sets the expiration time for specified fields in a hash in seconds. // The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. -// For more information - https://redis.io/commands/hexpire/ +// Available since Redis 7.4 CE. +// For more information refer to [HEXPIRE Documentation]. 
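+// Per the Redis documentation, each reply element is -2 if the field does not exist, 1 if the expiration was set, or 2 if the field was deleted because the given TTL was zero or negative.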
+// +// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/ func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration), "FIELDS", len(fields)} @@ -228,7 +242,10 @@ func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Durati // HExpireWithArgs - Sets the expiration time for specified fields in a hash in seconds. // It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. // The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. -// For more information - https://redis.io/commands/hexpire/ +// Available since Redis 7.4 CE. +// For more information refer to [HEXPIRE Documentation]. +// +// [HEXPIRE Documentation]: https://redis.io/commands/hexpire/ func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration)} @@ -257,7 +274,10 @@ func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration tim // HPExpire - Sets the expiration time for specified fields in a hash in milliseconds. // Similar to HExpire, it accepts a key, an expiration duration in milliseconds, a struct with expiration condition flags, and a list of fields. // The command modifies the standard time.Duration to milliseconds for the Redis command. -// For more information - https://redis.io/commands/hpexpire/ +// Available since Redis 7.4 CE. +// For more information refer to [HPEXPIRE Documentation]. +// +// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/ func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration), "FIELDS", len(fields)} @@ -269,6 +289,13 @@ func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Durat return cmd } +// HPExpireWithArgs - Sets the expiration time for specified fields in a hash in milliseconds. +// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. +// The command constructs an argument list starting with "HPEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. +// Available since Redis 7.4 CE. +// For more information refer to [HPEXPIRE Documentation]. +// +// [HPEXPIRE Documentation]: https://redis.io/commands/hpexpire/ func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration)} @@ -297,7 +324,10 @@ func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration ti // HExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in seconds. // Takes a key, a UNIX timestamp, a struct of conditional flags, and a list of fields. // The command sets absolute expiration times based on the UNIX timestamp provided. -// For more information - https://redis.io/commands/hexpireat/ +// Available since Redis 7.4 CE. +// For more information refer to [HExpireAt Documentation]. 
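+// A timestamp that is already in the past deletes the specified fields immediately.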
+//
+// [HExpireAt Documentation]: https://redis.io/commands/hexpireat/
 func (c cmdable) HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd {
 
 	args := []interface{}{"HEXPIREAT", key, tm.Unix(), "FIELDS", len(fields)}
 
@@ -337,7 +367,10 @@ func (c cmdable) HExpireAtWithArgs(ctx context.Context, key string, tm time.Time
 
 // HPExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in milliseconds.
 // Similar to HExpireAt but for timestamps in milliseconds. It accepts the same parameters and adjusts the UNIX time to milliseconds.
-// For more information - https://redis.io/commands/hpexpireat/
+// Available since Redis 7.4 CE.
+// For more information refer to [HPExpireAt Documentation].
+//
+// [HPExpireAt Documentation]: https://redis.io/commands/hpexpireat/
 func (c cmdable) HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond), "FIELDS", len(fields)}
 
@@ -377,7 +410,10 @@ func (c cmdable) HPExpireAtWithArgs(ctx context.Context, key string, tm time.Tim
 
 // HPersist - Removes the expiration time from specified fields in a hash.
 // Accepts a key and the fields themselves.
 // This command ensures that each field specified will have its expiration removed if present.
-// For more information - https://redis.io/commands/hpersist/
+// Available since Redis 7.4 CE.
+// For more information refer to [HPersist Documentation].
+//
+// [HPersist Documentation]: https://redis.io/commands/hpersist/
 func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPERSIST", key, "FIELDS", len(fields)}
 
@@ -392,6 +428,10 @@ func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *In
 // HExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in seconds.
 // Requires a key and the fields themselves to fetch their expiration timestamps.
 // This command returns the expiration times for each field or error/status codes for each field as specified.
+// Available since Redis 7.4 CE.
+// For more information refer to [HExpireTime Documentation].
+//
+// [HExpireTime Documentation]: https://redis.io/commands/hexpiretime/
-// For more information - https://redis.io/commands/hexpiretime/
 func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HEXPIRETIME", key, "FIELDS", len(fields)}
 
@@ -407,6 +447,10 @@ func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string)
 // HPExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in milliseconds.
 // Similar to HExpireTime, adjusted for timestamps in milliseconds. It requires the same parameters.
 // Provides the expiration timestamp for each field in milliseconds.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPExpireTime Documentation].
+//
+// [HPExpireTime Documentation]: https://redis.io/commands/hpexpiretime/
-// For more information - https://redis.io/commands/hexpiretime/
 func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPEXPIRETIME", key, "FIELDS", len(fields)}
 
@@ -422,7 +466,10 @@ func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string)
 
 // HTTL - Retrieves the remaining time to live for specified fields in a hash in seconds.
// Requires a key and the fields themselves. It returns the TTL for each specified field.
 // This command fetches the TTL in seconds for each field or returns error/status codes as appropriate.
-// For more information - https://redis.io/commands/httl/
+// Available since Redis 7.4 CE.
+// For more information refer to [HTTL Documentation].
+//
+// [HTTL Documentation]: https://redis.io/commands/httl/
 func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HTTL", key, "FIELDS", len(fields)}
 
@@ -437,6 +484,10 @@ func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSli
 // HPTTL - Retrieves the remaining time to live for specified fields in a hash in milliseconds.
 // Similar to HTTL, but returns the TTL in milliseconds. It requires a key and the specified fields.
 // This command provides the TTL in milliseconds for each field or returns error/status codes as needed.
+// Available since Redis 7.4 CE.
+// For more information refer to [HPTTL Documentation].
+//
+// [HPTTL Documentation]: https://redis.io/commands/hpttl/
-// For more information - https://redis.io/commands/hpttl/
 func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd {
 	args := []interface{}{"HPTTL", key, "FIELDS", len(fields)}
@@ -448,3 +499,113 @@ func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSl
 	_ = c(ctx, cmd)
 	return cmd
 }
+
+func (c cmdable) HGetDel(ctx context.Context, key string, fields ...string) *StringSliceCmd {
+	args := []interface{}{"HGETDEL", key, "FIELDS", len(fields)}
+	for _, field := range fields {
+		args = append(args, field)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGetEX(ctx context.Context, key string, fields ...string) *StringSliceCmd {
+	args := []interface{}{"HGETEX", key, "FIELDS", len(fields)}
+	for _, field := range fields {
+		args = append(args, field)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HGetEXExpirationType represents an expiration option for the HGETEX command.
+type HGetEXExpirationType string
+
+const (
+	HGetEXExpirationEX      HGetEXExpirationType = "EX"
+	HGetEXExpirationPX      HGetEXExpirationType = "PX"
+	HGetEXExpirationEXAT    HGetEXExpirationType = "EXAT"
+	HGetEXExpirationPXAT    HGetEXExpirationType = "PXAT"
+	HGetEXExpirationPERSIST HGetEXExpirationType = "PERSIST"
+)
+
+type HGetEXOptions struct {
+	ExpirationType HGetEXExpirationType
+	ExpirationVal  int64
+}
+
+func (c cmdable) HGetEXWithArgs(ctx context.Context, key string, options *HGetEXOptions, fields ...string) *StringSliceCmd {
+	args := []interface{}{"HGETEX", key}
+	if options.ExpirationType != "" {
+		args = append(args, string(options.ExpirationType))
+		if options.ExpirationType != HGetEXExpirationPERSIST {
+			args = append(args, options.ExpirationVal)
+		}
+	}
+
+	args = append(args, "FIELDS", len(fields))
+	for _, field := range fields {
+		args = append(args, field)
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type HSetEXCondition string
+
+const (
+	HSetEXFNX HSetEXCondition = "FNX" // Only set the fields if none of them already exist.
+	HSetEXFXX HSetEXCondition = "FXX" // Only set the fields if all of them already exist.
+) + +type HSetEXExpirationType string + +const ( + HSetEXExpirationEX HSetEXExpirationType = "EX" + HSetEXExpirationPX HSetEXExpirationType = "PX" + HSetEXExpirationEXAT HSetEXExpirationType = "EXAT" + HSetEXExpirationPXAT HSetEXExpirationType = "PXAT" + HSetEXExpirationKEEPTTL HSetEXExpirationType = "KEEPTTL" +) + +type HSetEXOptions struct { + Condition HSetEXCondition + ExpirationType HSetEXExpirationType + ExpirationVal int64 +} + +func (c cmdable) HSetEX(ctx context.Context, key string, fieldsAndValues ...string) *IntCmd { + args := []interface{}{"HSETEX", key, "FIELDS", len(fieldsAndValues) / 2} + for _, field := range fieldsAndValues { + args = append(args, field) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetEXWithArgs(ctx context.Context, key string, options *HSetEXOptions, fieldsAndValues ...string) *IntCmd { + args := []interface{}{"HSETEX", key} + if options.Condition != "" { + args = append(args, string(options.Condition)) + } + if options.ExpirationType != "" { + args = append(args, string(options.ExpirationType)) + if options.ExpirationType != HSetEXExpirationKEEPTTL { + args = append(args, options.ExpirationVal) + } + } + args = append(args, "FIELDS", len(fieldsAndValues)/2) + for _, field := range fieldsAndValues { + args = append(args, field) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go index 7f45bc0bb7..c1087b401a 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go @@ -23,6 +23,8 @@ type Conn struct { Inited bool pooled bool createdAt time.Time + + onClose func() error } func NewConn(netConn net.Conn) *Conn { @@ -46,6 +48,10 @@ func (cn *Conn) SetUsedAt(tm time.Time) { atomic.StoreInt64(&cn.usedAt, tm.Unix()) } +func (cn *Conn) SetOnClose(fn func() error) { + cn.onClose = fn +} + func (cn *Conn) SetNetConn(netConn net.Conn) { cn.netConn = netConn cn.rd.Reset(netConn) @@ -95,6 +101,10 @@ func (cn *Conn) WithWriter( } func (cn *Conn) Close() error { + if cn.onClose != nil { + // ignore error + _ = cn.onClose() + } return cn.netConn.Close() } diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go index 2125f3e133..e7d951e268 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go @@ -33,9 +33,11 @@ var timers = sync.Pool{ // Stats contains pool state information and accumulated stats. 
type Stats struct { - Hits uint32 // number of times free connection was found in the pool - Misses uint32 // number of times free connection was NOT found in the pool - Timeouts uint32 // number of times a wait timeout occurred + Hits uint32 // number of times free connection was found in the pool + Misses uint32 // number of times free connection was NOT found in the pool + Timeouts uint32 // number of times a wait timeout occurred + WaitCount uint32 // number of times a connection was waited + WaitDurationNs int64 // total time spent for waiting a connection in nanoseconds TotalConns uint32 // number of total connections in the pool IdleConns uint32 // number of idle connections in the pool @@ -62,6 +64,7 @@ type Options struct { PoolFIFO bool PoolSize int + DialTimeout time.Duration PoolTimeout time.Duration MinIdleConns int MaxIdleConns int @@ -89,7 +92,8 @@ type ConnPool struct { poolSize int idleConnsLen int - stats Stats + stats Stats + waitDurationNs atomic.Int64 _closed uint32 // atomic } @@ -140,7 +144,10 @@ func (p *ConnPool) checkMinIdleConns() { } func (p *ConnPool) addIdleConn() error { - cn, err := p.dialConn(context.TODO(), true) + ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + defer cancel() + + cn, err := p.dialConn(ctx, true) if err != nil { return err } @@ -230,15 +237,19 @@ func (p *ConnPool) tryDial() { return } - conn, err := p.cfg.Dialer(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), p.cfg.DialTimeout) + + conn, err := p.cfg.Dialer(ctx) if err != nil { p.setLastDialError(err) time.Sleep(time.Second) + cancel() continue } atomic.StoreUint32(&p.dialErrorsNum, 0) _ = conn.Close() + cancel() return } } @@ -312,6 +323,7 @@ func (p *ConnPool) waitTurn(ctx context.Context) error { default: } + start := time.Now() timer := timers.Get().(*time.Timer) timer.Reset(p.cfg.PoolTimeout) @@ -323,6 +335,8 @@ func (p *ConnPool) waitTurn(ctx context.Context) error { timers.Put(timer) return ctx.Err() case p.queue <- struct{}{}: + p.waitDurationNs.Add(time.Since(start).Nanoseconds()) + atomic.AddUint32(&p.stats.WaitCount, 1) if !timer.Stop() { <-timer.C } @@ -449,9 +463,11 @@ func (p *ConnPool) IdleLen() int { func (p *ConnPool) Stats() *Stats { return &Stats{ - Hits: atomic.LoadUint32(&p.stats.Hits), - Misses: atomic.LoadUint32(&p.stats.Misses), - Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + Hits: atomic.LoadUint32(&p.stats.Hits), + Misses: atomic.LoadUint32(&p.stats.Misses), + Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + WaitCount: atomic.LoadUint32(&p.stats.WaitCount), + WaitDurationNs: p.waitDurationNs.Load(), TotalConns: uint32(p.Len()), IdleConns: uint32(p.IdleLen()), diff --git a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go index 78595cc4f0..38e66c6887 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go +++ b/vendor/github.com/redis/go-redis/v9/internal/proto/writer.go @@ -66,56 +66,95 @@ func (w *Writer) WriteArg(v interface{}) error { case string: return w.string(v) case *string: + if v == nil { + return w.string("") + } return w.string(*v) case []byte: return w.bytes(v) case int: return w.int(int64(v)) case *int: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int8: return w.int(int64(v)) case *int8: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int16: return w.int(int64(v)) case *int16: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int32: 
return w.int(int64(v)) case *int32: + if v == nil { + return w.int(0) + } return w.int(int64(*v)) case int64: return w.int(v) case *int64: + if v == nil { + return w.int(0) + } return w.int(*v) case uint: return w.uint(uint64(v)) case *uint: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint8: return w.uint(uint64(v)) case *uint8: + if v == nil { + return w.string("") + } return w.uint(uint64(*v)) case uint16: return w.uint(uint64(v)) case *uint16: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint32: return w.uint(uint64(v)) case *uint32: + if v == nil { + return w.uint(0) + } return w.uint(uint64(*v)) case uint64: return w.uint(v) case *uint64: + if v == nil { + return w.uint(0) + } return w.uint(*v) case float32: return w.float(float64(v)) case *float32: + if v == nil { + return w.float(0) + } return w.float(float64(*v)) case float64: return w.float(v) case *float64: + if v == nil { + return w.float(0) + } return w.float(*v) case bool: if v { @@ -123,6 +162,9 @@ func (w *Writer) WriteArg(v interface{}) error { } return w.int(0) case *bool: + if v == nil { + return w.int(0) + } if *v { return w.int(1) } @@ -130,8 +172,19 @@ func (w *Writer) WriteArg(v interface{}) error { case time.Time: w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) return w.bytes(w.numBuf) + case *time.Time: + if v == nil { + v = &time.Time{} + } + w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) + return w.bytes(w.numBuf) case time.Duration: return w.int(v.Nanoseconds()) + case *time.Duration: + if v == nil { + return w.int(0) + } + return w.int(v.Nanoseconds()) case encoding.BinaryMarshaler: b, err := v.MarshalBinary() if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/internal/util.go b/vendor/github.com/redis/go-redis/v9/internal/util.go index cc1bff24e6..f77775ff40 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/util.go +++ b/vendor/github.com/redis/go-redis/v9/internal/util.go @@ -49,22 +49,7 @@ func isLower(s string) bool { } func ReplaceSpaces(s string) string { - // Pre-allocate a builder with the same length as s to minimize allocations. - // This is a basic optimization; adjust the initial size based on your use case. - var builder strings.Builder - builder.Grow(len(s)) - - for _, char := range s { - if char == ' ' { - // Replace space with a hyphen. - builder.WriteRune('-') - } else { - // Copy the character as-is. - builder.WriteRune(char) - } - } - - return builder.String() + return strings.ReplaceAll(s, " ", "-") } func GetAddr(addr string) string { diff --git a/vendor/github.com/redis/go-redis/v9/internal/util/convert.go b/vendor/github.com/redis/go-redis/v9/internal/util/convert.go new file mode 100644 index 0000000000..d326d50d35 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/internal/util/convert.go @@ -0,0 +1,30 @@ +package util + +import ( + "fmt" + "math" + "strconv" +) + +// ParseFloat parses a Redis RESP3 float reply into a Go float64, +// handling "inf", "-inf", "nan" per Redis conventions. +func ParseStringToFloat(s string) (float64, error) { + switch s { + case "inf": + return math.Inf(1), nil + case "-inf": + return math.Inf(-1), nil + case "nan", "-nan": + return math.NaN(), nil + } + return strconv.ParseFloat(s, 64) +} + +// MustParseFloat is like ParseFloat but panics on parse errors. 
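+//
+// Illustrative behaviour, following directly from ParseStringToFloat above:
+//
+//	MustParseFloat("inf")  // math.Inf(1)
+//	MustParseFloat("-inf") // math.Inf(-1)
+//	MustParseFloat("1.5")  // 1.5
+//	MustParseFloat("oops") // panics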
+func MustParseFloat(s string) float64 {
+	f, err := ParseStringToFloat(s)
+	if err != nil {
+		panic(fmt.Sprintf("redis: failed to parse float %q: %v", s, err))
+	}
+	return f
+}
diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go
index 53991dc9d9..b87a234a41 100644
--- a/vendor/github.com/redis/go-redis/v9/options.go
+++ b/vendor/github.com/redis/go-redis/v9/options.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/redis/go-redis/v9/auth"
 	"github.com/redis/go-redis/v9/internal/pool"
 )
 
@@ -29,10 +30,13 @@ type Limiter interface {
 
 // Options keeps the settings to set up redis connection.
 type Options struct {
-	// The network type, either tcp or unix.
-	// Default is tcp.
+
+	// Network type, either tcp or unix.
+	//
+	// default: tcp.
 	Network string
-	// host:port address.
+
+	// Addr is the address formatted as host:port.
 	Addr string
 
 	// ClientName will execute the `CLIENT SETNAME ClientName` command for each conn.
@@ -46,17 +50,21 @@ type Options struct {
 	OnConnect func(ctx context.Context, cn *Conn) error
 
 	// Protocol 2 or 3. Use the version to negotiate RESP version with redis-server.
-	// Default is 3.
+	//
+	// default: 3.
 	Protocol int
-	// Use the specified Username to authenticate the current connection
+
+	// Username is used to authenticate the current connection
 	// with one of the connections defined in the ACL list when connecting
 	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
 	Username string
-	// Optional password. Must match the password specified in the
-	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+
+	// Password is an optional password. Must match the password specified in the
+	// `requirepass` server configuration option (if connecting to a Redis 5.0 instance, or lower),
 	// or the User Password when connecting to a Redis 6.0 instance, or greater,
 	// that is using the Redis ACL system.
 	Password string
+
 	// CredentialsProvider allows the username and password to be updated
 	// before reconnecting. It should return the current username and password.
 	CredentialsProvider func() (username string, password string)
@@ -67,85 +75,126 @@ type Options struct {
 	// There will be a conflict between them; if CredentialsProviderContext exists, we will ignore CredentialsProvider.
 	CredentialsProviderContext func(ctx context.Context) (username string, password string, err error)
 
-	// Database to be selected after connecting to the server.
+	// StreamingCredentialsProvider is used to retrieve the credentials
+	// for the connection from an external source. Those credentials may change
+	// during the connection lifetime. This is useful for managed identity
+	// scenarios where the credentials are retrieved from an external source.
+	//
+	// Currently, this is a placeholder for a future implementation.
+	StreamingCredentialsProvider auth.StreamingCredentialsProvider
+
+	// DB is the database to be selected after connecting to the server.
 	DB int
 
-	// Maximum number of retries before giving up.
-	// Default is 3 retries; -1 (not 0) disables retries.
+	// MaxRetries is the maximum number of retries before giving up.
+	// -1 (not 0) disables retries.
+	//
+	// default: 3 retries
 	MaxRetries int
-	// Minimum backoff between each retry.
-	// Default is 8 milliseconds; -1 disables backoff.
+
+	// MinRetryBackoff is the minimum backoff between each retry.
+	// -1 disables backoff.
+ // + // default: 8 milliseconds MinRetryBackoff time.Duration - // Maximum backoff between each retry. - // Default is 512 milliseconds; -1 disables backoff. + + // MaxRetryBackoff is the maximum backoff between each retry. + // -1 disables backoff. + // default: 512 milliseconds; MaxRetryBackoff time.Duration - // Dial timeout for establishing new connections. - // Default is 5 seconds. + // DialTimeout for establishing new connections. + // + // default: 5 seconds DialTimeout time.Duration - // Timeout for socket reads. If reached, commands will fail + + // ReadTimeout for socket reads. If reached, commands will fail // with a timeout instead of blocking. Supported values: - // - `0` - default timeout (3 seconds). - // - `-1` - no timeout (block indefinitely). - // - `-2` - disables SetReadDeadline calls completely. + // + // - `-1` - no timeout (block indefinitely). + // - `-2` - disables SetReadDeadline calls completely. + // + // default: 3 seconds ReadTimeout time.Duration - // Timeout for socket writes. If reached, commands will fail + + // WriteTimeout for socket writes. If reached, commands will fail // with a timeout instead of blocking. Supported values: - // - `0` - default timeout (3 seconds). - // - `-1` - no timeout (block indefinitely). - // - `-2` - disables SetWriteDeadline calls completely. + // + // - `-1` - no timeout (block indefinitely). + // - `-2` - disables SetWriteDeadline calls completely. + // + // default: 3 seconds WriteTimeout time.Duration + // ContextTimeoutEnabled controls whether the client respects context timeouts and deadlines. // See https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts ContextTimeoutEnabled bool - // Type of connection pool. - // true for FIFO pool, false for LIFO pool. + // PoolFIFO type of connection pool. + // + // - true for FIFO pool + // - false for LIFO pool. + // // Note that FIFO has slightly higher overhead compared to LIFO, // but it helps closing idle connections faster reducing the pool size. PoolFIFO bool - // Base number of socket connections. + + // PoolSize is the base number of socket connections. // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS. // If there is not enough connections in the pool, new connections will be allocated in excess of PoolSize, // you can limit it through MaxActiveConns + // + // default: 10 * runtime.GOMAXPROCS(0) PoolSize int - // Amount of time client waits for connection if all connections + + // PoolTimeout is the amount of time client waits for connection if all connections // are busy before returning an error. - // Default is ReadTimeout + 1 second. + // + // default: ReadTimeout + 1 second PoolTimeout time.Duration - // Minimum number of idle connections which is useful when establishing - // new connection is slow. - // Default is 0. the idle connections are not closed by default. + + // MinIdleConns is the minimum number of idle connections which is useful when establishing + // new connection is slow. The idle connections are not closed by default. + // + // default: 0 MinIdleConns int - // Maximum number of idle connections. - // Default is 0. the idle connections are not closed by default. + + // MaxIdleConns is the maximum number of idle connections. + // The idle connections are not closed by default. + // + // default: 0 MaxIdleConns int - // Maximum number of connections allocated by the pool at a given time. + + // MaxActiveConns is the maximum number of connections allocated by the pool at a given time. 
// When zero, there is no limit on the number of connections in the pool. + // If the pool is full, the next call to Get() will block until a connection is released. MaxActiveConns int + // ConnMaxIdleTime is the maximum amount of time a connection may be idle. // Should be less than server's timeout. // // Expired connections may be closed lazily before reuse. // If d <= 0, connections are not closed due to a connection's idle time. + // -1 disables idle timeout check. // - // Default is 30 minutes. -1 disables idle timeout check. + // default: 30 minutes ConnMaxIdleTime time.Duration + // ConnMaxLifetime is the maximum amount of time a connection may be reused. // // Expired connections may be closed lazily before reuse. // If <= 0, connections are not closed due to a connection's age. // - // Default is to not close idle connections. + // default: 0 ConnMaxLifetime time.Duration - // TLS Config to use. When set, TLS will be negotiated. + // TLSConfig to use. When set, TLS will be negotiated. TLSConfig *tls.Config // Limiter interface used to implement circuit breaker or rate limiter. Limiter Limiter - // Enables read only queries on slave/follower nodes. + // readOnly enables read only queries on slave/follower nodes. readOnly bool // DisableIndentity - Disable set-lib on connect. @@ -161,9 +210,11 @@ type Options struct { DisableIdentity bool // Add suffix to client name. Default is empty. + // IdentitySuffix - add suffix to client name. IdentitySuffix string // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. + // When unstable mode is enabled, the client will use RESP3 protocol and only be able to use RawResult UnstableResp3 bool } @@ -178,6 +229,9 @@ func (opt *Options) init() { opt.Network = "tcp" } } + if opt.Protocol < 2 { + opt.Protocol = 3 + } if opt.DialTimeout == 0 { opt.DialTimeout = 5 * time.Second } @@ -214,9 +268,10 @@ func (opt *Options) init() { opt.ConnMaxIdleTime = 30 * time.Minute } - if opt.MaxRetries == -1 { + switch opt.MaxRetries { + case -1: opt.MaxRetries = 0 - } else if opt.MaxRetries == 0 { + case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { @@ -276,6 +331,7 @@ func NewDialer(opt *Options) func(context.Context, string, string) (net.Conn, er // URL attributes (scheme, host, userinfo, resp.), query parameters using these // names will be treated as unknown parameters // - unknown parameter names will result in an error +// - use "skip_verify=true" to ignore TLS certificate validation // // Examples: // @@ -496,6 +552,9 @@ func setupConnParams(u *url.URL, o *Options) (*Options, error) { if q.err != nil { return nil, q.err } + if o.TLSConfig != nil && q.has("skip_verify") { + o.TLSConfig.InsecureSkipVerify = q.bool("skip_verify") + } // any parameters left? 
if r := q.remaining(); len(r) > 0 { @@ -527,6 +586,7 @@ func newConnPool( PoolFIFO: opt.PoolFIFO, PoolSize: opt.PoolSize, PoolTimeout: opt.PoolTimeout, + DialTimeout: opt.DialTimeout, MinIdleConns: opt.MinIdleConns, MaxIdleConns: opt.MaxIdleConns, MaxActiveConns: opt.MaxActiveConns, diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go index f5a5768421..6c6b756380 100644 --- a/vendor/github.com/redis/go-redis/v9/osscluster.go +++ b/vendor/github.com/redis/go-redis/v9/osscluster.go @@ -14,6 +14,7 @@ import ( "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hashtag" "github.com/redis/go-redis/v9/internal/pool" @@ -21,6 +22,10 @@ import ( "github.com/redis/go-redis/v9/internal/rand" ) +const ( + minLatencyMeasurementInterval = 10 * time.Second +) + var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") // ClusterOptions are used to configure a cluster client and should be @@ -62,11 +67,12 @@ type ClusterOptions struct { OnConnect func(ctx context.Context, cn *Conn) error - Protocol int - Username string - Password string - CredentialsProvider func() (username string, password string) - CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + Protocol int + Username string + Password string + CredentialsProvider func() (username string, password string) + CredentialsProviderContext func(ctx context.Context) (username string, password string, err error) + StreamingCredentialsProvider auth.StreamingCredentialsProvider MaxRetries int MinRetryBackoff time.Duration @@ -107,9 +113,10 @@ type ClusterOptions struct { } func (opt *ClusterOptions) init() { - if opt.MaxRedirects == -1 { + switch opt.MaxRedirects { + case -1: opt.MaxRedirects = 0 - } else if opt.MaxRedirects == 0 { + case 0: opt.MaxRedirects = 3 } @@ -287,11 +294,12 @@ func (opt *ClusterOptions) clientOptions() *Options { Dialer: opt.Dialer, OnConnect: opt.OnConnect, - Protocol: opt.Protocol, - Username: opt.Username, - Password: opt.Password, - CredentialsProvider: opt.CredentialsProvider, - CredentialsProviderContext: opt.CredentialsProviderContext, + Protocol: opt.Protocol, + Username: opt.Username, + Password: opt.Password, + CredentialsProvider: opt.CredentialsProvider, + CredentialsProviderContext: opt.CredentialsProviderContext, + StreamingCredentialsProvider: opt.StreamingCredentialsProvider, MaxRetries: opt.MaxRetries, MinRetryBackoff: opt.MinRetryBackoff, @@ -332,6 +340,10 @@ type clusterNode struct { latency uint32 // atomic generation uint32 // atomic failing uint32 // atomic + + // last time the latency measurement was performed for the node, stored in nanoseconds + // from epoch + lastLatencyMeasurement int64 // atomic } func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { @@ -384,6 +396,7 @@ func (n *clusterNode) updateLatency() { latency = float64(dur) / float64(successes) } atomic.StoreUint32(&n.latency, uint32(latency+0.5)) + n.SetLastLatencyMeasurement(time.Now()) } func (n *clusterNode) Latency() time.Duration { @@ -413,6 +426,10 @@ func (n *clusterNode) Generation() uint32 { return atomic.LoadUint32(&n.generation) } +func (n *clusterNode) LastLatencyMeasurement() int64 { + return atomic.LoadInt64(&n.lastLatencyMeasurement) +} + func (n *clusterNode) SetGeneration(gen uint32) { for { v := atomic.LoadUint32(&n.generation) @@ -422,6 +439,23 @@ func (n *clusterNode) SetGeneration(gen uint32) { } } +func 
(n *clusterNode) SetLastLatencyMeasurement(t time.Time) { + for { + v := atomic.LoadInt64(&n.lastLatencyMeasurement) + if t.UnixNano() < v || atomic.CompareAndSwapInt64(&n.lastLatencyMeasurement, v, t.UnixNano()) { + break + } + } +} + +func (n *clusterNode) Loading() bool { + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + err := n.Client.Ping(ctx).Err() + return err != nil && isLoadingError(err) +} + //------------------------------------------------------------------------------ type clusterNodes struct { @@ -511,10 +545,11 @@ func (c *clusterNodes) GC(generation uint32) { c.mu.Lock() c.activeAddrs = c.activeAddrs[:0] + now := time.Now() for addr, node := range c.nodes { if node.Generation() >= generation { c.activeAddrs = append(c.activeAddrs, addr) - if c.opt.RouteByLatency { + if c.opt.RouteByLatency && node.LastLatencyMeasurement() < now.Add(-minLatencyMeasurementInterval).UnixNano() { go node.updateLatency() } continue @@ -730,7 +765,8 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { case 1: return nodes[0], nil case 2: - if slave := nodes[1]; !slave.Failing() { + slave := nodes[1] + if !slave.Failing() && !slave.Loading() { return slave, nil } return nodes[0], nil @@ -739,7 +775,7 @@ func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { for i := 0; i < 10; i++ { n := rand.Intn(len(nodes)-1) + 1 slave = nodes[n] - if !slave.Failing() { + if !slave.Failing() && !slave.Loading() { return slave, nil } } @@ -900,6 +936,9 @@ type ClusterClient struct { // NewClusterClient returns a Redis Cluster client as described in // http://redis.io/topics/cluster-spec. func NewClusterClient(opt *ClusterOptions) *ClusterClient { + if opt == nil { + panic("redis: NewClusterClient nil options") + } opt.init() c := &ClusterClient{ @@ -954,7 +993,7 @@ func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { } func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd) var node *clusterNode var moved bool var ask bool @@ -1302,7 +1341,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd) node, err := c.slotReadOnlyNode(state, slot) if err != nil { return err @@ -1313,7 +1352,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd } for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd) node, err := state.slotMasterNode(slot) if err != nil { return err @@ -1339,7 +1378,9 @@ func (c *ClusterClient) processPipelineNode( _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { cn, err := node.Client.getConn(ctx) if err != nil { - node.MarkAsFailing() + if !isContextError(err) { + node.MarkAsFailing() + } _ = c.mapCmdsByNode(ctx, failedCmds, cmds) setCmdsErr(cmds, err) return err @@ -1469,7 +1510,7 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) err return err } - cmdsMap := c.mapCmdsBySlot(ctx, cmds) + cmdsMap := c.mapCmdsBySlot(cmds) for slot, cmds := range cmdsMap { node, err := state.slotMasterNode(slot) if err != nil { @@ -1508,10 +1549,10 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) err return cmdsFirstErr(cmds) } -func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder { 
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { cmdsMap := make(map[int][]Cmder) for _, cmd := range cmds { - slot := c.cmdSlot(ctx, cmd) + slot := c.cmdSlot(cmd) cmdsMap[slot] = append(cmdsMap[slot], cmd) } return cmdsMap @@ -1540,7 +1581,7 @@ func (c *ClusterClient) processTxPipelineNode( } func (c *ClusterClient) processTxPipelineNodeConn( - ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, + ctx context.Context, _ *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error { if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmds(wr, cmds) @@ -1829,9 +1870,9 @@ func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { return info } -func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int { +func (c *ClusterClient) cmdSlot(cmd Cmder) int { args := cmd.Args() - if args[0] == "cluster" && args[1] == "getkeysinslot" { + if args[0] == "cluster" && (args[1] == "getkeysinslot" || args[1] == "countkeysinslot") { return args[2].(int) } diff --git a/vendor/github.com/redis/go-redis/v9/probabilistic.go b/vendor/github.com/redis/go-redis/v9/probabilistic.go index 5d5cd1a628..02ca263cbd 100644 --- a/vendor/github.com/redis/go-redis/v9/probabilistic.go +++ b/vendor/github.com/redis/go-redis/v9/probabilistic.go @@ -319,37 +319,69 @@ func (cmd *BFInfoCmd) Result() (BFInfo, error) { } func (cmd *BFInfoCmd) readReply(rd *proto.Reader) (err error) { - n, err := rd.ReadMapLen() + result := BFInfo{} + + // Create a mapping from key names to pointers of struct fields + respMapping := map[string]*int64{ + "Capacity": &result.Capacity, + "CAPACITY": &result.Capacity, + "Size": &result.Size, + "SIZE": &result.Size, + "Number of filters": &result.Filters, + "FILTERS": &result.Filters, + "Number of items inserted": &result.ItemsInserted, + "ITEMS": &result.ItemsInserted, + "Expansion rate": &result.ExpansionRate, + "EXPANSION": &result.ExpansionRate, + } + + // Helper function to read and assign a value based on the key + readAndAssignValue := func(key string) error { + fieldPtr, exists := respMapping[key] + if !exists { + return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + } + + // Read the integer and assign to the field via pointer dereferencing + val, err := rd.ReadInt() + if err != nil { + return err + } + *fieldPtr = val + return nil + } + + readType, err := rd.PeekReplyType() if err != nil { return err } - var key string - var result BFInfo - for f := 0; f < n; f++ { - key, err = rd.ReadString() + if len(cmd.args) > 2 && readType == proto.RespArray { + n, err := rd.ReadArrayLen() if err != nil { return err } - - switch key { - case "Capacity": - result.Capacity, err = rd.ReadInt() - case "Size": - result.Size, err = rd.ReadInt() - case "Number of filters": - result.Filters, err = rd.ReadInt() - case "Number of items inserted": - result.ItemsInserted, err = rd.ReadInt() - case "Expansion rate": - result.ExpansionRate, err = rd.ReadInt() - default: - return fmt.Errorf("redis: BLOOM.INFO unexpected key %s", key) + if key, ok := cmd.args[2].(string); ok && n == 1 { + if err := readAndAssignValue(key); err != nil { + return err + } + } else { + return fmt.Errorf("redis: BLOOM.INFO invalid argument key type") } - + } else { + n, err := rd.ReadMapLen() if err != nil { return err } + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + if err := readAndAssignValue(key); err != nil { + return err + } + } 
} cmd.val = result diff --git a/vendor/github.com/redis/go-redis/v9/pubsub.go b/vendor/github.com/redis/go-redis/v9/pubsub.go index 72b18f49a7..2a0e7a81e1 100644 --- a/vendor/github.com/redis/go-redis/v9/pubsub.go +++ b/vendor/github.com/redis/go-redis/v9/pubsub.go @@ -45,6 +45,9 @@ func (c *PubSub) init() { } func (c *PubSub) String() string { + c.mu.Lock() + defer c.mu.Unlock() + channels := mapKeys(c.channels) channels = append(channels, mapKeys(c.patterns)...) channels = append(channels, mapKeys(c.schannels)...) @@ -432,7 +435,7 @@ func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (int return nil, err } - err = cn.WithReader(context.Background(), timeout, func(rd *proto.Reader) error { + err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error { return c.cmd.readReply(rd) }) diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go index 7304305154..bafe82f752 100644 --- a/vendor/github.com/redis/go-redis/v9/redis.go +++ b/vendor/github.com/redis/go-redis/v9/redis.go @@ -9,6 +9,7 @@ import ( "sync/atomic" "time" + "github.com/redis/go-redis/v9/auth" "github.com/redis/go-redis/v9/internal" "github.com/redis/go-redis/v9/internal/hscan" "github.com/redis/go-redis/v9/internal/pool" @@ -203,6 +204,7 @@ func (hs *hooksMixin) processTxPipelineHook(ctx context.Context, cmds []Cmder) e type baseClient struct { opt *Options connPool pool.Pooler + hooksMixin onClose func() error // hook called when client is closed } @@ -282,36 +284,107 @@ func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { return cn, nil } +func (c *baseClient) newReAuthCredentialsListener(poolCn *pool.Conn) auth.CredentialsListener { + return auth.NewReAuthCredentialsListener( + c.reAuthConnection(poolCn), + c.onAuthenticationErr(poolCn), + ) +} + +func (c *baseClient) reAuthConnection(poolCn *pool.Conn) func(credentials auth.Credentials) error { + return func(credentials auth.Credentials) error { + var err error + username, password := credentials.BasicAuth() + ctx := context.Background() + connPool := pool.NewSingleConnPool(c.connPool, poolCn) + // hooksMixin are intentionally empty here + cn := newConn(c.opt, connPool, nil) + + if username != "" { + err = cn.AuthACL(ctx, username, password).Err() + } else { + err = cn.Auth(ctx, password).Err() + } + return err + } +} +func (c *baseClient) onAuthenticationErr(poolCn *pool.Conn) func(err error) { + return func(err error) { + if err != nil { + if isBadConn(err, false, c.opt.Addr) { + // Close the connection to force a reconnection. + err := c.connPool.CloseConn(poolCn) + if err != nil { + internal.Logger.Printf(context.Background(), "redis: failed to close connection: %v", err) + // try to close the network connection directly + // so that no resource is leaked + err := poolCn.Close() + if err != nil { + internal.Logger.Printf(context.Background(), "redis: failed to close network connection: %v", err) + } + } + } + internal.Logger.Printf(context.Background(), "redis: re-authentication failed: %v", err) + } + } +} + +func (c *baseClient) wrappedOnClose(newOnClose func() error) func() error { + onClose := c.onClose + return func() error { + var firstErr error + err := newOnClose() + // Even if we have an error we would like to execute the onClose hook + // if it exists. We will return the first error that occurred. + // This is to keep error handling consistent with the rest of the code. 
+ if err != nil { + firstErr = err + } + if onClose != nil { + err = onClose() + if err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr + } +} + func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { if cn.Inited { return nil } - cn.Inited = true var err error - username, password := c.opt.Username, c.opt.Password - if c.opt.CredentialsProviderContext != nil { - if username, password, err = c.opt.CredentialsProviderContext(ctx); err != nil { - return err + cn.Inited = true + connPool := pool.NewSingleConnPool(c.connPool, cn) + conn := newConn(c.opt, connPool, &c.hooksMixin) + + username, password := "", "" + if c.opt.StreamingCredentialsProvider != nil { + credentials, unsubscribeFromCredentialsProvider, err := c.opt.StreamingCredentialsProvider. + Subscribe(c.newReAuthCredentialsListener(cn)) + if err != nil { + return fmt.Errorf("failed to subscribe to streaming credentials: %w", err) + } + c.onClose = c.wrappedOnClose(unsubscribeFromCredentialsProvider) + cn.SetOnClose(unsubscribeFromCredentialsProvider) + username, password = credentials.BasicAuth() + } else if c.opt.CredentialsProviderContext != nil { + username, password, err = c.opt.CredentialsProviderContext(ctx) + if err != nil { + return fmt.Errorf("failed to get credentials from context provider: %w", err) } } else if c.opt.CredentialsProvider != nil { username, password = c.opt.CredentialsProvider() - } - - connPool := pool.NewSingleConnPool(c.connPool, cn) - conn := newConn(c.opt, connPool) - - var auth bool - protocol := c.opt.Protocol - // By default, use RESP3 in current version. - if protocol < 2 { - protocol = 3 + } else if c.opt.Username != "" || c.opt.Password != "" { + username, password = c.opt.Username, c.opt.Password } // for redis-server versions that do not support the HELLO command, // RESP2 will continue to be used. - if err = conn.Hello(ctx, protocol, username, password, "").Err(); err == nil { - auth = true + if err = conn.Hello(ctx, c.opt.Protocol, username, password, c.opt.ClientName).Err(); err == nil { + // Authentication successful with HELLO command } else if !isRedisError(err) { // When the server responds with the RESP protocol and the result is not a normal // execution result of the HELLO command, we consider it to be an indication that @@ -321,17 +394,19 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { // with different error string results for unsupported commands, making it // difficult to rely on error strings to determine all results. 
return err + } else if password != "" { + // Try legacy AUTH command if HELLO failed + if username != "" { + err = conn.AuthACL(ctx, username, password).Err() + } else { + err = conn.Auth(ctx, password).Err() + } + if err != nil { + return fmt.Errorf("failed to authenticate: %w", err) + } } _, err = conn.Pipelined(ctx, func(pipe Pipeliner) error { - if !auth && password != "" { - if username != "" { - pipe.AuthACL(ctx, username, password) - } else { - pipe.Auth(ctx, password) - } - } - if c.opt.DB > 0 { pipe.Select(ctx, c.opt.DB) } @@ -347,7 +422,7 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { return nil }) if err != nil { - return err + return fmt.Errorf("failed to initialize connection options: %w", err) } if !c.opt.DisableIdentity && !c.opt.DisableIndentity { @@ -369,6 +444,7 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { if c.opt.OnConnect != nil { return c.opt.OnConnect(ctx, conn) } + return nil } @@ -487,6 +563,16 @@ func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { return c.opt.ReadTimeout } +// context returns the context for the current connection. +// If the context timeout is enabled, it returns the original context. +// Otherwise, it returns a new background context. +func (c *baseClient) context(ctx context.Context) context.Context { + if c.opt.ContextTimeoutEnabled { + return ctx + } + return context.Background() +} + // Close closes the client, releasing any open resources. // // It is rare to Close a Client, as the Client is meant to be @@ -639,13 +725,6 @@ func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) return nil } -func (c *baseClient) context(ctx context.Context) context.Context { - if c.opt.ContextTimeoutEnabled { - return ctx - } - return context.Background() -} - //------------------------------------------------------------------------------ // Client is a Redis client representing a pool of zero or more underlying connections. @@ -656,11 +735,13 @@ func (c *baseClient) context(ctx context.Context) context.Context { type Client struct { *baseClient cmdable - hooksMixin } // NewClient returns a client to the Redis Server specified by Options. func NewClient(opt *Options) *Client { + if opt == nil { + panic("redis: NewClient nil options") + } opt.init() c := Client{ @@ -692,7 +773,7 @@ func (c *Client) WithTimeout(timeout time.Duration) *Client { } func (c *Client) Conn() *Conn { - return newConn(c.opt, pool.NewStickyConnPool(c.connPool)) + return newConn(c.opt, pool.NewStickyConnPool(c.connPool), &c.hooksMixin) } // Do create a Cmd from the args and processes the cmd. @@ -825,10 +906,12 @@ type Conn struct { baseClient cmdable statefulCmdable - hooksMixin } -func newConn(opt *Options, connPool pool.Pooler) *Conn { +// newConn is a helper func to create a new Conn instance. +// the Conn instance is not thread-safe and should not be shared between goroutines. +// the parentHooks will be cloned, no need to clone before passing it. 
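+// For example, Client.Conn passes the client's own hooks:
+//
+//	newConn(c.opt, pool.NewStickyConnPool(c.connPool), &c.hooksMixin)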
+func newConn(opt *Options, connPool pool.Pooler, parentHooks *hooksMixin) *Conn { c := Conn{ baseClient: baseClient{ opt: opt, @@ -836,6 +919,10 @@ func newConn(opt *Options, connPool pool.Pooler) *Conn { }, } + if parentHooks != nil { + c.hooksMixin = parentHooks.clone() + } + c.cmdable = c.Process c.statefulCmdable = c.Process c.initHooks(hooks{ diff --git a/vendor/github.com/redis/go-redis/v9/result.go b/vendor/github.com/redis/go-redis/v9/result.go index cfd4cf92ed..3e0d0a1348 100644 --- a/vendor/github.com/redis/go-redis/v9/result.go +++ b/vendor/github.com/redis/go-redis/v9/result.go @@ -82,6 +82,14 @@ func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { return &cmd } +// NewFloatSliceResult returns a FloatSliceCmd initialised with val and err for testing. +func NewFloatSliceResult(val []float64, err error) *FloatSliceCmd { + var cmd FloatSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + // NewMapStringStringResult returns a MapStringStringCmd initialised with val and err for testing. func NewMapStringStringResult(val map[string]string, err error) *MapStringStringCmd { var cmd MapStringStringCmd diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go index 990a4c8872..8a004b8c0e 100644 --- a/vendor/github.com/redis/go-redis/v9/ring.go +++ b/vendor/github.com/redis/go-redis/v9/ring.go @@ -128,9 +128,10 @@ func (opt *RingOptions) init() { opt.NewConsistentHash = newRendezvous } - if opt.MaxRetries == -1 { + switch opt.MaxRetries { + case -1: opt.MaxRetries = 0 - } else if opt.MaxRetries == 0 { + case 0: opt.MaxRetries = 3 } switch opt.MinRetryBackoff { @@ -348,16 +349,16 @@ func (c *ringSharding) newRingShards( return } +// Warning: External exposure of `c.shards.list` may cause data races. +// So keep internal or implement deep copy if exposed. func (c *ringSharding) List() []*ringShard { - var list []*ringShard - c.mu.RLock() - if !c.closed { - list = c.shards.list - } - c.mu.RUnlock() + defer c.mu.RUnlock() - return list + if c.closed { + return nil + } + return c.shards.list } func (c *ringSharding) Hash(key string) string { @@ -421,6 +422,7 @@ func (c *ringSharding) Heartbeat(ctx context.Context, frequency time.Duration) { case <-ticker.C: var rebalance bool + // note: `c.List()` return a shadow copy of `[]*ringShard`. for _, shard := range c.List() { err := shard.Client.Ping(ctx).Err() isUp := err == nil || err == pool.ErrPoolTimeout @@ -521,6 +523,9 @@ type Ring struct { } func NewRing(opt *RingOptions) *Ring { + if opt == nil { + panic("redis: NewRing nil options") + } opt.init() hbCtx, hbCancel := context.WithCancel(context.Background()) @@ -577,6 +582,7 @@ func (c *Ring) retryBackoff(attempt int) time.Duration { // PoolStats returns accumulated connection pool stats. func (c *Ring) PoolStats() *PoolStats { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var acc PoolStats for _, shard := range shards { @@ -646,6 +652,7 @@ func (c *Ring) ForEachShard( ctx context.Context, fn func(ctx context.Context, client *Client) error, ) error { + // note: `c.List()` return a shadow copy of `[]*ringShard`. shards := c.sharding.List() var wg sync.WaitGroup errCh := make(chan error, 1) @@ -677,6 +684,7 @@ func (c *Ring) ForEachShard( } func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // note: `c.List()` return a shadow copy of `[]*ringShard`. 
shards := c.sharding.List() var firstErr error for _, shard := range shards { @@ -694,7 +702,7 @@ func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { return nil, firstErr } -func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) { +func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) { pos := cmdFirstKeyPos(cmd) if pos == 0 { return c.sharding.Random() @@ -712,7 +720,7 @@ func (c *Ring) process(ctx context.Context, cmd Cmder) error { } } - shard, err := c.cmdShard(ctx, cmd) + shard, err := c.cmdShard(cmd) if err != nil { return err } @@ -805,7 +813,7 @@ func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) er for _, key := range keys { if key != "" { - shard, err := c.sharding.GetByKey(hashtag.Key(key)) + shard, err := c.sharding.GetByKey(key) if err != nil { return err } @@ -839,3 +847,26 @@ func (c *Ring) Close() error { return c.sharding.Close() } + +// GetShardClients returns a list of all shard clients in the ring. +// This can be used to create dedicated connections (e.g., PubSub) for each shard. +func (c *Ring) GetShardClients() []*Client { + shards := c.sharding.List() + clients := make([]*Client, 0, len(shards)) + for _, shard := range shards { + if shard.IsUp() { + clients = append(clients, shard.Client) + } + } + return clients +} + +// GetShardClientForKey returns the shard client that would handle the given key. +// This can be used to determine which shard a particular key/channel would be routed to. +func (c *Ring) GetShardClientForKey(key string) (*Client, error) { + shard, err := c.sharding.GetByKey(key) + if err != nil { + return nil, err + } + return shard.Client, nil +} diff --git a/vendor/github.com/redis/go-redis/v9/search_commands.go b/vendor/github.com/redis/go-redis/v9/search_commands.go index 9359a723e9..b31baaa760 100644 --- a/vendor/github.com/redis/go-redis/v9/search_commands.go +++ b/vendor/github.com/redis/go-redis/v9/search_commands.go @@ -114,6 +114,7 @@ type SpellCheckTerms struct { } type FTExplainOptions struct { + // Dialect 1,3 and 4 are deprecated since redis 8.0 Dialect string } @@ -240,14 +241,19 @@ type FTAggregateWithCursor struct { } type FTAggregateOptions struct { - Verbatim bool - LoadAll bool - Load []FTAggregateLoad - Timeout int - GroupBy []FTAggregateGroupBy - SortBy []FTAggregateSortBy - SortByMax int - Scorer string + Verbatim bool + LoadAll bool + Load []FTAggregateLoad + Timeout int + GroupBy []FTAggregateGroupBy + SortBy []FTAggregateSortBy + SortByMax int + // Scorer is used to set scoring function, if not set passed, a default will be used. + // The default scorer depends on the Redis version: + // - `BM25` for Redis >= 8 + // - `TFIDF` for Redis < 8 + Scorer string + // AddScores is available in Redis CE 8 AddScores bool Apply []FTAggregateApply LimitOffset int @@ -256,7 +262,8 @@ type FTAggregateOptions struct { WithCursor bool WithCursorOptions *FTAggregateWithCursor Params map[string]interface{} - DialectVersion int + // Dialect 1,3 and 4 are deprecated since redis 8.0 + DialectVersion int } type FTSearchFilter struct { @@ -284,23 +291,30 @@ type FTSearchSortBy struct { Desc bool } +// FTSearchOptions hold options that can be passed to the FT.SEARCH command. 
+// More information about the options can be found +// in the documentation for FT.SEARCH https://redis.io/docs/latest/commands/ft.search/ type FTSearchOptions struct { - NoContent bool - Verbatim bool - NoStopWords bool - WithScores bool - WithPayloads bool - WithSortKeys bool - Filters []FTSearchFilter - GeoFilter []FTSearchGeoFilter - InKeys []interface{} - InFields []interface{} - Return []FTSearchReturn - Slop int - Timeout int - InOrder bool - Language string - Expander string + NoContent bool + Verbatim bool + NoStopWords bool + WithScores bool + WithPayloads bool + WithSortKeys bool + Filters []FTSearchFilter + GeoFilter []FTSearchGeoFilter + InKeys []interface{} + InFields []interface{} + Return []FTSearchReturn + Slop int + Timeout int + InOrder bool + Language string + Expander string + // Scorer is used to set scoring function, if not set passed, a default will be used. + // The default scorer depends on the Redis version: + // - `BM25` for Redis >= 8 + // - `TFIDF` for Redis < 8 Scorer string ExplainScore bool Payload string @@ -308,8 +322,12 @@ type FTSearchOptions struct { SortByWithCount bool LimitOffset int Limit int - Params map[string]interface{} - DialectVersion int + // CountOnly sets LIMIT 0 0 to get the count - number of documents in the result set without actually returning the result set. + // When using this option, the Limit and LimitOffset options are ignored. + CountOnly bool + Params map[string]interface{} + // Dialect 1,3 and 4 are deprecated since redis 8.0 + DialectVersion int } type FTSynDumpResult struct { @@ -425,7 +443,8 @@ type IndexDefinition struct { type FTSpellCheckOptions struct { Distance int Terms *FTSpellCheckTerms - Dialect int + // Dialect 1,3 and 4 are deprecated since redis 8.0 + Dialect int } type FTSpellCheckTerms struct { @@ -592,6 +611,8 @@ func FTAggregateQuery(query string, options *FTAggregateOptions) AggregateQuery if options.DialectVersion > 0 { queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } else { + queryArgs = append(queryArgs, "DIALECT", 2) } } return queryArgs @@ -789,6 +810,8 @@ func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query st } if options.DialectVersion > 0 { args = append(args, "DIALECT", options.DialectVersion) + } else { + args = append(args, "DIALECT", 2) } } @@ -846,20 +869,32 @@ func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool return cmd } -// FTConfigGet - Retrieves the value of a RediSearch configuration parameter. +// Retrieves the value of a RediSearch configuration parameter. // The 'option' parameter specifies the configuration parameter to retrieve. -// For more information, please refer to the Redis documentation: -// [FT.CONFIG GET]: (https://redis.io/commands/ft.config-get/) +// For more information, please refer to the Redis [FT.CONFIG GET] documentation. +// +// Deprecated: FTConfigGet is deprecated in Redis 8. +// All configuration will be done with the CONFIG GET command. +// For more information check [Client.ConfigGet] and [CONFIG GET Documentation] +// +// [CONFIG GET Documentation]: https://redis.io/commands/config-get/ +// [FT.CONFIG GET]: https://redis.io/commands/ft.config-get/ func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd { cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option) _ = c(ctx, cmd) return cmd } -// FTConfigSet - Sets the value of a RediSearch configuration parameter. +// Sets the value of a RediSearch configuration parameter. 
// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value. -// For more information, please refer to the Redis documentation: -// [FT.CONFIG SET]: (https://redis.io/commands/ft.config-set/) +// For more information, please refer to the Redis [FT.CONFIG SET] documentation. +// +// Deprecated: FTConfigSet is deprecated in Redis 8. +// All configuration will be done with the CONFIG SET command. +// For more information check [Client.ConfigSet] and [CONFIG SET Documentation] +// +// [CONFIG SET Documentation]: https://redis.io/commands/config-set/ +// [FT.CONFIG SET]: https://redis.io/commands/ft.config-set/ func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd { cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value) _ = c(ctx, cmd) @@ -1150,6 +1185,8 @@ func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query stri args := []interface{}{"FT.EXPLAIN", index, query} if options.Dialect != "" { args = append(args, "DIALECT", options.Dialect) + } else { + args = append(args, "DIALECT", 2) } cmd := NewStringCmd(ctx, args...) _ = c(ctx, cmd) @@ -1447,6 +1484,8 @@ func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query s } if options.Dialect > 0 { args = append(args, "DIALECT", options.Dialect) + } else { + args = append(args, "DIALECT", 2) } } cmd := newFTSpellCheckCmd(ctx, args...) @@ -1816,6 +1855,8 @@ func FTSearchQuery(query string, options *FTSearchOptions) SearchQuery { } if options.DialectVersion > 0 { queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } else { + queryArgs = append(queryArgs, "DIALECT", 2) } } return queryArgs @@ -1920,8 +1961,12 @@ func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query strin args = append(args, "WITHCOUNT") } } - if options.LimitOffset >= 0 && options.Limit > 0 { - args = append(args, "LIMIT", options.LimitOffset, options.Limit) + if options.CountOnly { + args = append(args, "LIMIT", 0, 0) + } else { + if options.LimitOffset >= 0 && options.Limit > 0 || options.LimitOffset > 0 && options.Limit == 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } } if options.Params != nil { args = append(args, "PARAMS", len(options.Params)*2) @@ -1931,6 +1976,8 @@ func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query strin } if options.DialectVersion > 0 { args = append(args, "DIALECT", options.DialectVersion) + } else { + args = append(args, "DIALECT", 2) } } cmd := newFTSearchCmd(ctx, options, args...) 
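Note: the CountOnly branch added above maps to `LIMIT 0 0`, which asks the
server for only the total number of matching documents. A minimal sketch of a
caller, assuming a v9 client `rdb` and a hypothetical index `books-idx`:

	res, err := rdb.FTSearchWithArgs(ctx, "books-idx", "@title:redis",
		&redis.FTSearchOptions{CountOnly: true}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Total) // res.Docs stays empty under LIMIT 0 0
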
@@ -2054,215 +2101,3 @@ func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *Str _ = c(ctx, cmd) return cmd } - -// type FTProfileResult struct { -// Results []interface{} -// Profile ProfileDetails -// } - -// type ProfileDetails struct { -// TotalProfileTime string -// ParsingTime string -// PipelineCreationTime string -// Warning string -// IteratorsProfile []IteratorProfile -// ResultProcessorsProfile []ResultProcessorProfile -// } - -// type IteratorProfile struct { -// Type string -// QueryType string -// Time interface{} -// Counter int -// Term string -// Size int -// ChildIterators []IteratorProfile -// } - -// type ResultProcessorProfile struct { -// Type string -// Time interface{} -// Counter int -// } - -// func parseFTProfileResult(data []interface{}) (FTProfileResult, error) { -// var result FTProfileResult -// if len(data) < 2 { -// return result, fmt.Errorf("unexpected data length") -// } - -// // Parse results -// result.Results = data[0].([]interface{}) - -// // Parse profile details -// profileData := data[1].([]interface{}) -// profileDetails := ProfileDetails{} -// for i := 0; i < len(profileData); i += 2 { -// switch profileData[i].(string) { -// case "Total profile time": -// profileDetails.TotalProfileTime = profileData[i+1].(string) -// case "Parsing time": -// profileDetails.ParsingTime = profileData[i+1].(string) -// case "Pipeline creation time": -// profileDetails.PipelineCreationTime = profileData[i+1].(string) -// case "Warning": -// profileDetails.Warning = profileData[i+1].(string) -// case "Iterators profile": -// profileDetails.IteratorsProfile = parseIteratorsProfile(profileData[i+1].([]interface{})) -// case "Result processors profile": -// profileDetails.ResultProcessorsProfile = parseResultProcessorsProfile(profileData[i+1].([]interface{})) -// } -// } - -// result.Profile = profileDetails -// return result, nil -// } - -// func parseIteratorsProfile(data []interface{}) []IteratorProfile { -// var iterators []IteratorProfile -// for _, item := range data { -// profile := item.([]interface{}) -// iterator := IteratorProfile{} -// for i := 0; i < len(profile); i += 2 { -// switch profile[i].(string) { -// case "Type": -// iterator.Type = profile[i+1].(string) -// case "Query type": -// iterator.QueryType = profile[i+1].(string) -// case "Time": -// iterator.Time = profile[i+1] -// case "Counter": -// iterator.Counter = int(profile[i+1].(int64)) -// case "Term": -// iterator.Term = profile[i+1].(string) -// case "Size": -// iterator.Size = int(profile[i+1].(int64)) -// case "Child iterators": -// iterator.ChildIterators = parseChildIteratorsProfile(profile[i+1].([]interface{})) -// } -// } -// iterators = append(iterators, iterator) -// } -// return iterators -// } - -// func parseChildIteratorsProfile(data []interface{}) []IteratorProfile { -// var iterators []IteratorProfile -// for _, item := range data { -// profile := item.([]interface{}) -// iterator := IteratorProfile{} -// for i := 0; i < len(profile); i += 2 { -// switch profile[i].(string) { -// case "Type": -// iterator.Type = profile[i+1].(string) -// case "Query type": -// iterator.QueryType = profile[i+1].(string) -// case "Time": -// iterator.Time = profile[i+1] -// case "Counter": -// iterator.Counter = int(profile[i+1].(int64)) -// case "Term": -// iterator.Term = profile[i+1].(string) -// case "Size": -// iterator.Size = int(profile[i+1].(int64)) -// } -// } -// iterators = append(iterators, iterator) -// } -// return iterators -// } - -// func 
parseResultProcessorsProfile(data []interface{}) []ResultProcessorProfile { -// var processors []ResultProcessorProfile -// for _, item := range data { -// profile := item.([]interface{}) -// processor := ResultProcessorProfile{} -// for i := 0; i < len(profile); i += 2 { -// switch profile[i].(string) { -// case "Type": -// processor.Type = profile[i+1].(string) -// case "Time": -// processor.Time = profile[i+1] -// case "Counter": -// processor.Counter = int(profile[i+1].(int64)) -// } -// } -// processors = append(processors, processor) -// } -// return processors -// } - -// func NewFTProfileCmd(ctx context.Context, args ...interface{}) *FTProfileCmd { -// return &FTProfileCmd{ -// baseCmd: baseCmd{ -// ctx: ctx, -// args: args, -// }, -// } -// } - -// type FTProfileCmd struct { -// baseCmd -// val FTProfileResult -// } - -// func (cmd *FTProfileCmd) String() string { -// return cmdString(cmd, cmd.val) -// } - -// func (cmd *FTProfileCmd) SetVal(val FTProfileResult) { -// cmd.val = val -// } - -// func (cmd *FTProfileCmd) Result() (FTProfileResult, error) { -// return cmd.val, cmd.err -// } - -// func (cmd *FTProfileCmd) Val() FTProfileResult { -// return cmd.val -// } - -// func (cmd *FTProfileCmd) readReply(rd *proto.Reader) (err error) { -// data, err := rd.ReadSlice() -// if err != nil { -// return err -// } -// cmd.val, err = parseFTProfileResult(data) -// if err != nil { -// cmd.err = err -// } -// return nil -// } - -// // FTProfile - Executes a search query and returns a profile of how the query was processed. -// // The 'index' parameter specifies the index to search, the 'limited' parameter specifies whether to limit the results, -// // and the 'query' parameter specifies the search / aggreagte query. Please notice that you must either pass a SearchQuery or an AggregateQuery. -// // For more information, please refer to the Redis documentation: -// // [FT.PROFILE]: (https://redis.io/commands/ft.profile/) -// func (c cmdable) FTProfile(ctx context.Context, index string, limited bool, query interface{}) *FTProfileCmd { -// queryType := "" -// var argsQuery []interface{} - -// switch v := query.(type) { -// case AggregateQuery: -// queryType = "AGGREGATE" -// argsQuery = v -// case SearchQuery: -// queryType = "SEARCH" -// argsQuery = v -// default: -// panic("FT.PROFILE: query must be either AggregateQuery or SearchQuery") -// } - -// args := []interface{}{"FT.PROFILE", index, queryType} - -// if limited { -// args = append(args, "LIMITED") -// } -// args = append(args, "QUERY") -// args = append(args, argsQuery...) - -// cmd := NewFTProfileCmd(ctx, args...) -// _ = c(ctx, cmd) -// return cmd -// } diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go index a4c9f53c40..43fbcd2443 100644 --- a/vendor/github.com/redis/go-redis/v9/sentinel.go +++ b/vendor/github.com/redis/go-redis/v9/sentinel.go @@ -4,7 +4,10 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net" + "net/url" + "strconv" "strings" "sync" "time" @@ -219,10 +222,154 @@ func (opt *FailoverOptions) clusterOptions() *ClusterOptions { } } +// ParseFailoverURL parses a URL into FailoverOptions that can be used to connect to Redis. +// The URL must be in the form: +// +// redis://:@:/ +// or +// rediss://:@:/ +// +// To add additional addresses, specify the query parameter, "addr" one or more times. 
e.g: +// +// redis://:@:/?addr=:&addr=: +// or +// rediss://:@:/?addr=:&addr=: +// +// Most Option fields can be set using query parameters, with the following restrictions: +// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries +// - only scalar type fields are supported (bool, int, time.Duration) +// - for time.Duration fields, values must be a valid input for time.ParseDuration(); +// additionally a plain integer as value (i.e. without unit) is interpreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "sentinel_username" and "sentinel_password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query parameters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// +// Example: +// +// redis://user:password@localhost:6789?master_name=mymaster&dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791 +// is equivalent to: +// &FailoverOptions{ +// MasterName: "mymaster", +// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"] +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// } +func ParseFailoverURL(redisURL string) (*FailoverOptions, error) { + u, err := url.Parse(redisURL) + if err != nil { + return nil, err + } + return setupFailoverConn(u) +} + +func setupFailoverConn(u *url.URL) (*FailoverOptions, error) { + o := &FailoverOptions{} + + o.SentinelUsername, o.SentinelPassword = getUserPassword(u) + + h, p := getHostPortWithDefaults(u) + o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p)) + + switch u.Scheme { + case "rediss": + o.TLSConfig = &tls.Config{ServerName: h, MinVersion: tls.VersionTLS12} + case "redis": + o.TLSConfig = nil + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } + + f := strings.FieldsFunc(u.Path, func(r rune) bool { + return r == '/' + }) + switch len(f) { + case 0: + o.DB = 0 + case 1: + var err error + if o.DB, err = strconv.Atoi(f[0]); err != nil { + return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) + } + default: + return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) + } + + return setupFailoverConnParams(u, o) +} + +func setupFailoverConnParams(u *url.URL, o *FailoverOptions) (*FailoverOptions, error) { + q := queryOptions{q: u.Query()} + + o.MasterName = q.string("master_name") + o.ClientName = q.string("client_name") + o.RouteByLatency = q.bool("route_by_latency") + o.RouteRandomly = q.bool("route_randomly") + o.ReplicaOnly = q.bool("replica_only") + o.UseDisconnectedReplicas = q.bool("use_disconnected_replicas") + o.Protocol = q.int("protocol") + o.Username = q.string("username") + o.Password = q.string("password") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.ContextTimeoutEnabled = q.bool("context_timeout_enabled") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.MinIdleConns = q.int("min_idle_conns") + o.MaxIdleConns = q.int("max_idle_conns") + o.MaxActiveConns = q.int("max_active_conns") + 
o.ConnMaxLifetime = q.duration("conn_max_lifetime") + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + o.PoolTimeout = q.duration("pool_timeout") + o.DisableIdentity = q.bool("disableIdentity") + o.IdentitySuffix = q.string("identitySuffix") + o.UnstableResp3 = q.bool("unstable_resp3") + + if q.err != nil { + return nil, q.err + } + + if tmp := q.string("db"); tmp != "" { + db, err := strconv.Atoi(tmp) + if err != nil { + return nil, fmt.Errorf("redis: invalid database number: %w", err) + } + o.DB = db + } + + addrs := q.strings("addr") + for _, addr := range addrs { + h, p, err := net.SplitHostPort(addr) + if err != nil || h == "" || p == "" { + return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) + } + + o.SentinelAddrs = append(o.SentinelAddrs, net.JoinHostPort(h, p)) + } + + // any parameters left? + if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil +} + // NewFailoverClient returns a Redis client that uses Redis Sentinel // for automatic failover. It's safe for concurrent use by multiple // goroutines. func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + if failoverOpt == nil { + panic("redis: NewFailoverClient nil options") + } + if failoverOpt.RouteByLatency { panic("to route commands by latency, use NewFailoverClusterClient") } @@ -257,7 +404,7 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client { connPool = newConnPool(opt, rdb.dialHook) rdb.connPool = connPool - rdb.onClose = failover.Close + rdb.onClose = rdb.wrappedOnClose(failover.Close) failover.mu.Lock() failover.onFailover = func(ctx context.Context, addr string) { @@ -308,10 +455,12 @@ func masterReplicaDialer( // SentinelClient is a client for a Redis Sentinel. type SentinelClient struct { *baseClient - hooksMixin } func NewSentinelClient(opt *Options) *SentinelClient { + if opt == nil { + panic("redis: NewSentinelClient nil options") + } opt.init() c := &SentinelClient{ baseClient: &baseClient{ @@ -566,29 +715,50 @@ func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { } } - for i, sentinelAddr := range c.sentinelAddrs { - sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) - - masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() - if err != nil { - _ = sentinel.Close() - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return "", err - } - internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s", - c.opt.MasterName, err) - continue - } + var ( + masterAddr string + wg sync.WaitGroup + once sync.Once + errCh = make(chan error, len(c.sentinelAddrs)) + ) - // Push working sentinel to the top. 
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] - c.setSentinel(ctx, sentinel) + ctx, cancel := context.WithCancel(ctx) + defer cancel() - addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) - return addr, nil + for i, sentinelAddr := range c.sentinelAddrs { + wg.Add(1) + go func(i int, addr string) { + defer wg.Done() + sentinelCli := NewSentinelClient(c.opt.sentinelOptions(addr)) + addrVal, err := sentinelCli.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName addr=%s, master=%q failed: %s", + addr, c.opt.MasterName, err) + _ = sentinelCli.Close() + errCh <- err + return + } + once.Do(func() { + masterAddr = net.JoinHostPort(addrVal[0], addrVal[1]) + // Push working sentinel to the top + c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinelCli) + internal.Logger.Printf(ctx, "sentinel: selected addr=%s masterAddr=%s", addr, masterAddr) + cancel() + }) + }(i, sentinelAddr) } - return "", errors.New("redis: all sentinels specified in configuration are unreachable") + wg.Wait() + close(errCh) + if masterAddr != "" { + return masterAddr, nil + } + errs := make([]error, 0, len(errCh)) + for err := range errCh { + errs = append(errs, err) + } + return "", fmt.Errorf("redis: all sentinels specified in configuration are unreachable: %w", errors.Join(errs...)) } func (c *sentinelFailover) replicaAddrs(ctx context.Context, useDisconnected bool) ([]string, error) { @@ -806,6 +976,10 @@ func contains(slice []string, str string) bool { // NewFailoverClusterClient returns a client that supports routing read-only commands // to a replica node. func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { + if failoverOpt == nil { + panic("redis: NewFailoverClusterClient nil options") + } + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) copy(sentinelAddrs, failoverOpt.SentinelAddrs) @@ -815,6 +989,22 @@ func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { } opt := failoverOpt.clusterOptions() + if failoverOpt.DB != 0 { + onConnect := opt.OnConnect + + opt.OnConnect = func(ctx context.Context, cn *Conn) error { + if err := cn.Select(ctx, failoverOpt.DB).Err(); err != nil { + return err + } + + if onConnect != nil { + return onConnect(ctx, cn) + } + + return nil + } + } + opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) { masterAddr, err := failover.MasterAddr(ctx) if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/tx.go b/vendor/github.com/redis/go-redis/v9/tx.go index 039eaf3516..0daa222e35 100644 --- a/vendor/github.com/redis/go-redis/v9/tx.go +++ b/vendor/github.com/redis/go-redis/v9/tx.go @@ -19,16 +19,15 @@ type Tx struct { baseClient cmdable statefulCmdable - hooksMixin } func (c *Client) newTx() *Tx { tx := Tx{ baseClient: baseClient{ - opt: c.opt, - connPool: pool.NewStickyConnPool(c.connPool), + opt: c.opt, + connPool: pool.NewStickyConnPool(c.connPool), + hooksMixin: c.hooksMixin.clone(), }, - hooksMixin: c.hooksMixin.clone(), } tx.init() return &tx diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go index 483c81127a..a1ce17bac3 100644 --- a/vendor/github.com/redis/go-redis/v9/universal.go +++ b/vendor/github.com/redis/go-redis/v9/universal.go @@ -80,6 +80,8 @@ type UniversalOptions struct { IdentitySuffix string UnstableResp3 bool + // IsClusterMode can be used when only one 
Addrs is provided (e.g. Elasticache supports setting up cluster mode with configuration endpoint). + IsClusterMode bool } // Cluster returns cluster options created from the universal options. @@ -152,6 +154,9 @@ func (o *UniversalOptions) Failover() *FailoverOptions { SentinelUsername: o.SentinelUsername, SentinelPassword: o.SentinelPassword, + RouteByLatency: o.RouteByLatency, + RouteRandomly: o.RouteRandomly, + MaxRetries: o.MaxRetries, MinRetryBackoff: o.MinRetryBackoff, MaxRetryBackoff: o.MaxRetryBackoff, @@ -172,6 +177,8 @@ func (o *UniversalOptions) Failover() *FailoverOptions { TLSConfig: o.TLSConfig, + ReplicaOnly: o.ReadOnly, + DisableIdentity: o.DisableIdentity, DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, @@ -252,14 +259,26 @@ var ( // NewUniversalClient returns a new multi client. The type of the returned client depends // on the following conditions: // -// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned. -// 2. if the number of Addrs is two or more, a ClusterClient is returned. -// 3. Otherwise, a single-node Client is returned. +// 1. If the MasterName option is specified with RouteByLatency, RouteRandomly or IsClusterMode, +// a FailoverClusterClient is returned. +// 2. If the MasterName option is specified without RouteByLatency, RouteRandomly or IsClusterMode, +// a sentinel-backed FailoverClient is returned. +// 3. If the number of Addrs is two or more, or IsClusterMode option is specified, +// a ClusterClient is returned. +// 4. Otherwise, a single-node Client is returned. func NewUniversalClient(opts *UniversalOptions) UniversalClient { - if opts.MasterName != "" { + if opts == nil { + panic("redis: NewUniversalClient nil options") + } + + switch { + case opts.MasterName != "" && (opts.RouteByLatency || opts.RouteRandomly || opts.IsClusterMode): + return NewFailoverClusterClient(opts.Failover()) + case opts.MasterName != "": return NewFailoverClient(opts.Failover()) - } else if len(opts.Addrs) > 1 { + case len(opts.Addrs) > 1 || opts.IsClusterMode: return NewClusterClient(opts.Cluster()) + default: + return NewClient(opts.Simple()) } - return NewClient(opts.Simple()) } diff --git a/vendor/github.com/redis/go-redis/v9/vectorset_commands.go b/vendor/github.com/redis/go-redis/v9/vectorset_commands.go new file mode 100644 index 0000000000..2bd9e22166 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/vectorset_commands.go @@ -0,0 +1,348 @@ +package redis + +import ( + "context" + "encoding/json" + "strconv" +) + +// note: the APIs is experimental and may be subject to change. 
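+//
+// A brief usage sketch (a hedged illustration, not upstream code: the key and
+// element names are placeholders, and `rdb` stands for any client value that
+// provides these commands):
+//
+//	_ = rdb.VAdd(ctx, "points", "p1", &VectorValues{Val: []float64{1, 0}})
+//	_ = rdb.VAdd(ctx, "points", "p2", &VectorValues{Val: []float64{0, 1}})
+//	ids, err := rdb.VSim(ctx, "points", &VectorRef{Name: "p1"}).Result()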
+type VectorSetCmdable interface { + VAdd(ctx context.Context, key, element string, val Vector) *BoolCmd + VAddWithArgs(ctx context.Context, key, element string, val Vector, addArgs *VAddArgs) *BoolCmd + VCard(ctx context.Context, key string) *IntCmd + VDim(ctx context.Context, key string) *IntCmd + VEmb(ctx context.Context, key, element string, raw bool) *SliceCmd + VGetAttr(ctx context.Context, key, element string) *StringCmd + VInfo(ctx context.Context, key string) *MapStringInterfaceCmd + VLinks(ctx context.Context, key, element string) *StringSliceCmd + VLinksWithScores(ctx context.Context, key, element string) *VectorScoreSliceCmd + VRandMember(ctx context.Context, key string) *StringCmd + VRandMemberCount(ctx context.Context, key string, count int) *StringSliceCmd + VRem(ctx context.Context, key, element string) *BoolCmd + VSetAttr(ctx context.Context, key, element string, attr interface{}) *BoolCmd + VClearAttributes(ctx context.Context, key, element string) *BoolCmd + VSim(ctx context.Context, key string, val Vector) *StringSliceCmd + VSimWithScores(ctx context.Context, key string, val Vector) *VectorScoreSliceCmd + VSimWithArgs(ctx context.Context, key string, val Vector, args *VSimArgs) *StringSliceCmd + VSimWithArgsWithScores(ctx context.Context, key string, val Vector, args *VSimArgs) *VectorScoreSliceCmd +} + +type Vector interface { + Value() []any +} + +const ( + vectorFormatFP32 string = "FP32" + vectorFormatValues string = "Values" +) + +type VectorFP32 struct { + Val []byte +} + +func (v *VectorFP32) Value() []any { + return []any{vectorFormatFP32, v.Val} +} + +var _ Vector = (*VectorFP32)(nil) + +type VectorValues struct { + Val []float64 +} + +func (v *VectorValues) Value() []any { + res := make([]any, 2+len(v.Val)) + res[0] = vectorFormatValues + res[1] = len(v.Val) + for i, v := range v.Val { + res[2+i] = v + } + return res +} + +var _ Vector = (*VectorValues)(nil) + +type VectorRef struct { + Name string // the name of the referent vector +} + +func (v *VectorRef) Value() []any { + return []any{"ele", v.Name} +} + +var _ Vector = (*VectorRef)(nil) + +type VectorScore struct { + Name string + Score float64 +} + +// `VADD key (FP32 | VALUES num) vector element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VAdd(ctx context.Context, key, element string, val Vector) *BoolCmd { + return c.VAddWithArgs(ctx, key, element, val, &VAddArgs{}) +} + +type VAddArgs struct { + // the REDUCE option must be passed immediately after the key + Reduce int64 + Cas bool + + // The NoQuant, Q8 and Bin options are mutually exclusive. + NoQuant bool + Q8 bool + Bin bool + + EF int64 + SetAttr string + M int64 +} + +func (v VAddArgs) reduce() int64 { + return v.Reduce +} + +func (v VAddArgs) appendArgs(args []any) []any { + if v.Cas { + args = append(args, "cas") + } + + if v.NoQuant { + args = append(args, "noquant") + } else if v.Q8 { + args = append(args, "q8") + } else if v.Bin { + args = append(args, "bin") + } + + if v.EF > 0 { + args = append(args, "ef", strconv.FormatInt(v.EF, 10)) + } + if len(v.SetAttr) > 0 { + args = append(args, "setattr", v.SetAttr) + } + if v.M > 0 { + args = append(args, "m", strconv.FormatInt(v.M, 10)) + } + return args +} + +// `VADD key [REDUCE dim] (FP32 | VALUES num) vector element [CAS] [NOQUANT | Q8 | BIN] [EF build-exploration-factor] [SETATTR attributes] [M numlinks]` +// note: the API is experimental and may be subject to change. 
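+//
+// For example (a sketch; `rdb` and the key/element names are placeholders):
+//
+//	vec := &VectorValues{Val: []float64{0.1, 0.2, 0.3}}
+//	ok, err := rdb.VAddWithArgs(ctx, "points", "p1", vec, &VAddArgs{Q8: true, M: 16}).Result()
+//
+// which sends `vadd points Values 3 0.1 0.2 0.3 p1 q8 m 16`.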
+func (c cmdable) VAddWithArgs(ctx context.Context, key, element string, val Vector, addArgs *VAddArgs) *BoolCmd { + if addArgs == nil { + addArgs = &VAddArgs{} + } + args := []any{"vadd", key} + if addArgs.reduce() > 0 { + args = append(args, "reduce", addArgs.reduce()) + } + args = append(args, val.Value()...) + args = append(args, element) + args = addArgs.appendArgs(args) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VCARD key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "vcard", key) + _ = c(ctx, cmd) + return cmd +} + +// `VDIM key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VDim(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "vdim", key) + _ = c(ctx, cmd) + return cmd +} + +// `VEMB key element [RAW]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VEmb(ctx context.Context, key, element string, raw bool) *SliceCmd { + args := []any{"vemb", key, element} + if raw { + args = append(args, "raw") + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// `VGETATTR key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VGetAttr(ctx context.Context, key, element string) *StringCmd { + cmd := NewStringCmd(ctx, "vgetattr", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VINFO key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VInfo(ctx context.Context, key string) *MapStringInterfaceCmd { + cmd := NewMapStringInterfaceCmd(ctx, "vinfo", key) + _ = c(ctx, cmd) + return cmd +} + +// `VLINKS key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VLinks(ctx context.Context, key, element string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "vlinks", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VLINKS key element WITHSCORES` +// note: the API is experimental and may be subject to change. +func (c cmdable) VLinksWithScores(ctx context.Context, key, element string) *VectorScoreSliceCmd { + cmd := NewVectorInfoSliceCmd(ctx, "vlinks", key, element, "withscores") + _ = c(ctx, cmd) + return cmd +} + +// `VRANDMEMBER key` +// note: the API is experimental and may be subject to change. +func (c cmdable) VRandMember(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "vrandmember", key) + _ = c(ctx, cmd) + return cmd +} + +// `VRANDMEMBER key [count]` +// note: the API is experimental and may be subject to change. +func (c cmdable) VRandMemberCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "vrandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +// `VREM key element` +// note: the API is experimental and may be subject to change. +func (c cmdable) VRem(ctx context.Context, key, element string) *BoolCmd { + cmd := NewBoolCmd(ctx, "vrem", key, element) + _ = c(ctx, cmd) + return cmd +} + +// `VSETATTR key element "{ JSON obj }"` +// The `attr` must be something that can be marshaled to JSON (using encoding/JSON) unless +// the argument is a string or []byte when we assume that it can be passed directly as JSON. +// +// note: the API is experimental and may be subject to change. 
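+//
+// For example (a sketch; `rdb` and the names are placeholders):
+//
+//	type meta struct {
+//		Label string `json:"label"`
+//	}
+//	ok, err := rdb.VSetAttr(ctx, "points", "p1", meta{Label: "demo"}).Result()
+//
+// marshals the struct and sends `vsetattr points p1 {"label":"demo"}`.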
+func (c cmdable) VSetAttr(ctx context.Context, key, element string, attr interface{}) *BoolCmd {
+	var attrStr string
+	var err error
+	switch v := attr.(type) {
+	case string:
+		attrStr = v
+	case []byte:
+		attrStr = string(v)
+	default:
+		var bytes []byte
+		bytes, err = json.Marshal(v)
+		if err != nil {
+			// If marshalling fails, create the command and set the error; this command won't be executed.
+			cmd := NewBoolCmd(ctx, "vsetattr", key, element, "")
+			cmd.SetErr(err)
+			return cmd
+		}
+		attrStr = string(bytes)
+	}
+	cmd := NewBoolCmd(ctx, "vsetattr", key, element, attrStr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VClearAttributes` clears the attributes on a vector set element.
+// It is implemented by executing `VSETATTR key element ""`.
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VClearAttributes(ctx context.Context, key, element string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "vsetattr", key, element, "")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VSIM key (ELE | FP32 | VALUES num) (vector | element)`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VSim(ctx context.Context, key string, val Vector) *StringSliceCmd {
+	return c.VSimWithArgs(ctx, key, val, &VSimArgs{})
+}
+
+// `VSIM key (ELE | FP32 | VALUES num) (vector | element) WITHSCORES`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VSimWithScores(ctx context.Context, key string, val Vector) *VectorScoreSliceCmd {
+	return c.VSimWithArgsWithScores(ctx, key, val, &VSimArgs{})
+}
+
+type VSimArgs struct {
+	Count    int64
+	EF       int64
+	Filter   string
+	FilterEF int64
+	Truth    bool
+	NoThread bool
+	// The Redis `VSIM` implementation accepts an EPSILON option, but the
+	// redis.io documentation does not list it.
+	// Epsilon float64
+}
+
+func (v VSimArgs) appendArgs(args []any) []any {
+	if v.Count > 0 {
+		args = append(args, "count", v.Count)
+	}
+	if v.EF > 0 {
+		args = append(args, "ef", v.EF)
+	}
+	if len(v.Filter) > 0 {
+		args = append(args, "filter", v.Filter)
+	}
+	if v.FilterEF > 0 {
+		args = append(args, "filter-ef", v.FilterEF)
+	}
+	if v.Truth {
+		args = append(args, "truth")
+	}
+	if v.NoThread {
+		args = append(args, "nothread")
+	}
+	// if v.Epsilon > 0 {
+	//	args = append(args, "Epsilon", v.Epsilon)
+	// }
+	return args
+}
+
+// `VSIM key (ELE | FP32 | VALUES num) (vector | element) [COUNT num]
+// [EF search-exploration-factor] [FILTER expression] [FILTER-EF max-filtering-effort] [TRUTH] [NOTHREAD]`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VSimWithArgs(ctx context.Context, key string, val Vector, simArgs *VSimArgs) *StringSliceCmd {
+	if simArgs == nil {
+		simArgs = &VSimArgs{}
+	}
+	args := []any{"vsim", key}
+	args = append(args, val.Value()...)
+	args = simArgs.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// `VSIM key (ELE | FP32 | VALUES num) (vector | element) [WITHSCORES] [COUNT num]
+// [EF search-exploration-factor] [FILTER expression] [FILTER-EF max-filtering-effort] [TRUTH] [NOTHREAD]`
+// note: the API is experimental and may be subject to change.
+func (c cmdable) VSimWithArgsWithScores(ctx context.Context, key string, val Vector, simArgs *VSimArgs) *VectorScoreSliceCmd {
+	if simArgs == nil {
+		simArgs = &VSimArgs{}
+	}
+	args := []any{"vsim", key}
+	args = append(args, val.Value()...)
+	args = append(args, "withscores")
+	args = simArgs.appendArgs(args)
+	cmd := NewVectorInfoSliceCmd(ctx, args...)
+ _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go index a4832fc1e0..cbed8bd8d2 100644 --- a/vendor/github.com/redis/go-redis/v9/version.go +++ b/vendor/github.com/redis/go-redis/v9/version.go @@ -2,5 +2,5 @@ package redis // Version is the current release version. func Version() string { - return "9.7.3" + return "9.10.0" } diff --git a/vendor/github.com/rubenv/sql-migrate/.golangci.yaml b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml index f0a970753e..f581841653 100644 --- a/vendor/github.com/rubenv/sql-migrate/.golangci.yaml +++ b/vendor/github.com/rubenv/sql-migrate/.golangci.yaml @@ -1,107 +1,133 @@ -linters-settings: - gocritic: - disabled-checks: - - ifElseChain - goimports: - local-prefixes: github.com/rubenv/sql-migrate - govet: - enable-all: true - disable: - - fieldalignment - depguard: - rules: - main: - allow: - - $gostd - - github.com/denisenkom/go-mssqldb - - github.com/go-sql-driver/mysql - - github.com/go-gorp/gorp/v3 - - github.com/lib/pq - - github.com/mattn/go-sqlite3 - - github.com/mitchellh/cli - - github.com/olekukonko/tablewriter - - github.com/rubenv/sql-migrate - exhaustive: - default-signifies-exhaustive: true - nolintlint: - allow-unused: false - allow-leading-space: false - allow-no-explanation: - - depguard - require-explanation: true - require-specific: true - revive: - enable-all-rules: false - rules: - - name: atomic - - name: blank-imports - - name: bool-literal-in-expr - - name: call-to-gc - - name: constant-logical-expr - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: duplicated-imports - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - - name: identical-branches - - name: imports-blacklist - - name: increment-decrement - - name: indent-error-flow - - name: modifies-parameter - - name: modifies-value-receiver - - name: package-comments - - name: range - - name: range-val-address - - name: range-val-in-closure - - name: receiver-naming - - name: string-format - - name: string-of-int - - name: struct-tag - - name: time-naming - - name: unconditional-recursion - - name: unexported-naming - - name: unexported-return - - name: superfluous-else - - name: unreachable-code - - name: var-declaration - - name: waitgroup-by-value - - name: unused-receiver - - name: unnecessary-stmt - - name: unused-parameter +version: "2" run: tests: true - timeout: 1m linters: - disable-all: true + default: none enable: - asciicheck - depguard - errcheck + - errorlint - exhaustive - gocritic - - gofmt - - gofumpt - - goimports - govet - ineffassign - nolintlint - revive - staticcheck - - typecheck + - unparam - unused - whitespace - - errorlint - - gosimple - - unparam + settings: + depguard: + rules: + main: + allow: + - $gostd + - github.com/denisenkom/go-mssqldb + - github.com/go-sql-driver/mysql + - github.com/go-gorp/gorp/v3 + - github.com/lib/pq + - github.com/mattn/go-sqlite3 + - github.com/mitchellh/cli + - github.com/olekukonko/tablewriter + - github.com/rubenv/sql-migrate + - gopkg.in/check.v1 + - gopkg.in/yaml.v2 + exhaustive: + default-signifies-exhaustive: true + gocritic: + disabled-checks: + - ifElseChain + govet: + disable: + - fieldalignment + enable-all: true + nolintlint: + require-explanation: true + require-specific: true + allow-no-explanation: + - depguard + allow-unused: false + revive: + enable-all-rules: false + rules: 
+ - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: duplicated-imports + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: identical-branches + - name: imports-blocklist + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: string-format + - name: string-of-int + - name: struct-tag + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: superfluous-else + - name: unreachable-code + - name: var-declaration + - name: waitgroup-by-value + - name: unused-receiver + - name: unnecessary-stmt + - name: unused-parameter + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: (.+)\.go$ + text: declaration of "err" shadows declaration at + - path: (.+)\.go$ + text: 'error-strings: error strings should not be capitalized or end with punctuation or a newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not end with punctuation or newline' + - path: (.+)\.go$ + text: 'ST1005: error strings should not be capitalized' + paths: + - third_party$ + - builtin$ + - examples$ issues: - exclude: - - 'declaration of "err" shadows declaration at' # Allow shadowing of `err` because it's so common - - 'error-strings: error strings should not be capitalized or end with punctuation or a newline' - max-same-issues: 10000 max-issues-per-linter: 10000 + max-same-issues: 10000 +formatters: + enable: + - gofmt + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/rubenv/sql-migrate + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go index 7fb56f1a95..c9cb4a48b6 100644 --- a/vendor/github.com/rubenv/sql-migrate/migrate.go +++ b/vendor/github.com/rubenv/sql-migrate/migrate.go @@ -700,13 +700,14 @@ func (ms MigrationSet) planMigrationCommon(db *sql.DB, dialect string, m Migrati toApplyCount = max } for _, v := range toApply[0:toApplyCount] { - if dir == Up { + switch dir { + case Up: result = append(result, &PlannedMigration{ Migration: v, Queries: v.Up, DisableTransaction: v.DisableTransactionUp, }) - } else if dir == Down { + case Down: result = append(result, &PlannedMigration{ Migration: v, Queries: v.Down, @@ -779,14 +780,13 @@ func ToApply(migrations []*Migration, current string, direction MigrationDirecti } } - if direction == Up { + switch direction { + case Up: return migrations[index+1:] - } else if direction == Down { + case Down: if index == -1 { return []*Migration{} } - - // Add in reverse order toApply := make([]*Migration, index+1) for i := 0; i < index+1; i++ { toApply[index-i] = migrations[i] diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc index 2e0f9f5f71..5c95dc7989 100644 --- a/vendor/github.com/sagikazarmark/locafero/.envrc +++ b/vendor/github.com/sagikazarmark/locafero/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! 
nix_direnv_version 3.0.4; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +if ! has nix_direnv_version || ! nix_direnv_version 3.1.0; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.1.0/direnvrc" "sha256-yMJ2OVMzrFaDPn7q8nCBZFRYpL/f0RcHzhmw/i6btJM=" fi use flake . --impure diff --git a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml index 829de2a4a0..a27a42959f 100644 --- a/vendor/github.com/sagikazarmark/locafero/.golangci.yaml +++ b/vendor/github.com/sagikazarmark/locafero/.golangci.yaml @@ -1,27 +1,37 @@ +version: "2" + run: timeout: 10m -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/sagikazarmark/locafero) - goimports: - local-prefixes: github.com/sagikazarmark/locafero - misspell: - locale: US - nolintlint: - allow-leading-space: false # require machine-readable nolint directives (with no leading space) - allow-unused: false # report any unused nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - revive: - confidence: 0 - linters: enable: - - gci - - goimports + - errcheck + - govet + - ineffassign - misspell - nolintlint - revive + - staticcheck + - unused + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/vendor/github.com/sagikazarmark/locafero/README.md b/vendor/github.com/sagikazarmark/locafero/README.md index a48e8e9789..d25fe80f3e 100644 --- a/vendor/github.com/sagikazarmark/locafero/README.md +++ b/vendor/github.com/sagikazarmark/locafero/README.md @@ -2,8 +2,8 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) [![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/sagikazarmark/locafero?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/sagikazarmark/locafero/badge?style=flat-square)](https://deps.dev/go/github.com%252Fsagikazarmark%252Flocafero) **Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** diff --git a/vendor/github.com/sagikazarmark/locafero/file_type.go b/vendor/github.com/sagikazarmark/locafero/file_type.go index 9a9b140233..5ea57c93ee 100644 --- a/vendor/github.com/sagikazarmark/locafero/file_type.go +++ b/vendor/github.com/sagikazarmark/locafero/file_type.go @@ -5,19 +5,23 @@ import "io/fs" // FileType represents the kind of entries [Finder] can return. 
type FileType int +// FileType represents the kind of entries [Finder] can return. const ( - FileTypeAll FileType = iota + FileTypeAny FileType = iota FileTypeFile FileTypeDir + + // Deprecated: Use [FileTypeAny] instead. + FileTypeAll = FileTypeAny ) -func (ft FileType) matchFileInfo(info fs.FileInfo) bool { +func (ft FileType) match(info fs.FileInfo) bool { switch ft { - case FileTypeAll: + case FileTypeAny: return true case FileTypeFile: - return !info.IsDir() + return info.Mode().IsRegular() case FileTypeDir: return info.IsDir() diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go index ef8d547122..ce43c78264 100644 --- a/vendor/github.com/sagikazarmark/locafero/finder.go +++ b/vendor/github.com/sagikazarmark/locafero/finder.go @@ -1,4 +1,4 @@ -// Package finder looks for files and directories in an {fs.Fs} filesystem. +// Package locafero looks for files and directories in an {fs.Fs} filesystem. package locafero import ( @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/sourcegraph/conc/iter" + "github.com/sourcegraph/conc/pool" "github.com/spf13/afero" ) @@ -44,65 +44,66 @@ type Finder struct { // Find looks for files and directories in an [afero.Fs] filesystem. func (f Finder) Find(fsys afero.Fs) ([]string, error) { // Arbitrary go routine limit (TODO: make this a parameter) - // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() + p := pool.NewWithResults[[]searchResult]().WithMaxGoroutines(5).WithErrors().WithFirstError() - type searchItem struct { - path string - name string + for _, searchPath := range f.Paths { + for _, searchName := range f.Names { + p.Go(func() ([]searchResult, error) { + // If the name contains any glob character, perform a glob match + if strings.ContainsAny(searchName, globMatch) { + return globWalkSearch(fsys, searchPath, searchName, f.Type) + } + + return statSearch(fsys, searchPath, searchName, f.Type) + }) + } } - var searchItems []searchItem + searchResults, err := flatten(p.Wait()) + if err != nil { + return nil, err + } - for _, searchPath := range f.Paths { - searchPath := searchPath + // Return early if no results were found + if len(searchResults) == 0 { + return nil, nil + } - for _, searchName := range f.Names { - searchName := searchName - - searchItems = append(searchItems, searchItem{searchPath, searchName}) - - // pool.Go(func() ([]string, error) { - // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, globMatch) { - // return globWalkSearch(fsys, searchPath, searchName, f.Type) - // } - // - // return statSearch(fsys, searchPath, searchName, f.Type) - // }) - } + results := make([]string, 0, len(searchResults)) + + for _, searchResult := range searchResults { + results = append(results, searchResult.path) } - // allResults, err := pool.Wait() - // if err != nil { - // return nil, err - // } + return results, nil +} - allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { - // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, globMatch) { - return globWalkSearch(fsys, item.path, item.name, f.Type) - } +type searchResult struct { + path string + info fs.FileInfo +} - return statSearch(fsys, item.path, item.name, f.Type) - }) +func flatten[T any](results [][]T, err error) ([]T, error) { if err != nil { return nil, err } - var results []string + var flattened []T - for _, r := range allResults 
{ - results = append(results, r...) + for _, r := range results { + flattened = append(flattened, r...) } - // Sort results in alphabetical order for now - // sort.Strings(results) - - return results, nil + return flattened, nil } -func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { - var results []string +func globWalkSearch( + fsys afero.Fs, + searchPath string, + searchName string, + searchType FileType, +) ([]searchResult, error) { + var results []searchResult err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { if err != nil { @@ -123,7 +124,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT } // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { + if !searchType.match(fileInfo) { return result } @@ -133,7 +134,7 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT } if match { - results = append(results, p) + results = append(results, searchResult{p, fileInfo}) } return result @@ -145,7 +146,12 @@ func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchT return results, nil } -func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { +func statSearch( + fsys afero.Fs, + searchPath string, + searchName string, + searchType FileType, +) ([]searchResult, error) { filePath := filepath.Join(searchPath, searchName) fileInfo, err := fsys.Stat(filePath) @@ -157,9 +163,9 @@ func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType } // Skip unmatching type - if !searchType.matchFileInfo(fileInfo) { + if !searchType.match(fileInfo) { return nil, nil } - return []string{filePath}, nil + return []searchResult{{filePath, fileInfo}}, nil } diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock index df2a8cceca..b14a842c2f 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.lock +++ b/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -2,30 +2,32 @@ "nodes": { "cachix": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "devenv" + ], "flake-compat": [ + "devenv" + ], + "git-hooks": [ "devenv", - "flake-compat" + "git-hooks" ], "nixpkgs": [ "devenv", "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" ] }, "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "lastModified": 1748883665, + "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", "owner": "cachix", "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -33,52 +35,21 @@ "devenv": { "inputs": { "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1725907707, - "narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=", - "owner": "cachix", - "repo": "devenv", - "rev": "2bbbbc468fc02257265a79652a8350651cca495a", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - 
"nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1753981111, + "narHash": "sha256-uBJOyMxOkGRmxhD2M5rbN2aV6oP1T2AKq5oBaHHC4mw=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "d4d70df706b153b601a87ab8e81c88a0b1a373b6", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -86,27 +57,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -117,14 +72,18 @@ }, "flake-parts": { "inputs": { - "nixpkgs-lib": "nixpkgs-lib" + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] }, "locked": { - "lastModified": 1725234343, - "narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "567b938d64d4b4112ee253b9274472dc3a346eb6", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -133,39 +92,47 @@ "type": "github" } }, - "flake-utils": { + "flake-parts_2": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "lastModified": 1753121425, + "narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "644e0fc48951a860279da645ba77fe4a6e814c5e", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "git-hooks": { "inputs": { - "systems": "systems_2" + "flake-compat": [ + "devenv", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "lastModified": 1750779888, + "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, @@ -173,7 +140,7 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", 
"nixpkgs" ] }, @@ -192,165 +159,49 @@ } }, "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], + "flake-parts": "flake-parts", + "git-hooks-nix": [ + "devenv", + "git-hooks" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression_2" + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ] }, "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", + "lastModified": 1752773918, + "narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=", + "owner": "cachix", "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "rev": "031c3cf42d2e9391eee373507d8c12e0f9606779", "type": "github" }, "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", + "owner": "cachix", + "ref": "devenv-2.30", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1725233747, - "narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": 
"sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "lastModified": 1750441195, + "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", "owner": "cachix", "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "rev": "0ceffe312871b443929ff3006960d29b120dc627", "type": "github" }, "original": { @@ -360,110 +211,42 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1725910328, - "narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, + "nixpkgs-lib": { "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "lastModified": 1751159883, + "narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=", "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "repo": "nixpkgs.lib", + "rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab", "type": "github" }, "original": { "owner": "nix-community", - "repo": "poetry2nix", + "repo": "nixpkgs.lib", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "nixpkgs_2": { "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "lastModified": 1753939845, + "narHash": "sha256-K2ViRJfdVGE8tpJejs8Qpvvejks1+A4GQej/lBk5y7I=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "94def634a20494ee057c76998843c015909d6311", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_2" } } }, diff --git 
a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix index 312f1ec8cf..bdb10dbe4f 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.nix +++ b/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -1,64 +1,42 @@ { - description = "Finder library for Afero"; - inputs = { - nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; flake-parts.url = "github:hercules-ci/flake-parts"; devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_23; - }; - - packages = with pkgs; [ - just - - golangci-lint - ]; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_21 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_21; - }; - }; + systems = [ + "x86_64-linux" + "aarch64-darwin" + ]; - ci_1_22 = { - imports = [ devenv.shells.ci ]; + perSystem = + { pkgs, ... }: + { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.lib.mkDefault pkgs.go_1_24; + }; - languages = { - go.package = pkgs.go_1_22; - }; - }; + packages = with pkgs; [ + just - ci_1_23 = { - imports = [ devenv.shells.ci ]; + golangci-lint + ]; - languages = { - go.package = pkgs.go_1_23; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; }; }; }; - }; }; } diff --git a/vendor/github.com/sagikazarmark/locafero/justfile b/vendor/github.com/sagikazarmark/locafero/justfile index 00a88850cc..bac5e75db4 100644 --- a/vendor/github.com/sagikazarmark/locafero/justfile +++ b/vendor/github.com/sagikazarmark/locafero/justfile @@ -2,10 +2,13 @@ default: just --list test: - go test -race -v ./... + go test -count 10 -shuffle on -race -v ./... + +fuzz: + go test -race -v -fuzz=Fuzz -fuzztime=60s ./... 
lint: golangci-lint run fmt: - golangci-lint run --fix + golangci-lint fmt diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules new file mode 100644 index 0000000000..d14f5ea70f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules @@ -0,0 +1,4 @@ +[submodule "testdata/JSON-Schema-Test-Suite"] + path = testdata/JSON-Schema-Test-Suite + url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git + branch = main diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml new file mode 100644 index 0000000000..6534d53166 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml @@ -0,0 +1,7 @@ +version: "2" +linters: + enable: + - nakedret + - errname + - godot + - misspell diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml new file mode 100644 index 0000000000..695b502ed9 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml @@ -0,0 +1,7 @@ +- id: jsonschema-validate + name: Validate JSON against JSON Schema + description: ensure json files follow specified JSON Schema + entry: jv + language: golang + additional_dependencies: + - ./cmd/jv diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE similarity index 89% rename from vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt rename to vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE index 55ede8a42c..19dc35b243 100644 --- a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE @@ -172,31 +172,4 @@ of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + of your accepting any such warranty or additional liability. 
\ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md new file mode 100644 index 0000000000..1243b66c5a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md @@ -0,0 +1,88 @@ +# jsonschema v6.0.2 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v6)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v6) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=boon)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/boon/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema/tree/boon) + +see [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) for examples + +## Library Features + +- [x] pass [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite) excluding optional(compare with other impls at [bowtie](https://bowtie-json-schema.github.io/bowtie/#)) + - [x] [![draft-04](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft4.json)](https://bowtie.report/#/dialects/draft4) + - [x] [![draft-06](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft6.json)](https://bowtie.report/#/dialects/draft6) + - [x] [![draft-07](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft7.json)](https://bowtie.report/#/dialects/draft7) + - [x] [![draft/2019-09](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2019-09.json)](https://bowtie.report/#/dialects/draft2019-09) + - [x] [![draft/2020-12](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2020-12.json)](https://bowtie.report/#/dialects/draft2020-12) +- [x] detect infinite loop traps + - [x] `$schema` cycle + - [x] validation cycle +- [x] custom `$schema` url +- [x] vocabulary based validation +- [x] custom regex engine +- [x] format assertions + - [x] flag to enable in draft >= 2019-09 + - [x] custom format registration + - [x] built-in formats + - [x] regex, uuid + - [x] ipv4, ipv6 + - [x] hostname, email + - [x] date, time, date-time, duration + - [x] json-pointer, relative-json-pointer + - [x] uri, uri-reference, uri-template + - [x] iri, iri-reference + - [x] period, semver +- [x] content assertions + - [x] flag to enable in draft >= 7 + - [x] contentEncoding + - [x] base64 + - [x] custom + - [x] contentMediaType + - [x] application/json + - [x] custom + - [x] contentSchema +- [x] errors + - [x] introspectable + - [x] hierarchy + - [x] alternative display with `#` + - [x] output + - [x] flag + - [x] basic + - [x] detailed +- [x] custom vocabulary + - enable via `$vocabulary` for draft >=2019-19 + - enable via flag for draft <= 7 +- [x] mixed dialect support + +## CLI v0.7.0 + +to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` + +Note that the cli is versioned independently. 
you can see it in git tags `cmd/jv/v0.7.0`
+
+```
+Usage: jv [OPTIONS] SCHEMA [INSTANCE...]
+
+Options:
+  -c, --assert-content    Enable content assertions with draft >= 7
+  -f, --assert-format     Enable format assertions with draft >= 2019
+      --cacert pem-file   Use the specified pem-file to verify the peer. The file may contain multiple CA certificates
+  -d, --draft version     Draft version used when '$schema' is missing. Valid values 4, 6, 7, 2019, 2020 (default 2020)
+  -h, --help              Print help information
+  -k, --insecure          Use insecure TLS connection
+  -o, --output format     Output format. Valid values simple, alt, flag, basic, detailed (default "simple")
+  -q, --quiet             Do not print errors
+  -v, --version           Print build information
+```
+
+- [x] exit code `1` for validation errors, `2` for usage errors
+- [x] validate both schema and multiple instances
+- [x] support both json and yaml files
+- [x] support standard input, use `-`
+- [x] quiet mode with parsable output
+- [x] http(s) url support
+  - [x] custom certs for validation, use `--cacert`
+  - [x] flag to skip certificate verification, use `--insecure`
+
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go
new file mode 100644
index 0000000000..4da7361038
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go
@@ -0,0 +1,332 @@
+package jsonschema
+
+import (
+	"fmt"
+	"regexp"
+	"slices"
+)
+
+// Compiler compiles json schema into *Schema.
+type Compiler struct {
+	schemas       map[urlPtr]*Schema
+	roots         *roots
+	formats       map[string]*Format
+	decoders      map[string]*Decoder
+	mediaTypes    map[string]*MediaType
+	assertFormat  bool
+	assertContent bool
+}
+
+// NewCompiler creates a new Compiler object.
+func NewCompiler() *Compiler {
+	return &Compiler{
+		schemas:       map[urlPtr]*Schema{},
+		roots:         newRoots(),
+		formats:       map[string]*Format{},
+		decoders:      map[string]*Decoder{},
+		mediaTypes:    map[string]*MediaType{},
+		assertFormat:  false,
+		assertContent: false,
+	}
+}
+
+// DefaultDraft overrides the draft used to
+// compile schemas without `$schema` field.
+//
+// By default, this library uses the latest
+// draft supported.
+//
+// The use of this option is HIGHLY encouraged
+// to ensure continued correct operation of your
+// schema. The current default value will not stay
+// the same over time.
+func (c *Compiler) DefaultDraft(d *Draft) {
+	c.roots.defaultDraft = d
+}
+
+// AssertFormat always enables format assertions.
+//
+// Default Behavior:
+// for draft-07: enabled.
+// for draft/2019-09: disabled unless metaschema says `format` vocabulary is required.
+// for draft/2020-12: disabled unless metaschema says `format-assertion` vocabulary is required.
+func (c *Compiler) AssertFormat() {
+	c.assertFormat = true
+}
+
+// AssertContent enables content assertions.
+//
+// Content assertions include keywords:
+// - contentEncoding
+// - contentMediaType
+// - contentSchema
+//
+// Default behavior is always disabled.
+func (c *Compiler) AssertContent() {
+	c.assertContent = true
+}
+
+// RegisterFormat registers custom format.
+//
+// NOTE:
+// - "regex" format cannot be overridden
+// - format assertions are disabled for draft >= 2019-09
+// see [Compiler.AssertFormat]
+func (c *Compiler) RegisterFormat(f *Format) {
+	if f.Name != "regex" {
+		c.formats[f.Name] = f
+	}
+}
+
+// RegisterContentEncoding registers custom contentEncoding.
+//
+// NOTE: content assertions are disabled by default.
+// see [Compiler.AssertContent].
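+//
+// A sketch of registering a custom encoding (this assumes the Decoder type
+// carries a Name and a Decode func(string) ([]byte, error); those fields are
+// defined elsewhere in the package, not in this hunk):
+//
+//	c.RegisterContentEncoding(&Decoder{
+//		Name:   "hex",
+//		Decode: hex.DecodeString,
+//	})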
+func (c *Compiler) RegisterContentEncoding(d *Decoder) { + c.decoders[d.Name] = d +} + +// RegisterContentMediaType registers custom contentMediaType. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentMediaType(mt *MediaType) { + c.mediaTypes[mt.Name] = mt +} + +// RegisterVocabulary registers custom vocabulary. +// +// NOTE: +// - vocabularies are disabled for draft >= 2019-09 +// see [Compiler.AssertVocabs] +func (c *Compiler) RegisterVocabulary(vocab *Vocabulary) { + c.roots.vocabularies[vocab.URL] = vocab +} + +// AssertVocabs always enables user-defined vocabularies assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema enables a vocabulary. +// for draft/2020-12: disabled unless metaschema enables a vocabulary. +func (c *Compiler) AssertVocabs() { + c.roots.assertVocabs = true +} + +// AddResource adds schema resource which gets used later in reference +// resolution. +// +// The argument url can be file path or url. Any fragment in url is ignored. +// The argument doc must be valid json value. +func (c *Compiler) AddResource(url string, doc any) error { + uf, err := absolute(url) + if err != nil { + return err + } + if isMeta(string(uf.url)) { + return &ResourceExistsError{string(uf.url)} + } + if !c.roots.loader.add(uf.url, doc) { + return &ResourceExistsError{string(uf.url)} + } + return nil +} + +// UseLoader overrides the default [URLLoader] used +// to load schema resources. +func (c *Compiler) UseLoader(loader URLLoader) { + c.roots.loader.loader = loader +} + +// UseRegexpEngine changes the regexp-engine used. +// By default it uses regexp package from go standard +// library. +// +// NOTE: must be called before compiling any schemas. +func (c *Compiler) UseRegexpEngine(engine RegexpEngine) { + if engine == nil { + engine = goRegexpCompile + } + c.roots.regexpEngine = engine +} + +func (c *Compiler) enqueue(q *queue, up urlPtr) *Schema { + if sch, ok := c.schemas[up]; ok { + // already got compiled + return sch + } + if sch := q.get(up); sch != nil { + return sch + } + sch := newSchema(up) + q.append(sch) + return sch +} + +// MustCompile is like [Compile] but panics if compilation fails. +// It simplifies safe initialization of global variables holding +// compiled schema. +func (c *Compiler) MustCompile(loc string) *Schema { + sch, err := c.Compile(loc) + if err != nil { + panic(fmt.Sprintf("jsonschema: Compile(%q): %v", loc, err)) + } + return sch +} + +// Compile compiles json-schema at given loc. 
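+//
+// Typical usage (a sketch; "schema.json" is an illustrative resource name,
+// registered in-memory via AddResource so no URLLoader is needed):
+//
+//	c := jsonschema.NewCompiler()
+//	doc, _ := jsonschema.UnmarshalJSON(strings.NewReader(`{"type": "object"}`))
+//	_ = c.AddResource("schema.json", doc)
+//	sch, err := c.Compile("schema.json")
+//	// sch can now be used to validate decoded instances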
+func (c *Compiler) Compile(loc string) (*Schema, error) { + uf, err := absolute(loc) + if err != nil { + return nil, err + } + up, err := c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.doCompile(up) +} + +func (c *Compiler) doCompile(up urlPtr) (*Schema, error) { + q := &queue{} + compiled := 0 + + c.enqueue(q, up) + for q.len() > compiled { + sch := q.at(compiled) + if err := c.roots.ensureSubschema(sch.up); err != nil { + return nil, err + } + r := c.roots.roots[sch.up.url] + v, err := sch.up.lookup(r.doc) + if err != nil { + return nil, err + } + if err := c.compileValue(v, sch, r, q); err != nil { + return nil, err + } + compiled++ + } + for _, sch := range *q { + c.schemas[sch.up] = sch + } + return c.schemas[up], nil +} + +func (c *Compiler) compileValue(v any, sch *Schema, r *root, q *queue) error { + res := r.resource(sch.up.ptr) + sch.DraftVersion = res.dialect.draft.version + + base := urlPtr{sch.up.url, res.ptr} + sch.resource = c.enqueue(q, base) + + // if resource, enqueue dynamic anchors for compilation + if sch.DraftVersion >= 2020 && sch.up == sch.resource.up { + res := r.resource(sch.up.ptr) + for anchor, anchorPtr := range res.anchors { + if slices.Contains(res.dynamicAnchors, anchor) { + up := urlPtr{sch.up.url, anchorPtr} + danchorSch := c.enqueue(q, up) + if sch.dynamicAnchors == nil { + sch.dynamicAnchors = map[string]*Schema{} + } + sch.dynamicAnchors[string(anchor)] = danchorSch + } + } + } + + switch v := v.(type) { + case bool: + sch.Bool = &v + case map[string]any: + if err := c.compileObject(v, sch, r, q); err != nil { + return err + } + } + + sch.allPropsEvaluated = sch.AdditionalProperties != nil + if sch.DraftVersion < 2020 { + sch.allItemsEvaluated = sch.AdditionalItems != nil + switch items := sch.Items.(type) { + case *Schema: + sch.allItemsEvaluated = true + case []*Schema: + sch.numItemsEvaluated = len(items) + } + } else { + sch.allItemsEvaluated = sch.Items2020 != nil + sch.numItemsEvaluated = len(sch.PrefixItems) + } + + return nil +} + +func (c *Compiler) compileObject(obj map[string]any, sch *Schema, r *root, q *queue) error { + if len(obj) == 0 { + b := true + sch.Bool = &b + return nil + } + oc := objCompiler{ + c: c, + obj: obj, + up: sch.up, + r: r, + res: r.resource(sch.up.ptr), + q: q, + } + return oc.compile(sch) +} + +// queue -- + +type queue []*Schema + +func (q *queue) append(sch *Schema) { + *q = append(*q, sch) +} + +func (q *queue) at(i int) *Schema { + return (*q)[i] +} + +func (q *queue) len() int { + return len(*q) +} + +func (q *queue) get(up urlPtr) *Schema { + i := slices.IndexFunc(*q, func(sch *Schema) bool { return sch.up == up }) + if i != -1 { + return (*q)[i] + } + return nil +} + +// regexp -- + +// Regexp is the representation of compiled regular expression. +type Regexp interface { + fmt.Stringer + + // MatchString reports whether the string s contains + // any match of the regular expression. + MatchString(string) bool +} + +// RegexpEngine parses a regular expression and returns, +// if successful, a Regexp object that can be used to +// match against text. 
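+//
+// A custom engine can be plugged in via [Compiler.UseRegexpEngine]; this
+// sketch simply re-wires the default standard-library behavior:
+//
+//	c.UseRegexpEngine(func(s string) (jsonschema.Regexp, error) {
+//		return regexp.Compile(s) // *regexp.Regexp satisfies Regexp
+//	})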
+type RegexpEngine func(string) (Regexp, error)
+
+func (re RegexpEngine) validate(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	_, err := re(s)
+	return err
+}
+
+func goRegexpCompile(s string) (Regexp, error) {
+	return regexp.Compile(s)
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go
new file mode 100644
index 0000000000..8d62e58b09
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go
@@ -0,0 +1,51 @@
+package jsonschema
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+)
+
+// Decoder specifies how to decode specific contentEncoding.
+type Decoder struct {
+	// Name of contentEncoding.
+	Name string
+	// Decode given string to byte array.
+	Decode func(string) ([]byte, error)
+}
+
+var decoders = map[string]*Decoder{
+	"base64": {
+		Name: "base64",
+		Decode: func(s string) ([]byte, error) {
+			return base64.StdEncoding.DecodeString(s)
+		},
+	},
+}
+
+// MediaType specifies how to validate bytes against specific contentMediaType.
+type MediaType struct {
+	// Name of contentMediaType.
+	Name string
+
+	// Validate checks whether bytes conform to this mediatype.
+	Validate func([]byte) error
+
+	// UnmarshalJSON unmarshals bytes into json value.
+	// This must be nil if this mediatype is not compatible
+	// with json.
+	UnmarshalJSON func([]byte) (any, error)
+}
+
+var mediaTypes = map[string]*MediaType{
+	"application/json": {
+		Name: "application/json",
+		Validate: func(b []byte) error {
+			var v any
+			return json.Unmarshal(b, &v)
+		},
+		UnmarshalJSON: func(b []byte) (any, error) {
+			return UnmarshalJSON(bytes.NewReader(b))
+		},
+	},
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go
new file mode 100644
index 0000000000..fd09bae8d3
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go
@@ -0,0 +1,360 @@
+package jsonschema
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+)
+
+// A Draft represents a json-schema specification.
+type Draft struct {
+	version       int
+	url           string
+	sch           *Schema
+	id            string             // property name used to represent id
+	subschemas    []SchemaPath       // locations of subschemas
+	vocabPrefix   string             // prefix used for vocabulary
+	allVocabs     map[string]*Schema // names of supported vocabs with its schemas
+	defaultVocabs []string           // names of default vocabs
+}
+
+// String returns the specification url.
+func (d *Draft) String() string {
+	return d.url
+}
+
+var (
+	Draft4 = &Draft{
+		version: 4,
+		url:     "http://json-schema.org/draft-04/schema",
+		id:      "id",
+		subschemas: []SchemaPath{
+			// type agnostic
+			schemaPath("definitions/*"),
+			schemaPath("not"),
+			schemaPath("allOf/[]"),
+			schemaPath("anyOf/[]"),
+			schemaPath("oneOf/[]"),
+			// object
+			schemaPath("properties/*"),
+			schemaPath("additionalProperties"),
+			schemaPath("patternProperties/*"),
+			// array
+			schemaPath("items"),
+			schemaPath("items/[]"),
+			schemaPath("additionalItems"),
+			schemaPath("dependencies/*"),
+		},
+		vocabPrefix:   "",
+		allVocabs:     map[string]*Schema{},
+		defaultVocabs: []string{},
+	}
+
+	Draft6 = &Draft{
+		version: 6,
+		url:     "http://json-schema.org/draft-06/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft4.subschemas,
+			schemaPath("propertyNames"),
+			schemaPath("contains"),
+		),
+		vocabPrefix:   "",
+		allVocabs:     map[string]*Schema{},
+		defaultVocabs: []string{},
+	}
+
+	Draft7 = &Draft{
+		version: 7,
+		url:     "http://json-schema.org/draft-07/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft6.subschemas,
+			schemaPath("if"),
+			schemaPath("then"),
+			schemaPath("else"),
+		),
+		vocabPrefix:   "",
+		allVocabs:     map[string]*Schema{},
+		defaultVocabs: []string{},
+	}
+
+	Draft2019 = &Draft{
+		version: 2019,
+		url:     "https://json-schema.org/draft/2019-09/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft7.subschemas,
+			schemaPath("$defs/*"),
+			schemaPath("dependentSchemas/*"),
+			schemaPath("unevaluatedProperties"),
+			schemaPath("unevaluatedItems"),
+			schemaPath("contentSchema"),
+		),
+		vocabPrefix: "https://json-schema.org/draft/2019-09/vocab/",
+		allVocabs: map[string]*Schema{
+			"core":       nil,
+			"applicator": nil,
+			"validation": nil,
+			"meta-data":  nil,
+			"format":     nil,
+			"content":    nil,
+		},
+		defaultVocabs: []string{"core", "applicator", "validation"},
+	}
+
+	Draft2020 = &Draft{
+		version: 2020,
+		url:     "https://json-schema.org/draft/2020-12/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft2019.subschemas,
+			schemaPath("prefixItems/[]"),
+		),
+		vocabPrefix: "https://json-schema.org/draft/2020-12/vocab/",
+		allVocabs: map[string]*Schema{
+			"core":              nil,
+			"applicator":        nil,
+			"unevaluated":       nil,
+			"validation":        nil,
+			"meta-data":         nil,
+			"format-annotation": nil,
+			"format-assertion":  nil,
+			"content":           nil,
+		},
+		defaultVocabs: []string{"core", "applicator", "unevaluated", "validation"},
+	}
+
+	draftLatest = Draft2020
+)
+
+func init() {
+	c := NewCompiler()
+	c.AssertFormat()
+	for _, d := range []*Draft{Draft4, Draft6, Draft7, Draft2019, Draft2020} {
+		d.sch = c.MustCompile(d.url)
+		for name := range d.allVocabs {
+			d.allVocabs[name] = c.MustCompile(strings.TrimSuffix(d.url, "schema") + "meta/" + name)
+		}
+	}
+}
+
+func draftFromURL(url string) *Draft {
+	u, frag := split(url)
+	if frag != "" {
+		return nil
+	}
+	u, ok := strings.CutPrefix(u, "http://")
+	if !ok {
+		u, _ = strings.CutPrefix(u, "https://")
+	}
+	switch u {
+	case "json-schema.org/schema":
+		return draftLatest
+	case "json-schema.org/draft/2020-12/schema":
+		return Draft2020
+	case "json-schema.org/draft/2019-09/schema":
+		return Draft2019
+	case "json-schema.org/draft-07/schema":
+		return Draft7
+	case "json-schema.org/draft-06/schema":
+		return Draft6
+	case "json-schema.org/draft-04/schema":
+		return Draft4
+	default:
+		return nil
+	}
+}
+
+func (d *Draft) getID(obj map[string]any) string {
+	if d.version < 2019 {
+		if _, ok := obj["$ref"]; ok {
+			// All other properties in a "$ref" object MUST be ignored
+			return ""
+		}
+	}
+
id, ok := strVal(obj, d.id) + if !ok { + return "" + } + id, _ = split(id) // ignore fragment + return id +} + +func (d *Draft) getVocabs(url url, doc any, vocabularies map[string]*Vocabulary) ([]string, error) { + if d.version < 2019 { + return nil, nil + } + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + v, ok := obj["$vocabulary"] + if !ok { + return nil, nil + } + obj, ok = v.(map[string]any) + if !ok { + return nil, nil + } + + var vocabs []string + for vocab, reqd := range obj { + if reqd, ok := reqd.(bool); !ok || !reqd { + continue + } + name, ok := strings.CutPrefix(vocab, d.vocabPrefix) + if ok { + if _, ok := d.allVocabs[name]; ok { + if !slices.Contains(vocabs, name) { + vocabs = append(vocabs, name) + continue + } + } + } + if _, ok := vocabularies[vocab]; !ok { + return nil, &UnsupportedVocabularyError{url.String(), vocab} + } + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + if !slices.Contains(vocabs, "core") { + vocabs = append(vocabs, "core") + } + return vocabs, nil +} + +// -- + +type dialect struct { + draft *Draft + vocabs []string // nil means use draft.defaultVocabs +} + +func (d *dialect) hasVocab(name string) bool { + if name == "core" || d.draft.version < 2019 { + return true + } + if d.vocabs != nil { + return slices.Contains(d.vocabs, name) + } + return slices.Contains(d.draft.defaultVocabs, name) +} + +func (d *dialect) activeVocabs(assertVocabs bool, vocabularies map[string]*Vocabulary) []string { + if len(vocabularies) == 0 { + return d.vocabs + } + if d.draft.version < 2019 { + assertVocabs = true + } + if !assertVocabs { + return d.vocabs + } + var vocabs []string + if d.vocabs == nil { + vocabs = slices.Clone(d.draft.defaultVocabs) + } else { + vocabs = slices.Clone(d.vocabs) + } + for vocab := range vocabularies { + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + return vocabs +} + +func (d *dialect) getSchema(assertVocabs bool, vocabularies map[string]*Vocabulary) *Schema { + vocabs := d.activeVocabs(assertVocabs, vocabularies) + if vocabs == nil { + return d.draft.sch + } + + var allOf []*Schema + for _, vocab := range vocabs { + sch := d.draft.allVocabs[vocab] + if sch == nil { + if v, ok := vocabularies[vocab]; ok { + sch = v.Schema + } + } + if sch != nil { + allOf = append(allOf, sch) + } + } + if !slices.Contains(vocabs, "core") { + sch := d.draft.allVocabs["core"] + if sch == nil { + sch = d.draft.sch + } + allOf = append(allOf, sch) + } + sch := &Schema{ + Location: "urn:mem:metaschema", + up: urlPtr{url("urn:mem:metaschema"), ""}, + DraftVersion: d.draft.version, + AllOf: allOf, + } + sch.resource = sch + if sch.DraftVersion >= 2020 { + sch.DynamicAnchor = "meta" + sch.dynamicAnchors = map[string]*Schema{ + "meta": sch, + } + } + return sch +} + +// -- + +type ParseIDError struct { + URL string +} + +func (e *ParseIDError) Error() string { + return fmt.Sprintf("error in parsing id at %q", e.URL) +} + +// -- + +type ParseAnchorError struct { + URL string +} + +func (e *ParseAnchorError) Error() string { + return fmt.Sprintf("error in parsing anchor at %q", e.URL) +} + +// -- + +type DuplicateIDError struct { + ID string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateIDError) Error() string { + return fmt.Sprintf("duplicate id %q in %q at %q and %q", e.ID, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +type DuplicateAnchorError struct { + Anchor string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateAnchorError) Error() string { + return 
fmt.Sprintf("duplicate anchor %q in %q at %q and %q", e.Anchor, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +func joinSubschemas(a1 []SchemaPath, a2 ...SchemaPath) []SchemaPath { + var a []SchemaPath + a = append(a, a1...) + a = append(a, a2...) + return a +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go new file mode 100644 index 0000000000..b78b22e2a5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go @@ -0,0 +1,708 @@ +package jsonschema + +import ( + "net/netip" + gourl "net/url" + "strconv" + "strings" + "time" +) + +// Format defined specific format. +type Format struct { + // Name of format. + Name string + + // Validate checks if given value is of this format. + Validate func(v any) error +} + +var formats = map[string]*Format{ + "json-pointer": {"json-pointer", validateJSONPointer}, + "relative-json-pointer": {"relative-json-pointer", validateRelativeJSONPointer}, + "uuid": {"uuid", validateUUID}, + "duration": {"duration", validateDuration}, + "period": {"period", validatePeriod}, + "ipv4": {"ipv4", validateIPV4}, + "ipv6": {"ipv6", validateIPV6}, + "hostname": {"hostname", validateHostname}, + "email": {"email", validateEmail}, + "date": {"date", validateDate}, + "time": {"time", validateTime}, + "date-time": {"date-time", validateDateTime}, + "uri": {"uri", validateURI}, + "iri": {"iri", validateURI}, + "uri-reference": {"uri-reference", validateURIReference}, + "iri-reference": {"iri-reference", validateURIReference}, + "uri-template": {"uri-template", validateURITemplate}, + "semver": {"semver", validateSemver}, +} + +// see https://www.rfc-editor.org/rfc/rfc6901#section-3 +func validateJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if s == "" { + return nil + } + if !strings.HasPrefix(s, "/") { + return LocalizableError("not starting with /") + } + for _, tok := range strings.Split(s, "/")[1:] { + escape := false + for _, ch := range tok { + if escape { + escape = false + if ch != '0' && ch != '1' { + return LocalizableError("~ must be followed by 0 or 1") + } + continue + } + if ch == '~' { + escape = true + continue + } + switch { + case ch >= '\x00' && ch <= '\x2E': + case ch >= '\x30' && ch <= '\x7D': + case ch >= '\x7F' && ch <= '\U0010FFFF': + default: + return LocalizableError("invalid character %q", ch) + } + } + if escape { + return LocalizableError("~ must be followed by 0 or 1") + } + } + return nil +} + +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func validateRelativeJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // start with non-negative-integer + numDigits := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("must start with non-negative integer") + } + if numDigits > 1 && strings.HasPrefix(s, "0") { + return LocalizableError("starts with zero") + } + s = s[numDigits:] + + // followed by either json-pointer or '#' + if s == "#" { + return nil + } + return validateJSONPointer(s) +} + +// see https://datatracker.ietf.org/doc/html/rfc4122#page-4 +func validateUUID(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + hexGroups := []int{8, 4, 4, 4, 12} + groups := strings.Split(s, "-") + if len(groups) != len(hexGroups) { + return LocalizableError("must have %d elements", len(hexGroups)) + } + for i, group := range groups { + if len(group) != 
hexGroups[i] { + return LocalizableError("element %d must be %d characters long", i+1, hexGroups[i]) + } + for _, ch := range group { + switch { + case ch >= '0' && ch <= '9': + case ch >= 'a' && ch <= 'f': + case ch >= 'A' && ch <= 'F': + default: + return LocalizableError("non-hex character %q", ch) + } + } + } + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A +func validateDuration(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // must start with 'P' + s, ok = strings.CutPrefix(s, "P") + if !ok { + return LocalizableError("must start with P") + } + if s == "" { + return LocalizableError("nothing after P") + } + + // dur-week + if s, ok := strings.CutSuffix(s, "W"); ok { + if s == "" { + return LocalizableError("no number in week") + } + for _, ch := range s { + if ch < '0' || ch > '9' { + return LocalizableError("invalid week") + } + } + return nil + } + + allUnits := []string{"YMD", "HMS"} + for i, s := range strings.Split(s, "T") { + if i != 0 && s == "" { + return LocalizableError("no time elements") + } + if i >= len(allUnits) { + return LocalizableError("more than one T") + } + units := allUnits[i] + for s != "" { + digitCount := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + digitCount++ + } else { + break + } + } + if digitCount == 0 { + return LocalizableError("missing number") + } + s = s[digitCount:] + if s == "" { + return LocalizableError("missing unit") + } + unit := s[0] + j := strings.IndexByte(units, unit) + if j == -1 { + if strings.IndexByte(allUnits[i], unit) != -1 { + return LocalizableError("unit %q out of order", unit) + } + return LocalizableError("invalid unit %q", unit) + } + units = units[j+1:] + s = s[1:] + } + } + + return nil +} + +func validateIPV4(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + groups := strings.Split(s, ".") + if len(groups) != 4 { + return LocalizableError("expected four decimals") + } + for _, group := range groups { + if len(group) > 1 && group[0] == '0' { + return LocalizableError("leading zeros") + } + n, err := strconv.Atoi(group) + if err != nil { + return err + } + if n < 0 || n > 255 { + return LocalizableError("decimal must be between 0 and 255") + } + } + return nil +} + +func validateIPV6(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if !strings.Contains(s, ":") { + return LocalizableError("missing colon") + } + addr, err := netip.ParseAddr(s) + if err != nil { + return err + } + if addr.Zone() != "" { + return LocalizableError("zone id is not a part of ipv6 address") + } + return nil +} + +// see https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names +func validateHostname(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return LocalizableError("more than 253 characters long") + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if len(label) < 1 || len(label) > 63 { + return LocalizableError("label must be 1 to 63 characters long") + } + + // labels must not start or end with a hyphen + if strings.HasPrefix(label, "-") { + return LocalizableError("label starts with hyphen") + } + if strings.HasSuffix(label, "-") { + return LocalizableError("label ends with hyphen") + } + 
+		// labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner),
+		// the digits '0' through '9', and the hyphen ('-')
+		for _, ch := range label {
+			switch {
+			case ch >= 'a' && ch <= 'z':
+			case ch >= 'A' && ch <= 'Z':
+			case ch >= '0' && ch <= '9':
+			case ch == '-':
+			default:
+				return LocalizableError("invalid character %q", ch)
+			}
+		}
+	}
+	return nil
+}
+
+// see https://en.wikipedia.org/wiki/Email_address
+func validateEmail(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	// entire email address to be no more than 254 characters long
+	if len(s) > 254 {
+		return LocalizableError("more than 254 characters long")
+	}
+
+	// email address is generally recognized as having two parts joined with an at-sign
+	at := strings.LastIndexByte(s, '@')
+	if at == -1 {
+		return LocalizableError("missing @")
+	}
+	local, domain := s[:at], s[at+1:]
+
+	// local part may be up to 64 characters long
+	if len(local) > 64 {
+		return LocalizableError("local part more than 64 characters long")
+	}
+
+	if len(local) > 1 && strings.HasPrefix(local, `"`) && strings.HasSuffix(local, `"`) {
+		// quoted
+		local := local[1 : len(local)-1]
+		if strings.IndexByte(local, '\\') != -1 || strings.IndexByte(local, '"') != -1 {
+			return LocalizableError("backslash and quote are not allowed within quoted local part")
+		}
+	} else {
+		// unquoted
+		if strings.HasPrefix(local, ".") {
+			return LocalizableError("starts with dot")
+		}
+		if strings.HasSuffix(local, ".") {
+			return LocalizableError("ends with dot")
+		}
+
+		// consecutive dots not allowed
+		if strings.Contains(local, "..") {
+			return LocalizableError("consecutive dots")
+		}
+
+		// check allowed chars
+		for _, ch := range local {
+			switch {
+			case ch >= 'a' && ch <= 'z':
+			case ch >= 'A' && ch <= 'Z':
+			case ch >= '0' && ch <= '9':
+			case strings.ContainsRune(".!#$%&'*+-/=?^_`{|}~", ch):
+			default:
+				return LocalizableError("invalid character %q", ch)
+			}
+		}
+	}
+
+	// domain if enclosed in brackets, must match an IP address
+	if strings.HasPrefix(domain, "[") && strings.HasSuffix(domain, "]") {
+		domain = domain[1 : len(domain)-1]
+		if rem, ok := strings.CutPrefix(domain, "IPv6:"); ok {
+			if err := validateIPV6(rem); err != nil {
+				return LocalizableError("invalid ipv6 address: %v", err)
+			}
+			return nil
+		}
+		if err := validateIPV4(domain); err != nil {
+			return LocalizableError("invalid ipv4 address: %v", err)
+		}
+		return nil
+	}
+
+	// domain must match the requirements for a hostname
+	if err := validateHostname(domain); err != nil {
+		return LocalizableError("invalid domain: %v", err)
+	}
+
+	return nil
+}
+
+// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
+func validateDate(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	_, err := time.Parse("2006-01-02", s)
+	return err
+}
+
+// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
+// NOTE: golang time package does not support leap seconds.
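+// Examples (illustrative): "23:59:60Z" is accepted as a leap second,
+// whereas "12:00:60Z" is rejected, since a leap second can occur only
+// at 23:59 UTC.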
+func validateTime(v any) error { + str, ok := v.(string) + if !ok { + return nil + } + + // min: hh:mm:ssZ + if len(str) < 9 { + return LocalizableError("less than 9 characters long") + } + if str[2] != ':' || str[5] != ':' { + return LocalizableError("missing colon in correct place") + } + + // parse hh:mm:ss + var hms []int + for _, tok := range strings.SplitN(str[:8], ":", 3) { + i, err := strconv.Atoi(tok) + if err != nil { + return LocalizableError("invalid hour/min/sec") + } + if i < 0 { + return LocalizableError("non-positive hour/min/sec") + } + hms = append(hms, i) + } + if len(hms) != 3 { + return LocalizableError("missing hour/min/sec") + } + h, m, s := hms[0], hms[1], hms[2] + if h > 23 || m > 59 || s > 60 { + return LocalizableError("hour/min/sec out of range") + } + str = str[8:] + + // parse sec-frac if present + if rem, ok := strings.CutPrefix(str, "."); ok { + numDigits := 0 + for _, ch := range rem { + if ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("no digits in second fraction") + } + str = rem[numDigits:] + } + + if str != "z" && str != "Z" { + // parse time-numoffset + if len(str) != 6 { + return LocalizableError("offset must be 6 characters long") + } + var sign int + switch str[0] { + case '+': + sign = -1 + case '-': + sign = +1 + default: + return LocalizableError("offset must begin with plus/minus") + } + str = str[1:] + if str[2] != ':' { + return LocalizableError("missing colon in offset in correct place") + } + + var zhm []int + for _, tok := range strings.SplitN(str, ":", 2) { + i, err := strconv.Atoi(tok) + if err != nil { + return LocalizableError("invalid hour/min in offset") + } + if i < 0 { + return LocalizableError("non-positive hour/min in offset") + } + zhm = append(zhm, i) + } + zh, zm := zhm[0], zhm[1] + if zh > 23 || zm > 59 { + return LocalizableError("hour/min in offset out of range") + } + + // apply timezone + hm := (h*60 + m) + sign*(zh*60+zm) + if hm < 0 { + hm += 24 * 60 + } + h, m = hm/60, hm%60 + } + + // check leap second + if s >= 60 && (h != 23 || m != 59) { + return LocalizableError("invalid leap second") + } + + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDateTime(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // min: yyyy-mm-ddThh:mm:ssZ + if len(s) < 20 { + return LocalizableError("less than 20 characters long") + } + + if s[10] != 't' && s[10] != 'T' { + return LocalizableError("11th character must be t or T") + } + if err := validateDate(s[:10]); err != nil { + return LocalizableError("invalid date element: %v", err) + } + if err := validateTime(s[11:]); err != nil { + return LocalizableError("invalid time element: %v", err) + } + return nil +} + +func parseURL(s string) (*gourl.URL, error) { + u, err := gourl.Parse(s) + if err != nil { + return nil, err + } + + // gourl does not validate ipv6 host address + hostName := u.Hostname() + if strings.Contains(hostName, ":") { + if !strings.Contains(u.Host, "[") || !strings.Contains(u.Host, "]") { + return nil, LocalizableError("ipv6 address not enclosed in brackets") + } + if err := validateIPV6(hostName); err != nil { + return nil, LocalizableError("invalid ipv6 address: %v", err) + } + } + + return u, nil +} + +func validateURI(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + if !u.IsAbs() { + return LocalizableError("relative url") + } + return nil +} + +func 
validateURIReference(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if strings.Contains(s, `\`) { + return LocalizableError(`contains \`) + } + _, err := parseURL(s) + return err +} + +func validateURITemplate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + for _, tok := range strings.Split(u.RawPath, "/") { + tok, err = decode(tok) + if err != nil { + return LocalizableError("percent decode failed: %v", err) + } + want := true + for _, ch := range tok { + var got bool + switch ch { + case '{': + got = true + case '}': + got = false + default: + continue + } + if got != want { + return LocalizableError("nested curly braces") + } + want = !want + } + if !want { + return LocalizableError("no matching closing brace") + } + } + return nil +} + +func validatePeriod(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + slash := strings.IndexByte(s, '/') + if slash == -1 { + return LocalizableError("missing slash") + } + + start, end := s[:slash], s[slash+1:] + if strings.HasPrefix(start, "P") { + if err := validateDuration(start); err != nil { + return LocalizableError("invalid start duration: %v", err) + } + if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } else { + if err := validateDateTime(start); err != nil { + return LocalizableError("invalid start date-time: %v", err) + } + if strings.HasPrefix(end, "P") { + if err := validateDuration(end); err != nil { + return LocalizableError("invalid end duration: %v", err) + } + } else if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } + + return nil +} + +// see https://semver.org/#backusnaur-form-grammar-for-valid-semver-versions +func validateSemver(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // build -- + if i := strings.IndexByte(s, '+'); i != -1 { + build := s[i+1:] + if build == "" { + return LocalizableError("build is empty") + } + for _, buildID := range strings.Split(build, ".") { + if buildID == "" { + return LocalizableError("build identifier is empty") + } + for _, ch := range buildID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + default: + return LocalizableError("invalid character %q in build identifier", ch) + } + } + } + s = s[:i] + } + + // pre-release -- + if i := strings.IndexByte(s, '-'); i != -1 { + preRelease := s[i+1:] + for _, preReleaseID := range strings.Split(preRelease, ".") { + if preReleaseID == "" { + return LocalizableError("pre-release identifier is empty") + } + allDigits := true + for _, ch := range preReleaseID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + allDigits = false + default: + return LocalizableError("invalid character %q in pre-release identifier", ch) + } + } + if allDigits && len(preReleaseID) > 1 && preReleaseID[0] == '0' { + return LocalizableError("pre-release numeric identifier starts with zero") + } + } + s = s[:i] + } + + // versionCore -- + versions := strings.Split(s, ".") + if len(versions) != 3 { + return LocalizableError("versionCore must have 3 numbers separated by dot") + } + names := []string{"major", "minor", "patch"} + for i, version := range versions { + if version == "" { + return LocalizableError("%s is empty", names[i]) + } + if len(version) > 1 && version[0] == '0' { + return 
LocalizableError("%s starts with zero", names[i]) + } + for _, ch := range version { + if ch < '0' || ch > '9' { + return LocalizableError("%s contains non-digit", names[i]) + } + } + } + + return nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work new file mode 100644 index 0000000000..e7f4d93de4 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work @@ -0,0 +1,8 @@ +go 1.21.1 + +use ( + . + ./cmd/jv +) + +// replace github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 => ./ diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum new file mode 100644 index 0000000000..2b5b811d99 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work.sum @@ -0,0 +1,4 @@ +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go new file mode 100644 index 0000000000..a37fb0b978 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go @@ -0,0 +1,651 @@ +package kind + +import ( + "fmt" + "math/big" + "strings" + + "golang.org/x/text/message" +) + +// -- + +type InvalidJsonValue struct { + Value any +} + +func (*InvalidJsonValue) KeywordPath() []string { + return nil +} + +func (k *InvalidJsonValue) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid jsonType %T", k.Value) +} + +// -- + +type Schema struct { + Location string +} + +func (*Schema) KeywordPath() []string { + return nil +} + +func (k *Schema) LocalizedString(p *message.Printer) string { + return p.Sprintf("jsonschema validation failed with %s", quote(k.Location)) +} + +// -- + +type Group struct{} + +func (*Group) KeywordPath() []string { + return nil +} + +func (*Group) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type Not struct{} + +func (*Not) KeywordPath() []string { + return nil +} + +func (*Not) LocalizedString(p *message.Printer) string { + return p.Sprintf("'not' failed") +} + +// -- + +type AllOf struct{} + +func (*AllOf) KeywordPath() []string { + return []string{"allOf"} +} + +func (*AllOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("'allOf' failed") +} + +// -- + +type AnyOf struct{} + +func (*AnyOf) KeywordPath() []string { + return []string{"anyOf"} +} + +func (*AnyOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("'anyOf' failed") +} + +// -- + +type OneOf struct { + // Subschemas gives indexes of Subschemas that have matched. + // Value nil, means none of the subschemas matched. 
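+	// (For example, []int{0, 2} would mean subschemas 0 and 2 both
+	// matched, which violates oneOf; the values here are illustrative.)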
+ Subschemas []int +} + +func (*OneOf) KeywordPath() []string { + return []string{"oneOf"} +} + +func (k *OneOf) LocalizedString(p *message.Printer) string { + if len(k.Subschemas) == 0 { + return p.Sprintf("'oneOf' failed, none matched") + } + return p.Sprintf("'oneOf' failed, subschemas %d, %d matched", k.Subschemas[0], k.Subschemas[1]) +} + +//-- + +type FalseSchema struct{} + +func (*FalseSchema) KeywordPath() []string { + return nil +} + +func (*FalseSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("false schema") +} + +// -- + +type RefCycle struct { + URL string + KeywordLocation1 string + KeywordLocation2 string +} + +func (*RefCycle) KeywordPath() []string { + return nil +} + +func (k *RefCycle) LocalizedString(p *message.Printer) string { + return p.Sprintf("both %s and %s resolve to %q causing reference cycle", k.KeywordLocation1, k.KeywordLocation2, k.URL) +} + +// -- + +type Type struct { + Got string + Want []string +} + +func (*Type) KeywordPath() []string { + return []string{"type"} +} + +func (k *Type) LocalizedString(p *message.Printer) string { + want := strings.Join(k.Want, " or ") + return p.Sprintf("got %s, want %s", k.Got, want) +} + +// -- + +type Enum struct { + Got any + Want []any +} + +// KeywordPath implements jsonschema.ErrorKind. +func (*Enum) KeywordPath() []string { + return []string{"enum"} +} + +func (k *Enum) LocalizedString(p *message.Printer) string { + allPrimitive := true +loop: + for _, item := range k.Want { + switch item.(type) { + case []any, map[string]any: + allPrimitive = false + break loop + } + } + if allPrimitive { + if len(k.Want) == 1 { + return p.Sprintf("value must be %s", display(k.Want[0])) + } + var want []string + for _, v := range k.Want { + want = append(want, display(v)) + } + return p.Sprintf("value must be one of %s", strings.Join(want, ", ")) + } + return p.Sprintf("'enum' failed") +} + +// -- + +type Const struct { + Got any + Want any +} + +func (*Const) KeywordPath() []string { + return []string{"const"} +} + +func (k *Const) LocalizedString(p *message.Printer) string { + switch want := k.Want.(type) { + case []any, map[string]any: + return p.Sprintf("'const' failed") + default: + return p.Sprintf("value must be %s", display(want)) + } +} + +// -- + +type Format struct { + Got any + Want string + Err error +} + +func (*Format) KeywordPath() []string { + return []string{"format"} +} + +func (k *Format) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s is not valid %s: %v", display(k.Got), k.Want, localizedError(k.Err, p)) +} + +// -- + +type Reference struct { + Keyword string + URL string +} + +func (k *Reference) KeywordPath() []string { + return []string{k.Keyword} +} + +func (*Reference) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type MinProperties struct { + Got, Want int +} + +func (*MinProperties) KeywordPath() []string { + return []string{"minProperties"} +} + +func (k *MinProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("minProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxProperties struct { + Got, Want int +} + +func (*MaxProperties) KeywordPath() []string { + return []string{"maxProperties"} +} + +func (k *MaxProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MinItems struct { + Got, Want int +} + +func (*MinItems) KeywordPath() []string { + return []string{"minItems"} +} + +func 
(k *MinItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("minItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxItems struct { + Got, Want int +} + +func (*MaxItems) KeywordPath() []string { + return []string{"maxItems"} +} + +func (k *MaxItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type AdditionalItems struct { + Count int +} + +func (*AdditionalItems) KeywordPath() []string { + return []string{"additionalItems"} +} + +func (k *AdditionalItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("last %d additionalItem(s) not allowed", k.Count) +} + +// -- + +type Required struct { + Missing []string +} + +func (*Required) KeywordPath() []string { + return []string{"required"} +} + +func (k *Required) LocalizedString(p *message.Printer) string { + if len(k.Missing) == 1 { + return p.Sprintf("missing property %s", quote(k.Missing[0])) + } + return p.Sprintf("missing properties %s", joinQuoted(k.Missing, ", ")) +} + +// -- + +type Dependency struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *Dependency) KeywordPath() []string { + return []string{"dependency", k.Prop} +} + +func (k *Dependency) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type DependentRequired struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *DependentRequired) KeywordPath() []string { + return []string{"dependentRequired", k.Prop} +} + +func (k *DependentRequired) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type AdditionalProperties struct { + Properties []string +} + +func (*AdditionalProperties) KeywordPath() []string { + return []string{"additionalProperties"} +} + +func (k *AdditionalProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("additional properties %s not allowed", joinQuoted(k.Properties, ", ")) +} + +// -- + +type PropertyNames struct { + Property string +} + +func (*PropertyNames) KeywordPath() []string { + return []string{"propertyNames"} +} + +func (k *PropertyNames) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid propertyName %s", quote(k.Property)) +} + +// -- + +type UniqueItems struct { + Duplicates [2]int +} + +func (*UniqueItems) KeywordPath() []string { + return []string{"uniqueItems"} +} + +func (k *UniqueItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("items at %d and %d are equal", k.Duplicates[0], k.Duplicates[1]) +} + +// -- + +type Contains struct{} + +func (*Contains) KeywordPath() []string { + return []string{"contains"} +} + +func (*Contains) LocalizedString(p *message.Printer) string { + return p.Sprintf("no items match contains schema") +} + +// -- + +type MinContains struct { + Got []int + Want int +} + +func (*MinContains) KeywordPath() []string { + return []string{"minContains"} +} + +func (k *MinContains) LocalizedString(p *message.Printer) string { + if len(k.Got) == 0 { + return p.Sprintf("min %d items required to match contains schema, but none matched", k.Want) + } else { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("min %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), 
got[1:len(got)-1])
+	}
+}
+
+// --
+
+type MaxContains struct {
+	Got  []int
+	Want int
+}
+
+func (*MaxContains) KeywordPath() []string {
+	return []string{"maxContains"}
+}
+
+func (k *MaxContains) LocalizedString(p *message.Printer) string {
+	got := fmt.Sprintf("%v", k.Got)
+	return p.Sprintf("max %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1])
+}
+
+// --
+
+type MinLength struct {
+	Got, Want int
+}
+
+func (*MinLength) KeywordPath() []string {
+	return []string{"minLength"}
+}
+
+func (k *MinLength) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("minLength: got %d, want %d", k.Got, k.Want)
+}
+
+// --
+
+type MaxLength struct {
+	Got, Want int
+}
+
+func (*MaxLength) KeywordPath() []string {
+	return []string{"maxLength"}
+}
+
+func (k *MaxLength) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("maxLength: got %d, want %d", k.Got, k.Want)
+}
+
+// --
+
+type Pattern struct {
+	Got  string
+	Want string
+}
+
+func (*Pattern) KeywordPath() []string {
+	return []string{"pattern"}
+}
+
+func (k *Pattern) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("%s does not match pattern %s", quote(k.Got), quote(k.Want))
+}
+
+// --
+
+type ContentEncoding struct {
+	Want string
+	Err  error
+}
+
+func (*ContentEncoding) KeywordPath() []string {
+	return []string{"contentEncoding"}
+}
+
+func (k *ContentEncoding) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("value is not %s encoded: %v", quote(k.Want), localizedError(k.Err, p))
+}
+
+// --
+
+type ContentMediaType struct {
+	Got  []byte
+	Want string
+	Err  error
+}
+
+func (*ContentMediaType) KeywordPath() []string {
+	return []string{"contentMediaType"}
+}
+
+func (k *ContentMediaType) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("value is not of mediatype %s: %v", quote(k.Want), k.Err)
+}
+
+// --
+
+type ContentSchema struct{}
+
+func (*ContentSchema) KeywordPath() []string {
+	return []string{"contentSchema"}
+}
+
+func (*ContentSchema) LocalizedString(p *message.Printer) string {
+	return p.Sprintf("'contentSchema' failed")
+}
+
+// --
+
+type Minimum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*Minimum) KeywordPath() []string {
+	return []string{"minimum"}
+}
+
+func (k *Minimum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("minimum: got %v, want %v", got, want)
+}
+
+// --
+
+type Maximum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*Maximum) KeywordPath() []string {
+	return []string{"maximum"}
+}
+
+func (k *Maximum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("maximum: got %v, want %v", got, want)
+}
+
+// --
+
+type ExclusiveMinimum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*ExclusiveMinimum) KeywordPath() []string {
+	return []string{"exclusiveMinimum"}
+}
+
+func (k *ExclusiveMinimum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("exclusiveMinimum: got %v, want %v", got, want)
+}
+
+// --
+
+type ExclusiveMaximum struct {
+	Got  *big.Rat
+	Want *big.Rat
+}
+
+func (*ExclusiveMaximum) KeywordPath() []string {
+	return []string{"exclusiveMaximum"}
+}
+
+func (k *ExclusiveMaximum) LocalizedString(p *message.Printer) string {
+	got, _ := k.Got.Float64()
+	want, _ := k.Want.Float64()
+	return p.Sprintf("exclusiveMaximum: got %v, want %v", got,
want) +} + +// -- + +type MultipleOf struct { + Got *big.Rat + Want *big.Rat +} + +func (*MultipleOf) KeywordPath() []string { + return []string{"multipleOf"} +} + +func (k *MultipleOf) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("multipleOf: got %v, want %v", got, want) +} + +// -- + +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func joinQuoted(arr []string, sep string) string { + var sb strings.Builder + for _, s := range arr { + if sb.Len() > 0 { + sb.WriteString(sep) + } + sb.WriteString(quote(s)) + } + return sb.String() +} + +// to be used only for primitive. +func display(v any) string { + switch v := v.(type) { + case string: + return quote(v) + case []any, map[string]any: + return "value" + default: + return fmt.Sprintf("%v", v) + } +} + +func localizedError(err error, p *message.Printer) string { + if err, ok := err.(interface{ LocalizedError(*message.Printer) string }); ok { + return err.LocalizedError(p) + } + return err.Error() +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go new file mode 100644 index 0000000000..ce0170e20a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go @@ -0,0 +1,266 @@ +package jsonschema + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + gourl "net/url" + "os" + "path/filepath" + "runtime" + "strings" +) + +// URLLoader knows how to load json from given url. +type URLLoader interface { + // Load loads json from given absolute url. + Load(url string) (any, error) +} + +// -- + +// FileLoader loads json file url. +type FileLoader struct{} + +func (l FileLoader) Load(url string) (any, error) { + path, err := l.ToFile(url) + if err != nil { + return nil, err + } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return UnmarshalJSON(f) +} + +// ToFile is helper method to convert file url to file path. +func (l FileLoader) ToFile(url string) (string, error) { + u, err := gourl.Parse(url) + if err != nil { + return "", err + } + if u.Scheme != "file" { + return "", fmt.Errorf("invalid file url: %s", u) + } + path := u.Path + if runtime.GOOS == "windows" { + path = strings.TrimPrefix(path, "/") + path = filepath.FromSlash(path) + } + return path, nil +} + +// -- + +// SchemeURLLoader delegates to other [URLLoaders] +// based on url scheme. 
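+//
+// Sketch of restricting schema resolution to file urls (add entries for
+// other schemes as needed):
+//
+//	c.UseLoader(jsonschema.SchemeURLLoader{
+//		"file": jsonschema.FileLoader{},
+//	})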
+type SchemeURLLoader map[string]URLLoader
+
+func (l SchemeURLLoader) Load(url string) (any, error) {
+	u, err := gourl.Parse(url)
+	if err != nil {
+		return nil, err
+	}
+	ll, ok := l[u.Scheme]
+	if !ok {
+		return nil, &UnsupportedURLSchemeError{u.String()}
+	}
+	return ll.Load(url)
+}
+
+// --
+
+//go:embed metaschemas
+var metaFS embed.FS
+
+func openMeta(url string) (fs.File, error) {
+	u, meta := strings.CutPrefix(url, "http://json-schema.org/")
+	if !meta {
+		u, meta = strings.CutPrefix(url, "https://json-schema.org/")
+	}
+	if meta {
+		if u == "schema" {
+			return openMeta(draftLatest.url)
+		}
+		f, err := metaFS.Open("metaschemas/" + u)
+		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil, nil
+			}
+			return nil, err
+		}
+		return f, err
+	}
+	return nil, nil
+}
+
+func isMeta(url string) bool {
+	f, err := openMeta(url)
+	if err != nil {
+		return true
+	}
+	if f != nil {
+		f.Close()
+		return true
+	}
+	return false
+}
+
+func loadMeta(url string) (any, error) {
+	f, err := openMeta(url)
+	if err != nil {
+		return nil, err
+	}
+	if f == nil {
+		return nil, nil
+	}
+	defer f.Close()
+	return UnmarshalJSON(f)
+}
+
+// --
+
+type defaultLoader struct {
+	docs   map[url]any // docs loaded so far
+	loader URLLoader
+}
+
+func (l *defaultLoader) add(url url, doc any) bool {
+	if _, ok := l.docs[url]; ok {
+		return false
+	}
+	l.docs[url] = doc
+	return true
+}
+
+func (l *defaultLoader) load(url url) (any, error) {
+	if doc, ok := l.docs[url]; ok {
+		return doc, nil
+	}
+	doc, err := loadMeta(url.String())
+	if err != nil {
+		return nil, err
+	}
+	if doc != nil {
+		l.add(url, doc)
+		return doc, nil
+	}
+	if l.loader == nil {
+		return nil, &LoadURLError{url.String(), errors.New("no URLLoader set")}
+	}
+	doc, err = l.loader.Load(url.String())
+	if err != nil {
+		return nil, &LoadURLError{URL: url.String(), Err: err}
+	}
+	l.add(url, doc)
+	return doc, nil
+}
+
+func (l *defaultLoader) getDraft(up urlPtr, doc any, defaultDraft *Draft, cycle map[url]struct{}) (*Draft, error) {
+	obj, ok := doc.(map[string]any)
+	if !ok {
+		return defaultDraft, nil
+	}
+	sch, ok := strVal(obj, "$schema")
+	if !ok {
+		return defaultDraft, nil
+	}
+	if draft := draftFromURL(sch); draft != nil {
+		return draft, nil
+	}
+	sch, _ = split(sch)
+	if _, err := gourl.Parse(sch); err != nil {
+		return nil, &InvalidMetaSchemaURLError{up.String(), err}
+	}
+	schUrl := url(sch)
+	if up.ptr.isEmpty() && schUrl == up.url {
+		return nil, &UnsupportedDraftError{schUrl.String()}
+	}
+	if _, ok := cycle[schUrl]; ok {
+		return nil, &MetaSchemaCycleError{schUrl.String()}
+	}
+	cycle[schUrl] = struct{}{}
+	doc, err := l.load(schUrl)
+	if err != nil {
+		return nil, err
+	}
+	return l.getDraft(urlPtr{schUrl, ""}, doc, defaultDraft, cycle)
+}
+
+func (l *defaultLoader) getMetaVocabs(doc any, draft *Draft, vocabularies map[string]*Vocabulary) ([]string, error) {
+	obj, ok := doc.(map[string]any)
+	if !ok {
+		return nil, nil
+	}
+	sch, ok := strVal(obj, "$schema")
+	if !ok {
+		return nil, nil
+	}
+	if draft := draftFromURL(sch); draft != nil {
+		return nil, nil
+	}
+	sch, _ = split(sch)
+	if _, err := gourl.Parse(sch); err != nil {
+		return nil, &ParseURLError{sch, err}
+	}
+	schUrl := url(sch)
+	doc, err := l.load(schUrl)
+	if err != nil {
+		return nil, err
+	}
+	return draft.getVocabs(schUrl, doc, vocabularies)
+}
+
+// --
+
+type LoadURLError struct {
+	URL string
+	Err error
+}
+
+func (e *LoadURLError) Error() string {
+	return fmt.Sprintf("failed loading %q: %v", e.URL, e.Err)
+}
+
+// --
+
+type
UnsupportedURLSchemeError struct { + url string +} + +func (e *UnsupportedURLSchemeError) Error() string { + return fmt.Sprintf("no URLLoader registered for %q", e.url) +} + +// -- + +type ResourceExistsError struct { + url string +} + +func (e *ResourceExistsError) Error() string { + return fmt.Sprintf("resource for %q already exists", e.url) +} + +// -- + +// UnmarshalJSON unmarshals into [any] without losing +// number precision using [json.Number]. +func UnmarshalJSON(r io.Reader) (any, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc any + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if _, err := decoder.Token(); err == nil || err != io.EOF { + return nil, fmt.Errorf("invalid character after top-level value") + } + return doc, nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema new file mode 100644 index 0000000000..b2a7ff0f54 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema @@ -0,0 +1,151 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + 
"additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema new file mode 100644 index 0000000000..fa22ad1b06 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema @@ -0,0 +1,150 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ 
+ { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema new file mode 100644 index 0000000000..326759a622 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema @@ -0,0 +1,172 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" 
}, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator new file mode 100644 index 0000000000..857d2d4955 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator @@ -0,0 +1,55 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content new file mode 100644 index 0000000000..fa5d20b8d6 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + 
"contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core new file mode 100644 index 0000000000..bf5731985d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format new file mode 100644 index 0000000000..fe553c2397 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data new file mode 100644 index 0000000000..5c95715c4a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation new file mode 100644 index 0000000000..f3525e0760 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema new file mode 100644 index 0000000000..f433389be6 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema @@ -0,0 +1,41 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + 
"$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator new file mode 100644 index 0000000000..0ef24edc81 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator @@ -0,0 +1,47 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content new file mode 100644 index 0000000000..0330ff0a81 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core new file mode 100644 index 0000000000..c4de7005ab --- /dev/null +++ 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core @@ -0,0 +1,50 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation new file mode 100644 index 0000000000..0aa07d1c15 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion new file mode 100644 index 0000000000..38613bff6e --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data new file mode 100644 index 0000000000..30e2837140 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + 
}, + "$dynamicAnchor": "meta", + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated new file mode 100644 index 0000000000..e9e093d12a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation new file mode 100644 index 0000000000..4e016ed2b7 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + 
"boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema new file mode 100644 index 0000000000..364f8ada6c --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go new file mode 100644 index 0000000000..f1494b13a8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go @@ -0,0 +1,549 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "strconv" +) + +type objCompiler struct { + c *Compiler + obj map[string]any + up urlPtr + r *root + res *resource + q *queue +} + +func (c *objCompiler) compile(s *Schema) error { + // id -- + if id := c.res.dialect.draft.getID(c.obj); id != "" { + s.ID = id + } + + // anchor -- + if s.DraftVersion < 2019 { + // anchor is specified in id + id := c.string(c.res.dialect.draft.id) + if id != "" { + _, f := split(id) + if f != "" { + var err error + s.Anchor, err = decode(f) + if err != nil { + return &ParseAnchorError{URL: s.Location} + } + } + } + } else { + s.Anchor = 
c.string("$anchor") + } + + if err := c.compileDraft4(s); err != nil { + return err + } + if s.DraftVersion >= 6 { + if err := c.compileDraft6(s); err != nil { + return err + } + } + if s.DraftVersion >= 7 { + if err := c.compileDraft7(s); err != nil { + return err + } + } + if s.DraftVersion >= 2019 { + if err := c.compileDraft2019(s); err != nil { + return err + } + } + if s.DraftVersion >= 2020 { + if err := c.compileDraft2020(s); err != nil { + return err + } + } + + // vocabularies + vocabs := c.res.dialect.activeVocabs(c.c.roots.assertVocabs, c.c.roots.vocabularies) + for _, vocab := range vocabs { + v := c.c.roots.vocabularies[vocab] + if v == nil { + continue + } + ext, err := v.Compile(&CompilerContext{c}, c.obj) + if err != nil { + return err + } + if ext != nil { + s.Extensions = append(s.Extensions, ext) + } + } + + return nil +} + +func (c *objCompiler) compileDraft4(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.Ref, err = c.enqueueRef("$ref"); err != nil { + return err + } + if s.DraftVersion < 2019 && s.Ref != nil { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if c.hasVocab("applicator") { + s.AllOf = c.enqueueArr("allOf") + s.AnyOf = c.enqueueArr("anyOf") + s.OneOf = c.enqueueArr("oneOf") + s.Not = c.enqueueProp("not") + + if s.DraftVersion < 2020 { + if items, ok := c.obj["items"]; ok { + if _, ok := items.([]any); ok { + s.Items = c.enqueueArr("items") + s.AdditionalItems = c.enqueueAdditional("additionalItems") + } else { + s.Items = c.enqueueProp("items") + } + } + } + + s.Properties = c.enqueueMap("properties") + if m := c.enqueueMap("patternProperties"); m != nil { + s.PatternProperties = map[Regexp]*Schema{} + for pname, sch := range m { + re, err := c.c.roots.regexpEngine(pname) + if err != nil { + return &InvalidRegexError{c.up.format("patternProperties"), pname, err} + } + s.PatternProperties[re] = sch + } + } + s.AdditionalProperties = c.enqueueAdditional("additionalProperties") + + if m := c.objVal("dependencies"); m != nil { + s.Dependencies = map[string]any{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.Dependencies[pname] = toStrings(arr) + } else { + ptr := c.up.ptr.append2("dependencies", pname) + s.Dependencies[pname] = c.enqueuePtr(ptr) + } + } + } + } + + if c.hasVocab("validation") { + if t, ok := c.obj["type"]; ok { + s.Types = newTypes(t) + } + if arr := c.arrVal("enum"); arr != nil { + s.Enum = newEnum(arr) + } + s.MultipleOf = c.numVal("multipleOf") + s.Maximum = c.numVal("maximum") + if c.boolean("exclusiveMaximum") { + s.ExclusiveMaximum = s.Maximum + s.Maximum = nil + } else { + s.ExclusiveMaximum = c.numVal("exclusiveMaximum") + } + s.Minimum = c.numVal("minimum") + if c.boolean("exclusiveMinimum") { + s.ExclusiveMinimum = s.Minimum + s.Minimum = nil + } else { + s.ExclusiveMinimum = c.numVal("exclusiveMinimum") + } + + s.MinLength = c.intVal("minLength") + s.MaxLength = c.intVal("maxLength") + if pat := c.strVal("pattern"); pat != nil { + s.Pattern, err = c.c.roots.regexpEngine(*pat) + if err != nil { + return &InvalidRegexError{c.up.format("pattern"), *pat, err} + } + } + + s.MinItems = c.intVal("minItems") + s.MaxItems = c.intVal("maxItems") + s.UniqueItems = c.boolean("uniqueItems") + + s.MaxProperties = c.intVal("maxProperties") + s.MinProperties = c.intVal("minProperties") + if arr := c.arrVal("required"); arr != nil { + s.Required = toStrings(arr) + } + } + + // format -- + if c.assertFormat(s.DraftVersion) { + if f := c.strVal("format"); f != 
nil { + if *f == "regex" { + s.Format = &Format{ + Name: "regex", + Validate: c.c.roots.regexpEngine.validate, + } + } else { + s.Format = c.c.formats[*f] + if s.Format == nil { + s.Format = formats[*f] + } + } + } + } + + // annotations -- + s.Title = c.string("title") + s.Description = c.string("description") + if v, ok := c.obj["default"]; ok { + s.Default = &v + } + + return nil +} + +func (c *objCompiler) compileDraft6(s *Schema) error { + if c.hasVocab("applicator") { + s.Contains = c.enqueueProp("contains") + s.PropertyNames = c.enqueueProp("propertyNames") + } + if c.hasVocab("validation") { + if v, ok := c.obj["const"]; ok { + s.Const = &v + } + } + return nil +} + +func (c *objCompiler) compileDraft7(s *Schema) error { + if c.hasVocab("applicator") { + s.If = c.enqueueProp("if") + if s.If != nil { + b := c.boolVal("if") + if b == nil || *b { + s.Then = c.enqueueProp("then") + } + if b == nil || !*b { + s.Else = c.enqueueProp("else") + } + } + } + + if c.c.assertContent { + if ce := c.strVal("contentEncoding"); ce != nil { + s.ContentEncoding = c.c.decoders[*ce] + if s.ContentEncoding == nil { + s.ContentEncoding = decoders[*ce] + } + } + if cm := c.strVal("contentMediaType"); cm != nil { + s.ContentMediaType = c.c.mediaTypes[*cm] + if s.ContentMediaType == nil { + s.ContentMediaType = mediaTypes[*cm] + } + } + } + + // annotations -- + s.Comment = c.string("$comment") + s.ReadOnly = c.boolean("readOnly") + s.WriteOnly = c.boolean("writeOnly") + if arr, ok := c.obj["examples"].([]any); ok { + s.Examples = arr + } + + return nil +} + +func (c *objCompiler) compileDraft2019(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.RecursiveRef, err = c.enqueueRef("$recursiveRef"); err != nil { + return err + } + s.RecursiveAnchor = c.boolean("$recursiveAnchor") + } + + if c.hasVocab("validation") { + if s.Contains != nil { + s.MinContains = c.intVal("minContains") + s.MaxContains = c.intVal("maxContains") + } + if m := c.objVal("dependentRequired"); m != nil { + s.DependentRequired = map[string][]string{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.DependentRequired[pname] = toStrings(arr) + } + } + } + } + + if c.hasVocab("applicator") { + s.DependentSchemas = c.enqueueMap("dependentSchemas") + } + + var unevaluated bool + if s.DraftVersion == 2019 { + unevaluated = c.hasVocab("applicator") + } else { + unevaluated = c.hasVocab("unevaluated") + } + if unevaluated { + s.UnevaluatedItems = c.enqueueProp("unevaluatedItems") + s.UnevaluatedProperties = c.enqueueProp("unevaluatedProperties") + } + + if c.c.assertContent { + if s.ContentMediaType != nil && s.ContentMediaType.UnmarshalJSON != nil { + s.ContentSchema = c.enqueueProp("contentSchema") + } + } + + // annotations -- + s.Deprecated = c.boolean("deprecated") + + return nil +} + +func (c *objCompiler) compileDraft2020(s *Schema) error { + if c.hasVocab("core") { + sch, err := c.enqueueRef("$dynamicRef") + if err != nil { + return err + } + if sch != nil { + dref := c.strVal("$dynamicRef") + _, frag, err := splitFragment(*dref) + if err != nil { + return err + } + var anch string + if anchor, ok := frag.convert().(anchor); ok { + anch = string(anchor) + } + s.DynamicRef = &DynamicRef{sch, anch} + } + s.DynamicAnchor = c.string("$dynamicAnchor") + } + + if c.hasVocab("applicator") { + s.PrefixItems = c.enqueueArr("prefixItems") + s.Items2020 = c.enqueueProp("items") + } + + return nil +} + +// enqueue helpers -- + +func (c *objCompiler) enqueuePtr(ptr jsonPointer) *Schema { + up := 
urlPtr{c.up.url, ptr} + return c.c.enqueue(c.q, up) +} + +func (c *objCompiler) enqueueRef(pname string) (*Schema, error) { + ref := c.strVal(pname) + if ref == nil { + return nil, nil + } + baseURL := c.res.id + // baseURL := c.r.baseURL(c.up.ptr) + uf, err := baseURL.join(*ref) + if err != nil { + return nil, err + } + + up, err := c.r.resolve(*uf) + if err != nil { + return nil, err + } + if up != nil { + // local ref + return c.enqueuePtr(up.ptr), nil + } + + // remote ref + up_, err := c.c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.c.enqueue(c.q, up_), nil +} + +func (c *objCompiler) enqueueProp(pname string) *Schema { + if _, ok := c.obj[pname]; !ok { + return nil + } + ptr := c.up.ptr.append(pname) + return c.enqueuePtr(ptr) +} + +func (c *objCompiler) enqueueArr(pname string) []*Schema { + arr := c.arrVal(pname) + if arr == nil { + return nil + } + sch := make([]*Schema, len(arr)) + for i := range arr { + ptr := c.up.ptr.append2(pname, strconv.Itoa(i)) + sch[i] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueMap(pname string) map[string]*Schema { + obj := c.objVal(pname) + if obj == nil { + return nil + } + sch := make(map[string]*Schema) + for k := range obj { + ptr := c.up.ptr.append2(pname, k) + sch[k] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueAdditional(pname string) any { + if b := c.boolVal(pname); b != nil { + return *b + } + if sch := c.enqueueProp(pname); sch != nil { + return sch + } + return nil +} + +// -- + +func (c *objCompiler) hasVocab(name string) bool { + return c.res.dialect.hasVocab(name) +} + +func (c *objCompiler) assertFormat(draftVersion int) bool { + if c.c.assertFormat || draftVersion < 2019 { + return true + } + if draftVersion == 2019 { + return c.hasVocab("format") + } else { + return c.hasVocab("format-assertion") + } +} + +// value helpers -- + +func (c *objCompiler) boolVal(pname string) *bool { + v, ok := c.obj[pname] + if !ok { + return nil + } + b, ok := v.(bool) + if !ok { + return nil + } + return &b +} + +func (c *objCompiler) boolean(pname string) bool { + b := c.boolVal(pname) + return b != nil && *b +} + +func (c *objCompiler) strVal(pname string) *string { + v, ok := c.obj[pname] + if !ok { + return nil + } + s, ok := v.(string) + if !ok { + return nil + } + return &s +} + +func (c *objCompiler) string(pname string) string { + if s := c.strVal(pname); s != nil { + return *s + } + return "" +} + +func (c *objCompiler) numVal(pname string) *big.Rat { + v, ok := c.obj[pname] + if !ok { + return nil + } + switch v.(type) { + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if n, ok := new(big.Rat).SetString(fmt.Sprint(v)); ok { + return n + } + } + return nil +} + +func (c *objCompiler) intVal(pname string) *int { + if n := c.numVal(pname); n != nil && n.IsInt() { + n := int(n.Num().Int64()) + return &n + } + return nil +} + +func (c *objCompiler) objVal(pname string) map[string]any { + v, ok := c.obj[pname] + if !ok { + return nil + } + obj, ok := v.(map[string]any) + if !ok { + return nil + } + return obj +} + +func (c *objCompiler) arrVal(pname string) []any { + v, ok := c.obj[pname] + if !ok { + return nil + } + arr, ok := v.([]any) + if !ok { + return nil + } + return arr +} + +// -- + +type InvalidRegexError struct { + URL string + Regex string + Err error +} + +func (e *InvalidRegexError) Error() string { + return fmt.Sprintf("invalid regex %q at %q: %v", e.Regex, e.URL, e.Err) +} + +// -- + 
+func toStrings(arr []any) []string { + var strings []string + for _, item := range arr { + if s, ok := item.(string); ok { + strings = append(strings, s) + } + } + return strings +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go new file mode 100644 index 0000000000..69d3f26de5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go @@ -0,0 +1,216 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +var defaultPrinter = message.NewPrinter(language.English) + +// format --- + +func (e *ValidationError) schemaURL() string { + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + return ref.URL + } else { + return e.SchemaURL + } +} + +func (e *ValidationError) absoluteKeywordLocation() string { + var schemaURL string + var keywordPath []string + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + schemaURL = ref.URL + keywordPath = nil + } else { + schemaURL = e.SchemaURL + keywordPath = e.ErrorKind.KeywordPath() + } + return fmt.Sprintf("%s%s", schemaURL, encode(jsonPtr(keywordPath))) +} + +func (e *ValidationError) skip() bool { + if len(e.Causes) == 1 { + _, ok := e.ErrorKind.(*kind.Reference) + return ok + } + return false +} + +func (e *ValidationError) display(sb *strings.Builder, verbose bool, indent int, absKwLoc string, p *message.Printer) { + if !e.skip() { + if indent > 0 { + sb.WriteByte('\n') + for i := 0; i < indent-1; i++ { + sb.WriteString(" ") + } + sb.WriteString("- ") + } + indent = indent + 1 + + prevAbsKwLoc := absKwLoc + absKwLoc = e.absoluteKeywordLocation() + + if _, ok := e.ErrorKind.(*kind.Schema); ok { + sb.WriteString(e.ErrorKind.LocalizedString(p)) + } else { + sb.WriteString(p.Sprintf("at %s", quote(jsonPtr(e.InstanceLocation)))) + if verbose { + schLoc := absKwLoc + if prevAbsKwLoc != "" { + pu, _ := split(prevAbsKwLoc) + u, f := split(absKwLoc) + if u == pu { + schLoc = fmt.Sprintf("S#%s", f) + } + } + fmt.Fprintf(sb, " [%s]", schLoc) + } + fmt.Fprintf(sb, ": %s", e.ErrorKind.LocalizedString(p)) + } + } + for _, cause := range e.Causes { + cause.display(sb, verbose, indent, absKwLoc, p) + } +} + +func (e *ValidationError) Error() string { + return e.LocalizedError(defaultPrinter) +} + +func (e *ValidationError) LocalizedError(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, false, 0, "", p) + return sb.String() +} + +func (e *ValidationError) GoString() string { + return e.LocalizedGoString(defaultPrinter) +} + +func (e *ValidationError) LocalizedGoString(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, true, 0, "", p) + return sb.String() +} + +func jsonPtr(tokens []string) string { + var sb strings.Builder + for _, tok := range tokens { + sb.WriteByte('/') + sb.WriteString(escape(tok)) + } + return sb.String() +} + +// -- + +// Flag is output format with simple boolean property valid. +type FlagOutput struct { + Valid bool `json:"valid"` +} + +// The `Flag` output format, merely the boolean result. 
+func (e *ValidationError) FlagOutput() *FlagOutput {
+	return &FlagOutput{Valid: false}
+}
+
+// --
+
+type OutputUnit struct {
+	Valid                   bool         `json:"valid"`
+	KeywordLocation         string       `json:"keywordLocation"`
+	AbsoluteKeywordLocation string       `json:"absoluteKeywordLocation,omitempty"`
+	InstanceLocation        string       `json:"instanceLocation"`
+	Error                   *OutputError `json:"error,omitempty"`
+	Errors                  []OutputUnit `json:"errors,omitempty"`
+}
+
+type OutputError struct {
+	Kind ErrorKind
+	p    *message.Printer
+}
+
+func (k OutputError) String() string {
+	return k.Kind.LocalizedString(k.p)
+}
+
+func (k OutputError) MarshalJSON() ([]byte, error) {
+	return json.Marshal(k.Kind.LocalizedString(k.p))
+}
+
+// The `Basic` structure, a flat list of output units.
+func (e *ValidationError) BasicOutput() *OutputUnit {
+	return e.LocalizedBasicOutput(defaultPrinter)
+}
+
+func (e *ValidationError) LocalizedBasicOutput(p *message.Printer) *OutputUnit {
+	out := e.output(true, false, "", "", p)
+	return &out
+}
+
+// The `Detailed` structure, based on the schema.
+func (e *ValidationError) DetailedOutput() *OutputUnit {
+	return e.LocalizedDetailedOutput(defaultPrinter)
+}
+
+func (e *ValidationError) LocalizedDetailedOutput(p *message.Printer) *OutputUnit {
+	out := e.output(false, false, "", "", p)
+	return &out
+}
+
+func (e *ValidationError) output(flatten, inRef bool, schemaURL, kwLoc string, p *message.Printer) OutputUnit {
+	if !inRef {
+		if _, ok := e.ErrorKind.(*kind.Reference); ok {
+			inRef = true
+		}
+	}
+	if schemaURL != "" {
+		kwLoc += e.SchemaURL[len(schemaURL):]
+		if ref, ok := e.ErrorKind.(*kind.Reference); ok {
+			kwLoc += jsonPtr(ref.KeywordPath())
+		}
+	}
+	schemaURL = e.schemaURL()
+
+	keywordLocation := kwLoc
+	if _, ok := e.ErrorKind.(*kind.Reference); !ok {
+		keywordLocation += jsonPtr(e.ErrorKind.KeywordPath())
+	}
+
+	out := OutputUnit{
+		Valid:            false,
+		InstanceLocation: jsonPtr(e.InstanceLocation),
+		KeywordLocation:  keywordLocation,
+	}
+	if inRef {
+		out.AbsoluteKeywordLocation = e.absoluteKeywordLocation()
+	}
+	for _, cause := range e.Causes {
+		causeOut := cause.output(flatten, inRef, schemaURL, kwLoc, p)
+		if cause.skip() {
+			causeOut = causeOut.Errors[0]
+		}
+		if flatten {
+			errors := causeOut.Errors
+			causeOut.Errors = nil
+			causeOut.Error = &OutputError{cause.ErrorKind, p}
+			out.Errors = append(out.Errors, causeOut)
+			if len(errors) > 0 {
+				out.Errors = append(out.Errors, errors...)
+			}
+		} else {
+			out.Errors = append(out.Errors, causeOut)
+		}
+	}
+	if len(out.Errors) == 0 {
+		out.Error = &OutputError{e.ErrorKind, p}
+	}
+	return out
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go
new file mode 100644
index 0000000000..576a2a47f7
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go
@@ -0,0 +1,142 @@
+package jsonschema
+
+import (
+	"strconv"
+	"strings"
+)
+
+// Position tells possible tokens in json.
+type Position interface { + collect(v any, ptr jsonPointer) map[jsonPointer]any +} + +// -- + +type AllProp struct{} + +func (AllProp) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for pname, pvalue := range obj { + m[ptr.append(pname)] = pvalue + } + return m +} + +// -- + +type AllItem struct{} + +func (AllItem) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for i, item := range arr { + m[ptr.append(strconv.Itoa(i))] = item + } + return m +} + +// -- + +type Prop string + +func (p Prop) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + pvalue, ok := obj[string(p)] + if !ok { + return nil + } + return map[jsonPointer]any{ + ptr.append(string(p)): pvalue, + } +} + +// -- + +type Item int + +func (i Item) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + if i < 0 || int(i) >= len(arr) { + return nil + } + return map[jsonPointer]any{ + ptr.append(strconv.Itoa(int(i))): arr[int(i)], + } +} + +// -- + +// SchemaPath tells where to look for subschema inside keyword. +type SchemaPath []Position + +func schemaPath(path string) SchemaPath { + var sp SchemaPath + for _, tok := range strings.Split(path, "/") { + var pos Position + switch tok { + case "*": + pos = AllProp{} + case "[]": + pos = AllItem{} + default: + if i, err := strconv.Atoi(tok); err == nil { + pos = Item(i) + } else { + pos = Prop(tok) + } + } + sp = append(sp, pos) + } + return sp +} + +func (sp SchemaPath) collect(v any, ptr jsonPointer) map[jsonPointer]any { + if len(sp) == 0 { + return map[jsonPointer]any{ + ptr: v, + } + } + p, sp := sp[0], sp[1:] + m := p.collect(v, ptr) + mm := map[jsonPointer]any{} + for ptr, v := range m { + m = sp.collect(v, ptr) + for k, v := range m { + mm[k] = v + } + } + return mm +} + +func (sp SchemaPath) String() string { + var sb strings.Builder + for _, pos := range sp { + if sb.Len() != 0 { + sb.WriteByte('/') + } + switch pos := pos.(type) { + case AllProp: + sb.WriteString("*") + case AllItem: + sb.WriteString("[]") + case Prop: + sb.WriteString(string(pos)) + case Item: + sb.WriteString(strconv.Itoa(int(pos))) + } + } + return sb.String() +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go new file mode 100644 index 0000000000..a8b819bab0 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go @@ -0,0 +1,202 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +type root struct { + url url + doc any + resources map[jsonPointer]*resource + subschemasProcessed map[jsonPointer]struct{} +} + +func (r *root) rootResource() *resource { + return r.resources[""] +} + +func (r *root) resource(ptr jsonPointer) *resource { + for { + if res, ok := r.resources[ptr]; ok { + return res + } + slash := strings.LastIndexByte(string(ptr), '/') + if slash == -1 { + break + } + ptr = ptr[:slash] + } + return r.rootResource() +} + +func (r *root) resolveFragmentIn(frag fragment, res *resource) (urlPtr, error) { + var ptr jsonPointer + switch f := frag.convert().(type) { + case jsonPointer: + ptr = res.ptr.concat(f) + case anchor: + aptr, ok := res.anchors[f] + if !ok { + return urlPtr{}, &AnchorNotFoundError{ + URL: r.url.String(), + Reference: (&urlFrag{res.id, frag}).String(), + } + } + ptr 
= aptr + } + return urlPtr{r.url, ptr}, nil +} + +func (r *root) resolveFragment(frag fragment) (urlPtr, error) { + return r.resolveFragmentIn(frag, r.rootResource()) +} + +// resolves urlFrag to urlPtr from root. +// returns nil if it is external. +func (r *root) resolve(uf urlFrag) (*urlPtr, error) { + var res *resource + if uf.url == r.url { + res = r.rootResource() + } else { + // look for resource with id==uf.url + for _, v := range r.resources { + if v.id == uf.url { + res = v + break + } + } + if res == nil { + return nil, nil // external url + } + } + up, err := r.resolveFragmentIn(uf.frag, res) + return &up, err +} + +func (r *root) collectAnchors(sch any, schPtr jsonPointer, res *resource) error { + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + addAnchor := func(anchor anchor) error { + ptr1, ok := res.anchors[anchor] + if ok { + if ptr1 == schPtr { + // anchor with same root_ptr already exists + return nil + } + return &DuplicateAnchorError{ + string(anchor), r.url.String(), string(ptr1), string(schPtr), + } + } + res.anchors[anchor] = schPtr + return nil + } + + if res.dialect.draft.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return nil + } + // anchor is specified in id + if id, ok := strVal(obj, res.dialect.draft.id); ok { + _, frag, err := splitFragment(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseAnchorError{loc.String()} + } + if anchor, ok := frag.convert().(anchor); ok { + if err := addAnchor(anchor); err != nil { + return err + } + } + } + } + if res.dialect.draft.version >= 2019 { + if s, ok := strVal(obj, "$anchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + } + } + if res.dialect.draft.version >= 2020 { + if s, ok := strVal(obj, "$dynamicAnchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + res.dynamicAnchors = append(res.dynamicAnchors, anchor(s)) + } + } + + return nil +} + +func (r *root) clone() *root { + processed := map[jsonPointer]struct{}{} + for k := range r.subschemasProcessed { + processed[k] = struct{}{} + } + resources := map[jsonPointer]*resource{} + for k, v := range r.resources { + resources[k] = v.clone() + } + return &root{ + url: r.url, + doc: r.doc, + resources: resources, + subschemasProcessed: processed, + } +} + +// -- + +type resource struct { + ptr jsonPointer + id url + dialect dialect + anchors map[anchor]jsonPointer + dynamicAnchors []anchor +} + +func newResource(ptr jsonPointer, id url) *resource { + return &resource{ptr: ptr, id: id, anchors: make(map[anchor]jsonPointer)} +} + +func (res *resource) clone() *resource { + anchors := map[anchor]jsonPointer{} + for k, v := range res.anchors { + anchors[k] = v + } + return &resource{ + ptr: res.ptr, + id: res.id, + dialect: res.dialect, + anchors: anchors, + dynamicAnchors: slices.Clone(res.dynamicAnchors), + } +} + +//-- + +type UnsupportedVocabularyError struct { + URL string + Vocabulary string +} + +func (e *UnsupportedVocabularyError) Error() string { + return fmt.Sprintf("unsupported vocabulary %q in %q", e.Vocabulary, e.URL) +} + +// -- + +type AnchorNotFoundError struct { + URL string + Reference string +} + +func (e *AnchorNotFoundError) Error() string { + return fmt.Sprintf("anchor in %q not found in schema %q", e.Reference, e.URL) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go new file mode 100644 index 0000000000..a8d0ef0ce2 --- 
/dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go @@ -0,0 +1,286 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +type roots struct { + defaultDraft *Draft + roots map[url]*root + loader defaultLoader + regexpEngine RegexpEngine + vocabularies map[string]*Vocabulary + assertVocabs bool +} + +func newRoots() *roots { + return &roots{ + defaultDraft: draftLatest, + roots: map[url]*root{}, + loader: defaultLoader{ + docs: map[url]any{}, + loader: FileLoader{}, + }, + regexpEngine: goRegexpCompile, + vocabularies: map[string]*Vocabulary{}, + } +} + +func (rr *roots) orLoad(u url) (*root, error) { + if r, ok := rr.roots[u]; ok { + return r, nil + } + doc, err := rr.loader.load(u) + if err != nil { + return nil, err + } + return rr.addRoot(u, doc) +} + +func (rr *roots) addRoot(u url, doc any) (*root, error) { + r := &root{ + url: u, + doc: doc, + resources: map[jsonPointer]*resource{}, + subschemasProcessed: map[jsonPointer]struct{}{}, + } + if err := rr.collectResources(r, doc, u, "", dialect{rr.defaultDraft, nil}); err != nil { + return nil, err + } + if !strings.HasPrefix(u.String(), "http://json-schema.org/") && + !strings.HasPrefix(u.String(), "https://json-schema.org/") { + if err := rr.validate(r, doc, ""); err != nil { + return nil, err + } + } + + rr.roots[u] = r + return r, nil +} + +func (rr *roots) resolveFragment(uf urlFrag) (urlPtr, error) { + r, err := rr.orLoad(uf.url) + if err != nil { + return urlPtr{}, err + } + return r.resolveFragment(uf.frag) +} + +func (rr *roots) collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := r.subschemasProcessed[schPtr]; ok { + return nil + } + if err := rr._collectResources(r, sch, base, schPtr, fallback); err != nil { + return err + } + r.subschemasProcessed[schPtr] = struct{}{} + return nil +} + +func (rr *roots) _collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + obj, ok := sch.(map[string]any) + if !ok { + if schPtr.isEmpty() { + // root resource + res := newResource(schPtr, base) + res.dialect = fallback + r.resources[schPtr] = res + } + return nil + } + + hasSchema := false + if sch, ok := obj["$schema"]; ok { + if _, ok := sch.(string); ok { + hasSchema = true + } + } + + draft, err := rr.loader.getDraft(urlPtr{r.url, schPtr}, sch, fallback.draft, map[url]struct{}{}) + if err != nil { + return err + } + id := draft.getID(obj) + if id == "" && !schPtr.isEmpty() { + // ignore $schema + draft = fallback.draft + hasSchema = false + id = draft.getID(obj) + } + + var res *resource + if id != "" { + uf, err := base.join(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseIDError{loc.String()} + } + base = uf.url + res = newResource(schPtr, base) + } else if schPtr.isEmpty() { + // root resource + res = newResource(schPtr, base) + } + + if res != nil { + found := false + for _, res := range r.resources { + if res.id == base { + found = true + if res.ptr != schPtr { + return &DuplicateIDError{base.String(), r.url.String(), string(schPtr), string(res.ptr)} + } + } + } + if !found { + if hasSchema { + vocabs, err := rr.loader.getMetaVocabs(sch, draft, rr.vocabularies) + if err != nil { + return err + } + res.dialect = dialect{draft, vocabs} + } else { + res.dialect = fallback + } + r.resources[schPtr] = res + } + } + + var baseRes *resource + for _, res := range r.resources { + if res.id == base { + baseRes = res + break + } + } + if baseRes == nil { + panic("baseres is nil") + } + + // found base resource 
+ if err := r.collectAnchors(sch, schPtr, baseRes); err != nil { + return err + } + + // process subschemas + subschemas := map[jsonPointer]any{} + for _, sp := range draft.subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + for _, vocab := range baseRes.dialect.activeVocabs(true, rr.vocabularies) { + if v := rr.vocabularies[vocab]; v != nil { + for _, sp := range v.Subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + } + } + for ptr, v := range subschemas { + if err := rr.collectResources(r, v, base, ptr, baseRes.dialect); err != nil { + return err + } + } + + return nil +} + +func (rr *roots) ensureSubschema(up urlPtr) error { + r, err := rr.orLoad(up.url) + if err != nil { + return err + } + if _, ok := r.subschemasProcessed[up.ptr]; ok { + return nil + } + v, err := up.lookup(r.doc) + if err != nil { + return err + } + rClone := r.clone() + if err := rr.addSubschema(rClone, up.ptr); err != nil { + return err + } + if err := rr.validate(rClone, v, up.ptr); err != nil { + return err + } + rr.roots[r.url] = rClone + return nil +} + +func (rr *roots) addSubschema(r *root, ptr jsonPointer) error { + v, err := (&urlPtr{r.url, ptr}).lookup(r.doc) + if err != nil { + return err + } + base := r.resource(ptr) + baseURL := base.id + if err := rr.collectResources(r, v, baseURL, ptr, base.dialect); err != nil { + return err + } + + // collect anchors + if _, ok := r.resources[ptr]; !ok { + res := r.resource(ptr) + if err := r.collectAnchors(v, ptr, res); err != nil { + return err + } + } + return nil +} + +func (rr *roots) validate(r *root, v any, ptr jsonPointer) error { + dialect := r.resource(ptr).dialect + meta := dialect.getSchema(rr.assertVocabs, rr.vocabularies) + if err := meta.validate(v, rr.regexpEngine, meta, r.resources, rr.assertVocabs, rr.vocabularies); err != nil { + up := urlPtr{r.url, ptr} + return &SchemaValidationError{URL: up.String(), Err: err} + } + return nil +} + +// -- + +type InvalidMetaSchemaURLError struct { + URL string + Err error +} + +func (e *InvalidMetaSchemaURLError) Error() string { + return fmt.Sprintf("invalid $schema in %q: %v", e.URL, e.Err) +} + +// -- + +type UnsupportedDraftError struct { + URL string +} + +func (e *UnsupportedDraftError) Error() string { + return fmt.Sprintf("draft %q is not supported", e.URL) +} + +// -- + +type MetaSchemaCycleError struct { + URL string +} + +func (e *MetaSchemaCycleError) Error() string { + return fmt.Sprintf("cycle in resolving $schema in %q", e.URL) +} + +// -- + +type MetaSchemaMismatchError struct { + URL string +} + +func (e *MetaSchemaMismatchError) Error() string { + return fmt.Sprintf("$schema in %q does not match with $schema in root", e.URL) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go new file mode 100644 index 0000000000..b4c1f37afb --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go @@ -0,0 +1,254 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Schema is the representation of a compiled +// jsonschema. 
+type Schema struct { + up urlPtr + resource *Schema + dynamicAnchors map[string]*Schema + allPropsEvaluated bool + allItemsEvaluated bool + numItemsEvaluated int + + DraftVersion int + Location string + + // type agnostic -- + Bool *bool // boolean schema + ID string + Ref *Schema + Anchor string + RecursiveRef *Schema + RecursiveAnchor bool + DynamicRef *DynamicRef + DynamicAnchor string // "" if not specified + Types *Types + Enum *Enum + Const *any + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema + Else *Schema + Format *Format + + // object -- + MaxProperties *int + MinProperties *int + Required []string + PropertyNames *Schema + Properties map[string]*Schema + PatternProperties map[Regexp]*Schema + AdditionalProperties any // nil or bool or *Schema + Dependencies map[string]any // value is []string or *Schema + DependentRequired map[string][]string + DependentSchemas map[string]*Schema + UnevaluatedProperties *Schema + + // array -- + MinItems *int + MaxItems *int + UniqueItems bool + Contains *Schema + MinContains *int + MaxContains *int + Items any // nil or []*Schema or *Schema + AdditionalItems any // nil or bool or *Schema + PrefixItems []*Schema + Items2020 *Schema + UnevaluatedItems *Schema + + // string -- + MinLength *int + MaxLength *int + Pattern Regexp + ContentEncoding *Decoder + ContentMediaType *MediaType + ContentSchema *Schema + + // number -- + Maximum *big.Rat + Minimum *big.Rat + ExclusiveMaximum *big.Rat + ExclusiveMinimum *big.Rat + MultipleOf *big.Rat + + Extensions []SchemaExt + + // annotations -- + Title string + Description string + Default *any + Comment string + ReadOnly bool + WriteOnly bool + Examples []any + Deprecated bool +} + +// -- + +type jsonType int + +const ( + invalidType jsonType = 0 + nullType jsonType = 1 << iota + booleanType + numberType + integerType + stringType + arrayType + objectType +) + +func typeOf(v any) jsonType { + switch v.(type) { + case nil: + return nullType + case bool: + return booleanType + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return numberType + case string: + return stringType + case []any: + return arrayType + case map[string]any: + return objectType + default: + return invalidType + } +} + +func typeFromString(s string) jsonType { + switch s { + case "null": + return nullType + case "boolean": + return booleanType + case "number": + return numberType + case "integer": + return integerType + case "string": + return stringType + case "array": + return arrayType + case "object": + return objectType + } + return invalidType +} + +func (jt jsonType) String() string { + switch jt { + case nullType: + return "null" + case booleanType: + return "boolean" + case numberType: + return "number" + case integerType: + return "integer" + case stringType: + return "string" + case arrayType: + return "array" + case objectType: + return "object" + } + return "" +} + +// -- + +// Types encapsulates list of json value types. +type Types int + +func newTypes(v any) *Types { + var types Types + switch v := v.(type) { + case string: + types.Add(v) + case []any: + for _, item := range v { + if s, ok := item.(string); ok { + types.Add(s) + } + } + } + if types.IsEmpty() { + return nil + } + return &types +} + +func (tt Types) IsEmpty() bool { + return tt == 0 +} + +// Add specified json type. If typ is +// not valid json type it is ignored. 
+func (tt *Types) Add(typ string) { + tt.add(typeFromString(typ)) +} + +func (tt *Types) add(t jsonType) { + *tt = Types(int(*tt) | int(t)) +} + +func (tt Types) contains(t jsonType) bool { + return int(tt)&int(t) != 0 +} + +func (tt Types) ToStrings() []string { + types := []jsonType{ + nullType, booleanType, numberType, integerType, + stringType, arrayType, objectType, + } + var arr []string + for _, t := range types { + if tt.contains(t) { + arr = append(arr, t.String()) + } + } + return arr +} + +func (tt Types) String() string { + return fmt.Sprintf("%v", tt.ToStrings()) +} + +// -- + +type Enum struct { + Values []any + types Types +} + +func newEnum(arr []any) *Enum { + var types Types + for _, item := range arr { + types.add(typeOf(item)) + } + return &Enum{arr, types} +} + +// -- + +type DynamicRef struct { + Ref *Schema + Anchor string // "" if not specified +} + +func newSchema(up urlPtr) *Schema { + return &Schema{up: up, Location: up.String()} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go new file mode 100644 index 0000000000..c6f8e77526 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go @@ -0,0 +1,464 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "hash/maphash" + "math/big" + gourl "net/url" + "path/filepath" + "runtime" + "slices" + "strconv" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +// -- + +type url (string) + +func (u url) String() string { + return string(u) +} + +func (u url) join(ref string) (*urlFrag, error) { + base, err := gourl.Parse(string(u)) + if err != nil { + return nil, &ParseURLError{URL: u.String(), Err: err} + } + + ref, frag, err := splitFragment(ref) + if err != nil { + return nil, err + } + refURL, err := gourl.Parse(ref) + if err != nil { + return nil, &ParseURLError{URL: ref, Err: err} + } + resolved := base.ResolveReference(refURL) + + // see https://github.com/golang/go/issues/66084 (net/url: ResolveReference ignores Opaque value) + if !refURL.IsAbs() && base.Opaque != "" { + resolved.Opaque = base.Opaque + } + + return &urlFrag{url: url(resolved.String()), frag: frag}, nil +} + +// -- + +type jsonPointer string + +func escape(tok string) string { + tok = strings.ReplaceAll(tok, "~", "~0") + tok = strings.ReplaceAll(tok, "/", "~1") + return tok +} + +func unescape(tok string) (string, bool) { + tilde := strings.IndexByte(tok, '~') + if tilde == -1 { + return tok, true + } + sb := new(strings.Builder) + for { + sb.WriteString(tok[:tilde]) + tok = tok[tilde+1:] + if tok == "" { + return "", false + } + switch tok[0] { + case '0': + sb.WriteByte('~') + case '1': + sb.WriteByte('/') + default: + return "", false + } + tok = tok[1:] + tilde = strings.IndexByte(tok, '~') + if tilde == -1 { + sb.WriteString(tok) + break + } + } + return sb.String(), true +} + +func (ptr jsonPointer) isEmpty() bool { + return string(ptr) == "" +} + +func (ptr jsonPointer) concat(next jsonPointer) jsonPointer { + return jsonPointer(fmt.Sprintf("%s%s", ptr, next)) +} + +func (ptr jsonPointer) append(tok string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s", ptr, escape(tok))) +} + +func (ptr jsonPointer) append2(tok1, tok2 string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s/%s", ptr, escape(tok1), escape(tok2))) +} + +// -- + +type anchor string + +// -- + +type fragment string + +func decode(frag string) (string, error) { + return gourl.PathUnescape(frag) +} + +// avoids 
escaping /. +func encode(frag string) string { + var sb strings.Builder + for i, tok := range strings.Split(frag, "/") { + if i > 0 { + sb.WriteByte('/') + } + sb.WriteString(gourl.PathEscape(tok)) + } + return sb.String() +} + +func splitFragment(str string) (string, fragment, error) { + u, f := split(str) + f, err := decode(f) + if err != nil { + return "", fragment(""), &ParseURLError{URL: str, Err: err} + } + return u, fragment(f), nil +} + +func split(str string) (string, string) { + hash := strings.IndexByte(str, '#') + if hash == -1 { + return str, "" + } + return str[:hash], str[hash+1:] +} + +func (frag fragment) convert() any { + str := string(frag) + if str == "" || strings.HasPrefix(str, "/") { + return jsonPointer(str) + } + return anchor(str) +} + +// -- + +type urlFrag struct { + url url + frag fragment +} + +func startsWithWindowsDrive(s string) bool { + if s != "" && strings.HasPrefix(s[1:], `:\`) { + return (s[0] >= 'a' && s[0] <= 'z') || (s[0] >= 'A' && s[0] <= 'Z') + } + return false +} + +func absolute(input string) (*urlFrag, error) { + u, frag, err := splitFragment(input) + if err != nil { + return nil, err + } + + // if windows absolute file path, convert to file url + // because: net/url parses driver name as scheme + if runtime.GOOS == "windows" && startsWithWindowsDrive(u) { + u = "file:///" + filepath.ToSlash(u) + } + + gourl, err := gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + if gourl.IsAbs() { + return &urlFrag{url(u), frag}, nil + } + + // avoid filesystem api in wasm + if runtime.GOOS != "js" { + abs, err := filepath.Abs(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + u = abs + } + if !strings.HasPrefix(u, "/") { + u = "/" + u + } + u = "file://" + filepath.ToSlash(u) + + _, err = gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + return &urlFrag{url: url(u), frag: frag}, nil +} + +func (uf *urlFrag) String() string { + return fmt.Sprintf("%s#%s", uf.url, encode(string(uf.frag))) +} + +// -- + +type urlPtr struct { + url url + ptr jsonPointer +} + +func (up *urlPtr) lookup(v any) (any, error) { + for _, tok := range strings.Split(string(up.ptr), "/")[1:] { + tok, ok := unescape(tok) + if !ok { + return nil, &InvalidJsonPointerError{up.String()} + } + switch val := v.(type) { + case map[string]any: + if pvalue, ok := val[tok]; ok { + v = pvalue + continue + } + case []any: + if index, err := strconv.Atoi(tok); err == nil { + if index >= 0 && index < len(val) { + v = val[index] + continue + } + } + } + return nil, &JSONPointerNotFoundError{up.String()} + } + return v, nil +} + +func (up *urlPtr) format(tok string) string { + return fmt.Sprintf("%s#%s/%s", up.url, encode(string(up.ptr)), encode(escape(tok))) +} + +func (up *urlPtr) String() string { + return fmt.Sprintf("%s#%s", up.url, encode(string(up.ptr))) +} + +// -- + +func minInt(i, j int) int { + if i < j { + return i + } + return j +} + +func strVal(obj map[string]any, prop string) (string, bool) { + v, ok := obj[prop] + if !ok { + return "", false + } + s, ok := v.(string) + return s, ok +} + +func isInteger(num any) bool { + rat, ok := new(big.Rat).SetString(fmt.Sprint(num)) + return ok && rat.IsInt() +} + +// quote returns single-quoted string. +// used for embedding quoted strings in json. 
+func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func equals(v1, v2 any) (bool, ErrorKind) { + switch v1 := v1.(type) { + case map[string]any: + v2, ok := v2.(map[string]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for k, val1 := range v1 { + val2, ok := v2[k] + if !ok { + return false, nil + } + if ok, k := equals(val1, val2); !ok || k != nil { + return ok, k + } + } + return true, nil + case []any: + v2, ok := v2.([]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for i := range v1 { + if ok, k := equals(v1[i], v2[i]); !ok || k != nil { + return ok, k + } + } + return true, nil + case nil: + return v2 == nil, nil + case bool: + v2, ok := v2.(bool) + return ok && v1 == v2, nil + case string: + v2, ok := v2.(string) + return ok && v1 == v2, nil + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + num1, ok1 := new(big.Rat).SetString(fmt.Sprint(v1)) + num2, ok2 := new(big.Rat).SetString(fmt.Sprint(v2)) + return ok1 && ok2 && num1.Cmp(num2) == 0, nil + default: + return false, &kind.InvalidJsonValue{Value: v1} + } +} + +func duplicates(arr []any) (int, int, ErrorKind) { + if len(arr) <= 20 { + for i := 1; i < len(arr); i++ { + for j := 0; j < i; j++ { + if ok, k := equals(arr[i], arr[j]); ok || k != nil { + return j, i, k + } + } + } + return -1, -1, nil + } + + m := make(map[uint64][]int) + h := new(maphash.Hash) + for i, item := range arr { + h.Reset() + writeHash(item, h) + hash := h.Sum64() + indexes, ok := m[hash] + if ok { + for _, j := range indexes { + if ok, k := equals(item, arr[j]); ok || k != nil { + return j, i, k + } + } + } + indexes = append(indexes, i) + m[hash] = indexes + } + return -1, -1, nil +} + +func writeHash(v any, h *maphash.Hash) ErrorKind { + switch v := v.(type) { + case map[string]any: + _ = h.WriteByte(0) + props := make([]string, 0, len(v)) + for prop := range v { + props = append(props, prop) + } + slices.Sort(props) + for _, prop := range props { + writeHash(prop, h) + writeHash(v[prop], h) + } + case []any: + _ = h.WriteByte(1) + for _, item := range v { + writeHash(item, h) + } + case nil: + _ = h.WriteByte(2) + case bool: + _ = h.WriteByte(3) + if v { + _ = h.WriteByte(1) + } else { + _ = h.WriteByte(0) + } + case string: + _ = h.WriteByte(4) + _, _ = h.WriteString(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + _ = h.WriteByte(5) + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + _, _ = h.Write(num.Num().Bytes()) + _, _ = h.Write(num.Denom().Bytes()) + default: + return &kind.InvalidJsonValue{Value: v} + } + return nil +} + +// -- + +type ParseURLError struct { + URL string + Err error +} + +func (e *ParseURLError) Error() string { + return fmt.Sprintf("error in parsing %q: %v", e.URL, e.Err) +} + +// -- + +type InvalidJsonPointerError struct { + URL string +} + +func (e *InvalidJsonPointerError) Error() string { + return fmt.Sprintf("invalid json-pointer %q", e.URL) +} + +// -- + +type JSONPointerNotFoundError struct { + URL string +} + +func (e *JSONPointerNotFoundError) Error() string { + return fmt.Sprintf("json-pointer in %q not found", e.URL) +} + +// -- + +type SchemaValidationError struct { + URL string + Err error +} + +func (e *SchemaValidationError) Error() string { + return fmt.Sprintf("%q is not valid against metaschema: %v", e.URL, e.Err) +} + +// -- + +// 
LocalizableError is an error whose message is localizable.
+func LocalizableError(format string, args ...any) error {
+	return &localizableError{format, args}
+}
+
+type localizableError struct {
+	msg  string
+	args []any
+}
+
+func (e *localizableError) Error() string {
+	return fmt.Sprintf(e.msg, e.args...)
+}
+
+func (e *localizableError) LocalizedError(p *message.Printer) string {
+	return p.Sprintf(e.msg, e.args...)
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go
new file mode 100644
index 0000000000..e2ace37a9f
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go
@@ -0,0 +1,975 @@
+package jsonschema
+
+import (
+	"encoding/json"
+	"fmt"
+	"math/big"
+	"slices"
+	"strconv"
+	"unicode/utf8"
+
+	"github.com/santhosh-tekuri/jsonschema/v6/kind"
+	"golang.org/x/text/message"
+)
+
+func (sch *Schema) Validate(v any) error {
+	return sch.validate(v, nil, nil, nil, false, nil)
+}
+
+func (sch *Schema) validate(v any, regexpEngine RegexpEngine, meta *Schema, resources map[jsonPointer]*resource, assertVocabs bool, vocabularies map[string]*Vocabulary) error {
+	vd := validator{
+		v:            v,
+		vloc:         make([]string, 0, 8),
+		sch:          sch,
+		scp:          &scope{sch, "", 0, nil},
+		uneval:       unevalFrom(v, sch, false),
+		errors:       nil,
+		boolResult:   false,
+		regexpEngine: regexpEngine,
+		meta:         meta,
+		resources:    resources,
+		assertVocabs: assertVocabs,
+		vocabularies: vocabularies,
+	}
+	if _, err := vd.validate(); err != nil {
+		verr := err.(*ValidationError)
+		var causes []*ValidationError
+		if _, ok := verr.ErrorKind.(*kind.Group); ok {
+			causes = verr.Causes
+		} else {
+			causes = []*ValidationError{verr}
+		}
+		return &ValidationError{
+			SchemaURL:        sch.Location,
+			InstanceLocation: nil,
+			ErrorKind:        &kind.Schema{Location: sch.Location},
+			Causes:           causes,
+		}
+	}
+
+	return nil
+}
+
+type validator struct {
+	v            any
+	vloc         []string
+	sch          *Schema
+	scp          *scope
+	uneval       *uneval
+	errors       []*ValidationError
+	boolResult   bool // caller only wants to know valid or not (not the actual error)
+	regexpEngine RegexpEngine
+
+	// meta validation
+	meta         *Schema                   // set only when validating with metaschema
+	resources    map[jsonPointer]*resource // resources which should be validated with their dialect
+	assertVocabs bool
+	vocabularies map[string]*Vocabulary
+}
+
+func (vd *validator) validate() (*uneval, error) {
+	s := vd.sch
+	v := vd.v
+
+	// boolean --
+	if s.Bool != nil {
+		if *s.Bool {
+			return vd.uneval, nil
+		} else {
+			return nil, vd.error(&kind.FalseSchema{})
+		}
+	}
+
+	// check cycle --
+	if scp := vd.scp.checkCycle(); scp != nil {
+		return nil, vd.error(&kind.RefCycle{
+			URL:              s.Location,
+			KeywordLocation1: vd.scp.kwLoc(),
+			KeywordLocation2: scp.kwLoc(),
+		})
+	}
+
+	t := typeOf(v)
+	if t == invalidType {
+		return nil, vd.error(&kind.InvalidJsonValue{Value: v})
+	}
+
+	// type --
+	if s.Types != nil && !s.Types.IsEmpty() {
+		matched := s.Types.contains(t) || (s.Types.contains(integerType) && t == numberType && isInteger(v))
+		if !matched {
+			return nil, vd.error(&kind.Type{Got: t.String(), Want: s.Types.ToStrings()})
+		}
+	}
+
+	// const --
+	if s.Const != nil {
+		ok, k := equals(v, *s.Const)
+		if k != nil {
+			return nil, vd.error(k)
+		} else if !ok {
+			return nil, vd.error(&kind.Const{Got: v, Want: *s.Const})
+		}
+	}
+
+	// enum --
+	if s.Enum != nil {
+		matched := s.Enum.types.contains(typeOf(v))
+		if matched {
+			matched = false
+			for _, item := range s.Enum.Values {
+				ok, k := equals(v, item)
+				if k != 
nil { + return nil, vd.error(k) + } else if ok { + matched = true + break + } + } + } + if !matched { + return nil, vd.error(&kind.Enum{Got: v, Want: s.Enum.Values}) + } + } + + // format -- + if s.Format != nil { + var err error + if s.Format.Name == "regex" && vd.regexpEngine != nil { + err = vd.regexpEngine.validate(v) + } else { + err = s.Format.Validate(v) + } + if err != nil { + return nil, vd.error(&kind.Format{Got: v, Want: s.Format.Name, Err: err}) + } + } + + // $ref -- + if s.Ref != nil { + err := vd.validateRef(s.Ref, "$ref") + if s.DraftVersion < 2019 { + return vd.uneval, err + } + if err != nil { + vd.addErr(err) + } + } + + // type specific validations -- + switch v := v.(type) { + case map[string]any: + vd.objValidate(v) + case []any: + vd.arrValidate(v) + case string: + vd.strValidate(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + vd.numValidate(v) + } + + if len(vd.errors) == 0 || !vd.boolResult { + if s.DraftVersion >= 2019 { + vd.validateRefs() + } + vd.condValidate() + + for _, ext := range s.Extensions { + ext.Validate(&ValidatorContext{vd}, v) + } + + if s.DraftVersion >= 2019 { + vd.unevalValidate() + } + } + + switch len(vd.errors) { + case 0: + return vd.uneval, nil + case 1: + return nil, vd.errors[0] + default: + verr := vd.error(&kind.Group{}) + verr.Causes = vd.errors + return nil, verr + } +} + +func (vd *validator) objValidate(obj map[string]any) { + s := vd.sch + + // minProperties -- + if s.MinProperties != nil { + if len(obj) < *s.MinProperties { + vd.addError(&kind.MinProperties{Got: len(obj), Want: *s.MinProperties}) + } + } + + // maxProperties -- + if s.MaxProperties != nil { + if len(obj) > *s.MaxProperties { + vd.addError(&kind.MaxProperties{Got: len(obj), Want: *s.MaxProperties}) + } + } + + // required -- + if len(s.Required) > 0 { + if missing := vd.findMissing(obj, s.Required); missing != nil { + vd.addError(&kind.Required{Missing: missing}) + } + } + + if vd.boolResult && len(vd.errors) > 0 { + return + } + + // dependencies -- + for pname, dep := range s.Dependencies { + if _, ok := obj[pname]; ok { + switch dep := dep.(type) { + case []string: + if missing := vd.findMissing(obj, dep); missing != nil { + vd.addError(&kind.Dependency{Prop: pname, Missing: missing}) + } + case *Schema: + vd.addErr(vd.validateSelf(dep, "", false)) + } + } + } + + var additionalPros []string + for pname, pvalue := range obj { + if vd.boolResult && len(vd.errors) > 0 { + return + } + evaluated := false + + // properties -- + if sch, ok := s.Properties[pname]; ok { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + + // patternProperties -- + for regex, sch := range s.PatternProperties { + if regex.MatchString(pname) { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + } + + if !evaluated && s.AdditionalProperties != nil { + evaluated = true + switch additional := s.AdditionalProperties.(type) { + case bool: + if !additional { + additionalPros = append(additionalPros, pname) + } + case *Schema: + vd.addErr(vd.validateVal(additional, pvalue, pname)) + } + } + + if evaluated { + delete(vd.uneval.props, pname) + } + } + if len(additionalPros) > 0 { + vd.addError(&kind.AdditionalProperties{Properties: additionalPros}) + } + + if s.DraftVersion == 4 { + return + } + + // propertyNames -- + if s.PropertyNames != nil { + for pname := range obj { + sch, meta, resources := s.PropertyNames, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = 
res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err := sch.validate(pname, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.PropertyNames.Location + verr.ErrorKind = &kind.PropertyNames{Property: pname} + vd.addErr(verr) + } + } + } + + if s.DraftVersion == 6 { + return + } + + // dependentSchemas -- + for pname, sch := range s.DependentSchemas { + if _, ok := obj[pname]; ok { + vd.addErr(vd.validateSelf(sch, "", false)) + } + } + + // dependentRequired -- + for pname, reqd := range s.DependentRequired { + if _, ok := obj[pname]; ok { + if missing := vd.findMissing(obj, reqd); missing != nil { + vd.addError(&kind.DependentRequired{Prop: pname, Missing: missing}) + } + } + } +} + +func (vd *validator) arrValidate(arr []any) { + s := vd.sch + + // minItems -- + if s.MinItems != nil { + if len(arr) < *s.MinItems { + vd.addError(&kind.MinItems{Got: len(arr), Want: *s.MinItems}) + } + } + + // maxItems -- + if s.MaxItems != nil { + if len(arr) > *s.MaxItems { + vd.addError(&kind.MaxItems{Got: len(arr), Want: *s.MaxItems}) + } + } + + // uniqueItems -- + if s.UniqueItems && len(arr) > 1 { + i, j, k := duplicates(arr) + if k != nil { + vd.addError(k) + } else if i != -1 { + vd.addError(&kind.UniqueItems{Duplicates: [2]int{i, j}}) + } + } + + if s.DraftVersion < 2020 { + evaluated := 0 + + // items -- + switch items := s.Items.(type) { + case *Schema: + for i, item := range arr { + vd.addErr(vd.validateVal(items, item, strconv.Itoa(i))) + } + evaluated = len(arr) + case []*Schema: + min := minInt(len(arr), len(items)) + for i, item := range arr[:min] { + vd.addErr(vd.validateVal(items[i], item, strconv.Itoa(i))) + } + evaluated = min + } + + // additionalItems -- + if s.AdditionalItems != nil { + switch additional := s.AdditionalItems.(type) { + case bool: + if !additional && evaluated != len(arr) { + vd.addError(&kind.AdditionalItems{Count: len(arr) - evaluated}) + } + case *Schema: + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(additional, item, strconv.Itoa(i))) + } + } + } + } else { + evaluated := minInt(len(s.PrefixItems), len(arr)) + + // prefixItems -- + for i, item := range arr[:evaluated] { + vd.addErr(vd.validateVal(s.PrefixItems[i], item, strconv.Itoa(i))) + } + + // items2020 -- + if s.Items2020 != nil { + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(s.Items2020, item, strconv.Itoa(i))) + } + } + } + + // contains -- + if s.Contains != nil { + var errors []*ValidationError + var matched []int + + for i, item := range arr { + if err := vd.validateVal(s.Contains, item, strconv.Itoa(i)); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = append(matched, i) + if s.DraftVersion >= 2020 { + delete(vd.uneval.items, i) + } + } + } + + // minContains -- + if s.MinContains != nil { + if len(matched) < *s.MinContains { + vd.addErrors(errors, &kind.MinContains{Got: matched, Want: *s.MinContains}) + } + } else if len(matched) == 0 { + vd.addErrors(errors, &kind.Contains{}) + } + + // maxContains -- + if s.MaxContains != nil { + if len(matched) > *s.MaxContains { + vd.addError(&kind.MaxContains{Got: matched, Want: *s.MaxContains}) + } + } + } +} + +func (vd *validator) strValidate(str string) { + s := vd.sch + + strLen := -1 + if s.MinLength != nil || s.MaxLength != nil { + strLen = utf8.RuneCount([]byte(str)) + } + + // minLength -- + if s.MinLength != nil { + if strLen < *s.MinLength { + 
vd.addError(&kind.MinLength{Got: strLen, Want: *s.MinLength}) + } + } + + // maxLength -- + if s.MaxLength != nil { + if strLen > *s.MaxLength { + vd.addError(&kind.MaxLength{Got: strLen, Want: *s.MaxLength}) + } + } + + // pattern -- + if s.Pattern != nil { + if !s.Pattern.MatchString(str) { + vd.addError(&kind.Pattern{Got: str, Want: s.Pattern.String()}) + } + } + + if s.DraftVersion == 6 { + return + } + + var err error + + // contentEncoding -- + decoded := []byte(str) + if s.ContentEncoding != nil { + decoded, err = s.ContentEncoding.Decode(str) + if err != nil { + decoded = nil + vd.addError(&kind.ContentEncoding{Want: s.ContentEncoding.Name, Err: err}) + } + } + + var deserialized *any + if decoded != nil && s.ContentMediaType != nil { + if s.ContentSchema == nil { + err = s.ContentMediaType.Validate(decoded) + } else { + var value any + value, err = s.ContentMediaType.UnmarshalJSON(decoded) + if err == nil { + deserialized = &value + } + } + if err != nil { + vd.addError(&kind.ContentMediaType{ + Got: decoded, + Want: s.ContentMediaType.Name, + Err: err, + }) + } + } + + if deserialized != nil && s.ContentSchema != nil { + sch, meta, resources := s.ContentSchema, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err = sch.validate(*deserialized, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.Location + verr.ErrorKind = &kind.ContentSchema{} + vd.addErr(verr) + } + } +} + +func (vd *validator) numValidate(v any) { + s := vd.sch + + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprintf("%v", v)) + } + return numVal + } + + // minimum -- + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + vd.addError(&kind.Minimum{Got: num(), Want: s.Minimum}) + } + + // maximum -- + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + vd.addError(&kind.Maximum{Got: num(), Want: s.Maximum}) + } + + // exclusiveMinimum + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + vd.addError(&kind.ExclusiveMinimum{Got: num(), Want: s.ExclusiveMinimum}) + } + + // exclusiveMaximum + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + vd.addError(&kind.ExclusiveMaximum{Got: num(), Want: s.ExclusiveMaximum}) + } + + // multipleOf + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + vd.addError(&kind.MultipleOf{Got: num(), Want: s.MultipleOf}) + } + } +} + +func (vd *validator) condValidate() { + s := vd.sch + + // not -- + if s.Not != nil { + if vd.validateSelf(s.Not, "", true) == nil { + vd.addError(&kind.Not{}) + } + } + + // allOf -- + if len(s.AllOf) > 0 { + var errors []*ValidationError + for _, sch := range s.AllOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + if vd.boolResult { + break + } + } + } + if len(errors) != 0 { + vd.addErrors(errors, &kind.AllOf{}) + } + } + + // anyOf + if len(s.AnyOf) > 0 { + var matched bool + var errors []*ValidationError + for _, sch := range s.AnyOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = true + // for uneval, all schemas must be evaluated + if vd.uneval.isEmpty() { + break + } + } + } + if !matched { + vd.addErrors(errors, &kind.AnyOf{}) + } + } + + // oneOf + if len(s.OneOf) > 0 { + var 
matched = -1 + var errors []*ValidationError + for i, sch := range s.OneOf { + if err := vd.validateSelf(sch, "", matched != -1); err != nil { + if matched == -1 { + errors = append(errors, err.(*ValidationError)) + } + } else { + if matched == -1 { + matched = i + } else { + vd.addError(&kind.OneOf{Subschemas: []int{matched, i}}) + break + } + } + } + if matched == -1 { + vd.addErrors(errors, &kind.OneOf{Subschemas: nil}) + } + } + + // if, then, else -- + if s.If != nil { + if vd.validateSelf(s.If, "", true) == nil { + if s.Then != nil { + vd.addErr(vd.validateSelf(s.Then, "", false)) + } + } else if s.Else != nil { + vd.addErr(vd.validateSelf(s.Else, "", false)) + } + } +} + +func (vd *validator) unevalValidate() { + s := vd.sch + + // unevaluatedProperties + if obj, ok := vd.v.(map[string]any); ok && s.UnevaluatedProperties != nil { + for pname := range vd.uneval.props { + if pvalue, ok := obj[pname]; ok { + vd.addErr(vd.validateVal(s.UnevaluatedProperties, pvalue, pname)) + } + } + vd.uneval.props = nil + } + + // unevaluatedItems + if arr, ok := vd.v.([]any); ok && s.UnevaluatedItems != nil { + for i := range vd.uneval.items { + vd.addErr(vd.validateVal(s.UnevaluatedItems, arr[i], strconv.Itoa(i))) + } + vd.uneval.items = nil + } +} + +// validation helpers -- + +func (vd *validator) validateSelf(sch *Schema, refKw string, boolResult bool) error { + scp := vd.scp.child(sch, refKw, vd.scp.vid) + uneval := unevalFrom(vd.v, sch, !vd.uneval.isEmpty()) + subvd := validator{ + v: vd.v, + vloc: vd.vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult || boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + uneval, err := subvd.validate() + if err == nil { + vd.uneval.merge(uneval) + } + return err +} + +func (vd *validator) validateVal(sch *Schema, v any, vtok string) error { + vloc := append(vd.vloc, vtok) + scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) validateValue(sch *Schema, v any, vpath []string) error { + vloc := append(vd.vloc, vpath...) 
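+	// Illustrative note (not part of the upstream file): the three helpers
+	// validateSelf, validateVal, and validateValue differ only in how the
+	// instance location grows. validateSelf re-validates the same value with
+	// vloc unchanged (used for $ref, allOf, and friends), validateVal descends
+	// one token such as a property name or array index, and validateValue
+	// (this function) descends a whole path at once, e.g. a vocabulary
+	// extension validating a nested value at ["address", "city"].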
+ scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) metaResource(sch *Schema) *resource { + if sch != vd.meta { + return nil + } + ptr := "" + for _, tok := range vd.instanceLocation() { + ptr += "/" + ptr += escape(tok) + } + return vd.resources[jsonPointer(ptr)] +} + +func (vd *validator) handleMeta() { + res := vd.metaResource(vd.sch) + if res == nil { + return + } + sch := res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + vd.meta = sch + vd.sch = sch +} + +// reference validation -- + +func (vd *validator) validateRef(sch *Schema, kw string) error { + err := vd.validateSelf(sch, kw, false) + if err != nil { + refErr := vd.error(&kind.Reference{Keyword: kw, URL: sch.Location}) + verr := err.(*ValidationError) + if _, ok := verr.ErrorKind.(*kind.Group); ok { + refErr.Causes = verr.Causes + } else { + refErr.Causes = append(refErr.Causes, verr) + } + return refErr + } + return nil +} + +func (vd *validator) resolveRecursiveAnchor(fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if scp.sch.resource.RecursiveAnchor { + sch = scp.sch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) resolveDynamicAnchor(name string, fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if dsch, ok := scp.sch.resource.dynamicAnchors[name]; ok { + sch = dsch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) validateRefs() { + // $recursiveRef -- + if sch := vd.sch.RecursiveRef; sch != nil { + if sch.RecursiveAnchor { + sch = vd.resolveRecursiveAnchor(sch) + } + vd.addErr(vd.validateRef(sch, "$recursiveRef")) + } + + // $dynamicRef -- + if dref := vd.sch.DynamicRef; dref != nil { + sch := dref.Ref // initial target + if dref.Anchor != "" { + // $dynamicRef includes anchor + if sch.DynamicAnchor == dref.Anchor { + // initial target has matching $dynamicAnchor + sch = vd.resolveDynamicAnchor(dref.Anchor, sch) + } + } + vd.addErr(vd.validateRef(sch, "$dynamicRef")) + } +} + +// error helpers -- + +func (vd *validator) instanceLocation() []string { + return slices.Clone(vd.vloc) +} + +func (vd *validator) error(kind ErrorKind) *ValidationError { + if vd.boolResult { + return &ValidationError{} + } + return &ValidationError{ + SchemaURL: vd.sch.Location, + InstanceLocation: vd.instanceLocation(), + ErrorKind: kind, + Causes: nil, + } +} + +func (vd *validator) addErr(err error) { + if err != nil { + vd.errors = append(vd.errors, err.(*ValidationError)) + } +} + +func (vd *validator) addError(kind ErrorKind) { + vd.errors = append(vd.errors, vd.error(kind)) +} + +func (vd *validator) addErrors(errors []*ValidationError, kind ErrorKind) { + err := vd.error(kind) + err.Causes = errors + vd.errors = append(vd.errors, err) +} + +func (vd *validator) findMissing(obj map[string]any, reqd []string) []string { + var missing []string + for _, pname := range reqd { + if _, ok := obj[pname]; !ok { + if vd.boolResult { + return []string{} // non-nil + } + missing = append(missing, pname) + } + } + return missing +} + +// -- + +type scope struct { + sch *Schema + + // if empty, compute from self.sch and self.parent.sch. 
+ // not empty, only when there is a jump i.e, $ref, $XXXRef + refKeyword string + + // unique id of value being validated + // if two scopes validate same value, they will have + // same vid + vid int + + parent *scope +} + +func (sc *scope) child(sch *Schema, refKeyword string, vid int) *scope { + return &scope{sch, refKeyword, vid, sc} +} + +func (sc *scope) checkCycle() *scope { + scp := sc.parent + for scp != nil { + if scp.vid != sc.vid { + break + } + if scp.sch == sc.sch { + return scp + } + scp = scp.parent + } + return nil +} + +func (sc *scope) kwLoc() string { + var loc string + for sc.parent != nil { + if sc.refKeyword != "" { + loc = fmt.Sprintf("/%s%s", escape(sc.refKeyword), loc) + } else { + cur := sc.sch.Location + parent := sc.parent.sch.Location + loc = fmt.Sprintf("%s%s", cur[len(parent):], loc) + } + sc = sc.parent + } + return loc +} + +// -- + +type uneval struct { + props map[string]struct{} + items map[int]struct{} +} + +func unevalFrom(v any, sch *Schema, callerNeeds bool) *uneval { + uneval := &uneval{} + switch v := v.(type) { + case map[string]any: + if !sch.allPropsEvaluated && (callerNeeds || sch.UnevaluatedProperties != nil) { + uneval.props = map[string]struct{}{} + for k := range v { + uneval.props[k] = struct{}{} + } + } + case []any: + if !sch.allItemsEvaluated && (callerNeeds || sch.UnevaluatedItems != nil) && sch.numItemsEvaluated < len(v) { + uneval.items = map[int]struct{}{} + for i := sch.numItemsEvaluated; i < len(v); i++ { + uneval.items[i] = struct{}{} + } + } + } + return uneval +} + +func (ue *uneval) merge(other *uneval) { + for k := range ue.props { + if _, ok := other.props[k]; !ok { + delete(ue.props, k) + } + } + for i := range ue.items { + if _, ok := other.items[i]; !ok { + delete(ue.items, i) + } + } +} + +func (ue *uneval) isEmpty() bool { + return len(ue.props) == 0 && len(ue.items) == 0 +} + +// -- + +type ValidationError struct { + // absolute, dereferenced schema location. + SchemaURL string + + // location of the JSON value within the instance being validated. + InstanceLocation []string + + // kind of error + ErrorKind ErrorKind + + // holds nested errors + Causes []*ValidationError +} + +type ErrorKind interface { + KeywordPath() []string + LocalizedString(*message.Printer) string +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go new file mode 100644 index 0000000000..c81cb700f1 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go @@ -0,0 +1,111 @@ +package jsonschema + +// CompilerContext provides helpers for +// compiling a [Vocabulary]. +type CompilerContext struct { + c *objCompiler +} + +func (ctx *CompilerContext) Enqueue(schPath []string) *Schema { + ptr := ctx.c.up.ptr + for _, tok := range schPath { + ptr = ptr.append(tok) + } + return ctx.c.enqueuePtr(ptr) +} + +// Vocabulary defines a set of keywords, their syntax and +// their semantics. +type Vocabulary struct { + // URL identifier for this Vocabulary. + URL string + + // Schema that is used to validate the keywords that is introduced by this + // vocabulary. + Schema *Schema + + // Subschemas lists the possible locations of subschemas introduced by + // this vocabulary. + Subschemas []SchemaPath + + // Compile compiles the keywords(introduced by this vocabulary) in obj into [SchemaExt]. + // If obj does not contain any keywords introduced by this vocabulary, nil SchemaExt must + // be returned. 
+	Compile func(ctx *CompilerContext, obj map[string]any) (SchemaExt, error)
+}
+
+// --
+
+// SchemaExt is the compiled form of a vocabulary.
+type SchemaExt interface {
+	// Validate validates v against this schema extension; errors,
+	// if any, are reported to ctx.
+	Validate(ctx *ValidatorContext, v any)
+}
+
+// ValidatorContext provides helpers for
+// validating with [SchemaExt].
+type ValidatorContext struct {
+	vd *validator
+}
+
+// ValueLocation returns location of value as jsonpath token array.
+func (ctx *ValidatorContext) ValueLocation() []string {
+	return ctx.vd.vloc
+}
+
+// Validate validates v with sch. vpath gives path of v from current context value.
+func (ctx *ValidatorContext) Validate(sch *Schema, v any, vpath []string) error {
+	switch len(vpath) {
+	case 0:
+		return ctx.vd.validateSelf(sch, "", false)
+	case 1:
+		return ctx.vd.validateVal(sch, v, vpath[0])
+	default:
+		return ctx.vd.validateValue(sch, v, vpath)
+	}
+}
+
+// EvaluatedProp marks given property of current object as evaluated.
+func (ctx *ValidatorContext) EvaluatedProp(pname string) {
+	delete(ctx.vd.uneval.props, pname)
+}
+
+// EvaluatedItem marks the item at given index of current array as evaluated.
+func (ctx *ValidatorContext) EvaluatedItem(index int) {
+	delete(ctx.vd.uneval.items, index)
+}
+
+// AddError reports validation-error of given kind.
+func (ctx *ValidatorContext) AddError(k ErrorKind) {
+	ctx.vd.addError(k)
+}
+
+// AddErrors reports validation-errors of given kind.
+func (ctx *ValidatorContext) AddErrors(errors []*ValidationError, k ErrorKind) {
+	ctx.vd.addErrors(errors, k)
+}
+
+// AddErr reports the given err. This is typically used to report
+// the error created by subschema validation.
+//
+// NOTE that err must be of type *ValidationError.
+func (ctx *ValidatorContext) AddErr(err error) {
+	ctx.vd.addErr(err)
+}
+
+func (ctx *ValidatorContext) Equals(v1, v2 any) (bool, error) {
+	b, k := equals(v1, v2)
+	if k != nil {
+		return false, ctx.vd.error(k)
+	}
+	return b, nil
+}
+
+func (ctx *ValidatorContext) Duplicates(arr []any) (int, int, error) {
+	i, j, k := duplicates(arr)
+	if k != nil {
+		return -1, -1, ctx.vd.error(k)
+	}
+	return i, j, nil
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
index 915d5090dd..08c36e74f4 100644
--- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
@@ -1146,13 +1146,28 @@ func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
 		switch diff.Type {
 		case DiffInsert:
-			_, _ = buff.WriteString("\x1b[32m")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("\x1b[0m")
+			lines := strings.Split(text, "\n")
+			for i, line := range lines {
+				_, _ = buff.WriteString("\x1b[32m")
+				_, _ = buff.WriteString(line)
+				if i < len(lines)-1 {
+					_, _ = buff.WriteString("\x1b[0m\n")
+				} else {
+					_, _ = buff.WriteString("\x1b[0m")
+				}
+			}
+
 		case DiffDelete:
-			_, _ = buff.WriteString("\x1b[31m")
-			_, _ = buff.WriteString(text)
-			_, _ = buff.WriteString("\x1b[0m")
+			lines := strings.Split(text, "\n")
+			for i, line := range lines {
+				_, _ = buff.WriteString("\x1b[31m")
+				_, _ = buff.WriteString(line)
+				if i < len(lines)-1 {
+					_, _ = buff.WriteString("\x1b[0m\n")
+				} else {
+					_, _ = buff.WriteString("\x1b[0m")
+				}
+			}
 		case DiffEqual:
 			_, _ = buff.WriteString(text)
 		}
@@ -1305,7 +1320,6 @@ func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Di
 // diffLinesToStrings splits two texts into a list of strings. 
Each string represents one line. func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' lineHash := make(map[string]int) @@ -1316,12 +1330,11 @@ func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, stri return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray } -// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string. -func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string, lineHash map[string]int) []uint32 { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. +// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []index. +func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string, lineHash map[string]int) []index { lineStart := 0 lineEnd := -1 - strs := []uint32{} + strs := []index{} for lineEnd < len(text)-1 { lineEnd = indexOf(text, "\n", lineStart) @@ -1335,11 +1348,11 @@ func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]str lineValue, ok := lineHash[line] if ok { - strs = append(strs, uint32(lineValue)) + strs = append(strs, index(lineValue)) } else { *lineArray = append(*lineArray, line) lineHash[line] = len(*lineArray) - 1 - strs = append(strs, uint32(len(*lineArray)-1)) + strs = append(strs, index(len(*lineArray)-1)) } } diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go new file mode 100644 index 0000000000..965a1c64bd --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/index.go @@ -0,0 +1,32 @@ +package diffmatchpatch + +type index uint32 + +const runeSkipStart = 0xd800 +const runeSkipEnd = 0xdfff + 1 +const runeMax = 0x110000 // next invalid code point + +func stringToIndex(text string) []index { + runes := []rune(text) + indexes := make([]index, len(runes)) + for i, r := range runes { + if r < runeSkipEnd { + indexes[i] = index(r) + } else { + indexes[i] = index(r) - (runeSkipEnd - runeSkipStart) + } + } + return indexes +} + +func indexesToString(indexes []index) string { + runes := make([]rune, len(indexes)) + for i, index := range indexes { + if index < runeSkipStart { + runes[i] = rune(index) + } else { + runes[i] = rune(index + (runeSkipEnd - runeSkipStart)) + } + } + return string(runes) +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index eb727bb594..573b6bf751 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -93,14 +93,14 @@ func runesIndex(r1, r2 []rune) int { return -1 } -func intArrayToString(ns []uint32) string { +func intArrayToString(ns []index) string { if len(ns) == 0 { return "" } b := []rune{} for _, n := range ns { - b = append(b, intToRune(n)) + b = append(b, intToRune(uint32(n))) } return string(b) } diff --git a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go index 
40725bd79c..5f339b2d78 100644 --- a/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go +++ b/vendor/github.com/sigstore/protobuf-specs/gen/pb-go/common/v1/sigstore_common.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.5 -// protoc v5.29.4 +// protoc v6.30.2 // source: sigstore_common.proto package v1 @@ -112,7 +112,8 @@ func (HashAlgorithm) EnumDescriptor() ([]byte, []int) { // opinionated options instead of allowing every possible permutation. // // Any changes to this enum MUST be reflected in the algorithm registry. -// See: docs/algorithm-registry.md +// +// See: // // To avoid the possibility of contradicting formats such as PKCS1 with // ED25519 the valid permutations are listed as a linear set instead of a @@ -159,8 +160,9 @@ const ( PublicKeyDetails_PKIX_ECDSA_P521_SHA_256 PublicKeyDetails = 20 // LMS and LM-OTS // - // These keys and signatures may be used by private Sigstore - // deployments, but are not currently supported by the public + // These algorithms are deprecated and should not be used. + // Keys and signatures MAY be used by private Sigstore + // deployments, but will not be supported by the public // good instance. // // USER WARNING: LMS and LM-OTS are both stateful signature schemes. @@ -170,8 +172,26 @@ const ( // MUST NOT be used for more than one signature per LM-OTS key. // If you cannot maintain these invariants, you MUST NOT use these // schemes. - PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + // + // Deprecated: Marked as deprecated in sigstore_common.proto. + PublicKeyDetails_LMS_SHA256 PublicKeyDetails = 14 + // Deprecated: Marked as deprecated in sigstore_common.proto. PublicKeyDetails_LMOTS_SHA256 PublicKeyDetails = 15 + // ML-DSA + // + // These ML_DSA_65 and ML-DSA_87 algorithms are the pure variants that + // take data to sign rather than the prehash variants (HashML-DSA), which + // take digests. While considered quantum-resistant, their usage + // involves tradeoffs in that signatures and keys are much larger, and + // this makes deployments more costly. + // + // USER WARNING: ML_DSA_65 and ML_DSA_87 are experimental algorithms. + // In the future they MAY be used by private Sigstore deployments, but + // they are not yet fully functional. This warning will be removed when + // these algorithms are widely supported by Sigstore clients and servers, + // but care should still be taken for production environments. + PublicKeyDetails_ML_DSA_65 PublicKeyDetails = 21 // See NIST FIPS 204 + PublicKeyDetails_ML_DSA_87 PublicKeyDetails = 22 ) // Enum value maps for PublicKeyDetails. 
@@ -198,6 +218,8 @@ var ( 20: "PKIX_ECDSA_P521_SHA_256", 14: "LMS_SHA256", 15: "LMOTS_SHA256", + 21: "ML_DSA_65", + 22: "ML_DSA_87", } PublicKeyDetails_value = map[string]int32{ "PUBLIC_KEY_DETAILS_UNSPECIFIED": 0, @@ -221,6 +243,8 @@ var ( "PKIX_ECDSA_P521_SHA_256": 20, "LMS_SHA256": 14, "LMOTS_SHA256": 15, + "ML_DSA_65": 21, + "ML_DSA_87": 22, } ) @@ -1134,7 +1158,7 @@ var file_sigstore_common_proto_rawDesc = string([]byte{ 0x48, 0x41, 0x32, 0x5f, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x41, 0x33, 0x5f, 0x33, 0x38, - 0x34, 0x10, 0x05, 0x2a, 0xe9, 0x04, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x34, 0x10, 0x05, 0x2a, 0x8f, 0x05, 0x0a, 0x10, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x11, @@ -1170,25 +1194,27 @@ var file_sigstore_common_proto_rawDesc = string([]byte{ 0x44, 0x53, 0x41, 0x5f, 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x13, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x50, 0x4b, 0x49, 0x58, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x50, 0x35, 0x32, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x5f, 0x32, 0x35, - 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x4d, 0x4f, 0x54, 0x53, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x22, 0x04, 0x08, 0x15, 0x10, 0x32, 0x2a, - 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, - 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, - 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, - 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, - 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, - 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x36, 0x10, 0x14, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x12, 0x0a, 0x0a, 0x4c, 0x4d, 0x53, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0e, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x14, 
0x0a, 0x0c, 0x4c, + 0x4d, 0x4f, 0x54, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0f, 0x1a, 0x02, 0x08, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x15, + 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x38, 0x37, 0x10, 0x16, 0x22, + 0x04, 0x08, 0x17, 0x10, 0x32, 0x2a, 0x6f, 0x0a, 0x1a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x29, 0x53, 0x55, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, + 0x4c, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x55, 0x52, 0x49, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, + 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x7c, 0x0a, 0x1c, 0x64, 0x65, 0x76, 0x2e, 0x73, 0x69, + 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x73, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, + 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x14, + 0x53, 0x69, 0x67, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( diff --git a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md b/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md deleted file mode 100644 index bdff02765c..0000000000 --- a/vendor/github.com/sigstore/rekor/CONTRIBUTORS.md +++ /dev/null @@ -1,122 +0,0 @@ -# Contributing - -When contributing to this repository, please first discuss the change you wish -to make via an [issue](https://github.com/sigstore/rekor/issues). - -## Pull Request Process - -1. Create an [issue](https://github.com/sigstore/rekor/issues) - outlining the fix or feature. -2. Fork the rekor repository to your own github account and clone it locally. -3. Hack on your changes. -4. Update the README.md with details of changes to any interface, this includes new environment - variables, exposed ports, useful file locations, CLI parameters and - new or changed configuration values. -5. Correctly format your commit message see [Commit Messages](#Commit Message Guidelines) - below. -6. Ensure that CI passes, if it fails, fix the failures. -7. Every pull request requires a review from the [core rekor team](https://github.com/orgs/github.com/sigstore/teams/core-team) - before merging. -8. If your pull request consists of more than one commit, please squash your - commits as described in [Squash Commits](#Squash Commits) - -## Commit Message Guidelines - -We follow the commit formatting recommendations found on [Chris Beams' How to Write a Git Commit Message article]((https://chris.beams.io/posts/git-commit/). - -Well formed commit messages not only help reviewers understand the nature of -the Pull Request, but also assists the release process where commit messages -are used to generate release notes. 
- -A good example of a commit message would be as follows: - -``` -Summarize changes in around 50 characters or less - -More detailed explanatory text, if necessary. Wrap it to about 72 -characters or so. In some contexts, the first line is treated as the -subject of the commit and the rest of the text as the body. The -blank line separating the summary from the body is critical (unless -you omit the body entirely); various tools like `log`, `shortlog` -and `rebase` can get confused if you run the two together. - -Explain the problem that this commit is solving. Focus on why you -are making this change as opposed to how (the code explains that). -Are there side effects or other unintuitive consequences of this -change? Here's the place to explain them. - -Further paragraphs come after blank lines. - - - Bullet points are okay, too - - - Typically a hyphen or asterisk is used for the bullet, preceded - by a single space, with blank lines in between, but conventions - vary here - -If you use an issue tracker, put references to them at the bottom, -like this: - -Resolves: #123 -See also: #456, #789 -``` - -Note the `Resolves #123` tag, this references the issue raised and allows us to -ensure issues are associated and closed when a pull request is merged. - -Please refer to [the github help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) -for a complete list of issue references. - -## Squash Commits - -Should your pull request consist of more than one commit (perhaps due to -a change being requested during the review cycle), please perform a git squash -once a reviewer has approved your pull request. - -A squash can be performed as follows. Let's say you have the following commits: - - initial commit - second commit - final commit - -Run the command below with the number set to the total commits you wish to -squash (in our case 3 commits): - - git rebase -i HEAD~3 - -You default text editor will then open up and you will see the following:: - - pick eb36612 initial commit - pick 9ac8968 second commit - pick a760569 final commit - - # Rebase eb1429f..a760569 onto eb1429f (3 commands) - -We want to rebase on top of our first commit, so we change the other two commits -to `squash`: - - pick eb36612 initial commit - squash 9ac8968 second commit - squash a760569 final commit - -After this, should you wish to update your commit message to better summarise -all of your pull request, run: - - git commit --amend - -You will then need to force push (assuming your initial commit(s) were posted -to github): - - git push origin your-branch --force - -Alternatively, a core member can squash your commits within Github. - -## DCO Signoff - -Make sure to sign the [Developer Certificate of -Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff). - -## Code of Conduct - -Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. -Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/rekor/blob/master/CODE_OF_CONDUCT.md) document. - diff --git a/vendor/github.com/sigstore/rekor/LICENSE b/vendor/github.com/sigstore/rekor/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/sigstore/rekor/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go deleted file mode 100644 index 5607679fdf..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Alpine Alpine package -// -// swagger:model alpine -type Alpine struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Alpine) Kind() string { - return "alpine" -} - -// SetKind sets the kind of this subtype -func (m *Alpine) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Alpine) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Alpine - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Alpine) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec AlpineSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this alpine -func (m *Alpine) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Alpine) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Alpine) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this alpine based on the context it is used -func (m *Alpine) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Alpine) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Alpine) UnmarshalBinary(b []byte) error { - var res Alpine - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go deleted file mode 100644 index edd25408bb..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// AlpineSchema Alpine Package Schema -// -// # Schema for Alpine package objects -// -// swagger:model alpineSchema -type AlpineSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go deleted file mode 100644 index a239c84faa..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go +++ /dev/null @@ -1,455 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// AlpineV001Schema Alpine v0.0.1 Schema -// -// # Schema for Alpine Package entries -// -// swagger:model alpineV001Schema -type AlpineV001Schema struct { - - // package - // Required: true - Package *AlpineV001SchemaPackage `json:"package"` - - // public key - // Required: true - PublicKey *AlpineV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this alpine v001 schema -func (m *AlpineV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePackage(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001Schema) validatePackage(formats strfmt.Registry) error { - - if err := validate.Required("package", "body", m.Package); err != nil { - return err - } - - if m.Package != nil { - if err := m.Package.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *AlpineV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this alpine v001 schema based on the context it is used -func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePackage(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { - - if m.Package != nil { - - if err := m.Package.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001Schema) UnmarshalBinary(b []byte) error { - var res AlpineV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPackage Information about the package associated with the entry -// -// swagger:model AlpineV001SchemaPackage -type AlpineV001SchemaPackage struct { - - // Specifies the package inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *AlpineV001SchemaPackageHash `json:"hash,omitempty"` - - // Values of the .PKGINFO key / value pairs - // Read Only: true - Pkginfo map[string]string `json:"pkginfo,omitempty"` -} - -// Validate validates this alpine v001 schema package -func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this alpine v001 schema package based on the context it is used -func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePkginfo(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *AlpineV001SchemaPackage) contextValidatePkginfo(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPackage) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPackage) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPackage - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPackageHash Specifies the hash algorithm and value for the package -// -// swagger:model AlpineV001SchemaPackageHash -type AlpineV001SchemaPackageHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the package - // Required: true - Value *string `json:"value"` -} - -// Validate validates this alpine v001 schema package hash -func (m *AlpineV001SchemaPackageHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var alpineV001SchemaPackageHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - alpineV001SchemaPackageHashTypeAlgorithmPropEnum = append(alpineV001SchemaPackageHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // AlpineV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" - AlpineV001SchemaPackageHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *AlpineV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, alpineV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *AlpineV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this alpine v001 schema package hash based on the context it is used -func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPackageHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPackageHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// AlpineV001SchemaPublicKey The public key that can verify the package signature -// -// swagger:model AlpineV001SchemaPublicKey -type AlpineV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this alpine v001 schema public key -func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this alpine v001 schema public key based on context it is used -func (m *AlpineV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *AlpineV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *AlpineV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res AlpineV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go deleted file mode 100644 index 804ddd11a9..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/consistency_proof.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// ConsistencyProof consistency proof -// -// swagger:model ConsistencyProof -type ConsistencyProof struct { - - // hashes - // Required: true - Hashes []string `json:"hashes"` - - // The hash value stored at the root of the merkle tree at the time the proof was generated - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` -} - -// Validate validates this consistency proof -func (m *ConsistencyProof) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHashes(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *ConsistencyProof) validateHashes(formats strfmt.Registry) error { - - if err := validate.Required("hashes", "body", m.Hashes); err != nil { - return err - } - - for i := 0; i < len(m.Hashes); i++ { - - if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - } - - return nil -} - -func (m *ConsistencyProof) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this consistency proof based on context it is used -func (m *ConsistencyProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *ConsistencyProof) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *ConsistencyProof) UnmarshalBinary(b []byte) error { - var res ConsistencyProof - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go deleted file mode 100644 index 8de4083baf..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Cose COSE object -// -// swagger:model cose -type Cose struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Cose) Kind() string { - return "cose" -} - -// SetKind sets the kind of this subtype -func (m *Cose) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Cose) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Cose - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Cose) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec CoseSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this cose -func (m *Cose) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Cose) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Cose) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this cose based on the context it is used -func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Cose) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Cose) UnmarshalBinary(b []byte) error { - var res Cose - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go deleted file mode 100644 index e653f22029..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CoseSchema COSE Schema -// -// # COSE for Rekord objects -// -// swagger:model coseSchema -type CoseSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go deleted file mode 100644 index 5818dca1c8..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ /dev/null @@ -1,521 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// CoseV001Schema cose v0.0.1 Schema -// -// # Schema for cose object -// -// swagger:model coseV001Schema -type CoseV001Schema struct { - - // data - Data *CoseV001SchemaData `json:"data,omitempty"` - - // The COSE Sign1 Message - // Format: byte - Message strfmt.Base64 `json:"message,omitempty"` - - // The public key that can verify the signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` -} - -// Validate validates this cose v001 schema -func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { - if swag.IsZero(m.Data) { // not required - return nil - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema based on the context it is used -func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if swag.IsZero(m.Data) { // not required - return nil - } - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001Schema) UnmarshalBinary(b []byte) error { - var res CoseV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaData Information about the content associated with the entry -// -// swagger:model CoseV001SchemaData -type CoseV001SchemaData struct { - - // Specifies the additional authenticated data required to verify the signature - // Format: byte - Aad strfmt.Base64 `json:"aad,omitempty"` - - // envelope hash - EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"` - - // payload hash - PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"` -} - -// Validate validates this cose v001 schema data -func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelopeHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error { - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if m.EnvelopeHash != nil { - if err := m.EnvelopeHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "envelopeHash") - } - return err - } - } - - return nil -} - -func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this cose v001 schema data based on the context it is used -func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { - - if m.EnvelopeHash != nil { - - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "envelopeHash") - } - return err - } - } - - return nil -} - -func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "payloadHash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope -// -// swagger:model CoseV001SchemaDataEnvelopeHash -type CoseV001SchemaDataEnvelopeHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this cose v001 schema data envelope hash -func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256" - CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema data envelope hash based on the context it is used -func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaDataEnvelopeHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content -// -// swagger:model CoseV001SchemaDataPayloadHash -type CoseV001SchemaDataPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this cose v001 schema data payload hash -func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256" - CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this cose v001 schema data payload hash based on the context it is used -func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error { - var res CoseV001SchemaDataPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go deleted file mode 100644 index dde562054c..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// DSSE DSSE envelope -// -// swagger:model dsse -type DSSE struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *DSSE) Kind() string { - return "dsse" -} - -// SetKind sets the kind of this subtype -func (m *DSSE) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *DSSE) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result DSSE - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m DSSE) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec DSSESchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this dsse -func (m *DSSE) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *DSSE) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *DSSE) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this dsse based on the context it is used -func (m *DSSE) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSE) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSE) UnmarshalBinary(b []byte) error { - var res DSSE - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go deleted file mode 100644 index 7795626438..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// DSSESchema DSSE Schema -// -// log entry schema for dsse envelopes -// -// swagger:model dsseSchema -type DSSESchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go deleted file mode 100644 index 5fde2a77e1..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go +++ /dev/null @@ -1,685 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// DSSEV001Schema DSSE v0.0.1 Schema -// -// # Schema for DSSE envelopes -// -// swagger:model dsseV001Schema -type DSSEV001Schema struct { - - // envelope hash - EnvelopeHash *DSSEV001SchemaEnvelopeHash `json:"envelopeHash,omitempty"` - - // payload hash - PayloadHash *DSSEV001SchemaPayloadHash `json:"payloadHash,omitempty"` - - // proposed content - ProposedContent *DSSEV001SchemaProposedContent `json:"proposedContent,omitempty"` - - // extracted collection of all signatures of the envelope's payload; elements will be sorted by lexicographical order of the base64 encoded signature strings - // Read Only: true - // Min Items: 1 - Signatures []*DSSEV001SchemaSignaturesItems0 `json:"signatures"` -} - -// Validate validates this dsse v001 schema -func (m *DSSEV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelopeHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateProposedContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignatures(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001Schema) validateEnvelopeHash(formats strfmt.Registry) error { - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if m.EnvelopeHash != nil { - if err := m.EnvelopeHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("envelopeHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validatePayloadHash(formats strfmt.Registry) error { - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if m.PayloadHash != nil { - if err := m.PayloadHash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("payloadHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validateProposedContent(formats strfmt.Registry) error { - if swag.IsZero(m.ProposedContent) { // not required - return nil - } - - if m.ProposedContent != nil { - if err := m.ProposedContent.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("proposedContent") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("proposedContent") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) validateSignatures(formats strfmt.Registry) error { - if swag.IsZero(m.Signatures) { // not required - return nil - } - - iSignaturesSize := int64(len(m.Signatures)) - - if err := validate.MinItems("signatures", "body", iSignaturesSize, 1); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - if swag.IsZero(m.Signatures[i]) { // not required - continue - } - - if m.Signatures[i] != nil { - if err := m.Signatures[i].Validate(formats); err != 
nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signatures" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this dsse v001 schema based on the context it is used -func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePayloadHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateProposedContent(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignatures(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { - - if m.EnvelopeHash != nil { - - if swag.IsZero(m.EnvelopeHash) { // not required - return nil - } - - if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("envelopeHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("envelopeHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { - - if m.PayloadHash != nil { - - if swag.IsZero(m.PayloadHash) { // not required - return nil - } - - if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("payloadHash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("payloadHash") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error { - - if m.ProposedContent != nil { - - if swag.IsZero(m.ProposedContent) { // not required - return nil - } - - if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("proposedContent") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("proposedContent") - } - return err - } - } - - return nil -} - -func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "signatures", "body", []*DSSEV001SchemaSignaturesItems0(m.Signatures)); err != nil { - return err - } - - for i := 0; i < len(m.Signatures); i++ { - - if m.Signatures[i] != nil { - - if swag.IsZero(m.Signatures[i]) { // not required - return nil - } - - if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signatures" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001Schema) UnmarshalBinary(b []byte) error { - var res DSSEV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaEnvelopeHash Specifies the hash algorithm and value encompassing the entire envelope sent to Rekor -// -// swagger:model DSSEV001SchemaEnvelopeHash -type DSSEV001SchemaEnvelopeHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The value of the computed digest over the entire envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this DSSE v001 schema envelope hash -func (m *DSSEV001SchemaEnvelopeHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum = append(dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // DSSEV001SchemaEnvelopeHashAlgorithmSha256 captures enum value "sha256" - DSSEV001SchemaEnvelopeHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, dsseV001SchemaEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *DSSEV001SchemaEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaEnvelopeHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("envelopeHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this DSSE v001 schema envelope hash based on the context it is used -func (m *DSSEV001SchemaEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaEnvelopeHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaEnvelopeHash) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaEnvelopeHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope -// -// swagger:model DSSEV001SchemaPayloadHash -type DSSEV001SchemaPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The value of the computed digest over the payload within the envelope - // Required: true - Value *string `json:"value"` -} - -// Validate validates this DSSE v001 schema payload hash -func (m *DSSEV001SchemaPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var dsseV001SchemaPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - dsseV001SchemaPayloadHashTypeAlgorithmPropEnum = append(dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // DSSEV001SchemaPayloadHashAlgorithmSha256 captures enum value "sha256" - DSSEV001SchemaPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *DSSEV001SchemaPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, dsseV001SchemaPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *DSSEV001SchemaPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this DSSE v001 schema payload hash based on the context it is used -func (m *DSSEV001SchemaPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaPayloadHash) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaProposedContent DSSE v001 schema proposed content -// -// swagger:model DSSEV001SchemaProposedContent -type DSSEV001SchemaProposedContent struct { - - // DSSE envelope specified as a stringified JSON object - // Required: true - Envelope *string `json:"envelope"` - - // collection of all verification material (e.g. public keys or certificates) used to verify signatures over envelope's payload, specified as base64-encoded strings - // Required: true - // Min Items: 1 - Verifiers []strfmt.Base64 `json:"verifiers"` -} - -// Validate validates this DSSE v001 schema proposed content -func (m *DSSEV001SchemaProposedContent) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelope(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerifiers(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001SchemaProposedContent) validateEnvelope(formats strfmt.Registry) error { - - if err := validate.Required("proposedContent"+"."+"envelope", "body", m.Envelope); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaProposedContent) validateVerifiers(formats strfmt.Registry) error { - - if err := validate.Required("proposedContent"+"."+"verifiers", "body", m.Verifiers); err != nil { - return err - } - - iVerifiersSize := int64(len(m.Verifiers)) - - if err := validate.MinItems("proposedContent"+"."+"verifiers", "body", iVerifiersSize, 1); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this DSSE v001 schema proposed content based on context it is used -func (m *DSSEV001SchemaProposedContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaProposedContent) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaProposedContent) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaProposedContent - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// DSSEV001SchemaSignaturesItems0 a signature of the envelope's payload along with the verification material for the signature -// -// swagger:model DSSEV001SchemaSignaturesItems0 -type DSSEV001SchemaSignaturesItems0 struct { - - // base64 encoded signature of the payload - // Required: true - // Pattern: ^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$ - Signature *string `json:"signature"` - - // verification material that was used to verify the corresponding signature, specified as a base64 encoded string - // Required: true - // Format: byte - Verifier *strfmt.Base64 `json:"verifier"` -} - -// Validate validates this DSSE v001 schema signatures items0 -func (m *DSSEV001SchemaSignaturesItems0) Validate(formats strfmt.Registry) error { - var res []error - - if err := 
m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerifier(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *DSSEV001SchemaSignaturesItems0) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if err := validate.Pattern("signature", "body", *m.Signature, `^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=|[A-Za-z0-9+\/]{4})$`); err != nil { - return err - } - - return nil -} - -func (m *DSSEV001SchemaSignaturesItems0) validateVerifier(formats strfmt.Registry) error { - - if err := validate.Required("verifier", "body", m.Verifier); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this DSSE v001 schema signatures items0 based on context it is used -func (m *DSSEV001SchemaSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *DSSEV001SchemaSignaturesItems0) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *DSSEV001SchemaSignaturesItems0) UnmarshalBinary(b []byte) error { - var res DSSEV001SchemaSignaturesItems0 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go deleted file mode 100644 index ac14f2026e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/error.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// Error error -// -// swagger:model Error -type Error struct { - - // code - Code int64 `json:"code,omitempty"` - - // message - Message string `json:"message,omitempty"` -} - -// Validate validates this error -func (m *Error) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this error based on context it is used -func (m *Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Error) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Error) UnmarshalBinary(b []byte) error { - var res Error - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go deleted file mode 100644 index b3e1f8a3bd..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Hashedrekord Hashed Rekord object -// -// swagger:model hashedrekord -type Hashedrekord struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Hashedrekord) Kind() string { - return "hashedrekord" -} - -// SetKind sets the kind of this subtype -func (m *Hashedrekord) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Hashedrekord) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Hashedrekord - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Hashedrekord) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HashedrekordSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this hashedrekord -func (m *Hashedrekord) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Hashedrekord) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Hashedrekord) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this hashedrekord based on the context it is used -func (m *Hashedrekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Hashedrekord) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Hashedrekord) UnmarshalBinary(b []byte) error { - var res Hashedrekord - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go deleted file mode 100644 index 56034a579e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// HashedrekordSchema Hashedrekord Schema -// -// # Schema for Hashedrekord objects -// -// swagger:model hashedrekordSchema -type HashedrekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go deleted file mode 100644 index 586025c5bb..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ /dev/null @@ -1,519 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HashedrekordV001Schema Hashed Rekor v0.0.1 Schema -// -// # Schema for Hashed Rekord object -// -// swagger:model hashedrekordV001Schema -type HashedrekordV001Schema struct { - - // data - // Required: true - Data *HashedrekordV001SchemaData `json:"data"` - - // signature - // Required: true - Signature *HashedrekordV001SchemaSignature `json:"signature"` -} - -// Validate validates this hashedrekord v001 schema -func (m *HashedrekordV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *HashedrekordV001Schema) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema based on the context it is used -func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001Schema) UnmarshalBinary(b []byte) error { - var res HashedrekordV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaData Information about the content associated with the entry -// -// swagger:model HashedrekordV001SchemaData -type HashedrekordV001SchemaData struct { - - // hash - Hash *HashedrekordV001SchemaDataHash `json:"hash,omitempty"` -} - -// Validate validates this hashedrekord v001 schema data -func (m *HashedrekordV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaData) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema data based on the context it is used -func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaData) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaDataHash Specifies the hash algorithm and value for the content -// -// swagger:model HashedrekordV001SchemaDataHash -type HashedrekordV001SchemaDataHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256","sha384","sha512"] - Algorithm *string `json:"algorithm"` - - // The hash value for the content, as represented by a lower case hexadecimal string - // Required: true - Value *string `json:"value"` -} - -// Validate validates this hashedrekord v001 schema data hash -func (m *HashedrekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum = append(hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" - HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" - - // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384" - HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" - - // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512" - HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" -) - -// prop value enum -func (m *HashedrekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *HashedrekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *HashedrekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this hashedrekord v001 schema data hash based on context it is used -func (m *HashedrekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary 
interface implementation -func (m *HashedrekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaDataHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaSignature Information about the detached signature associated with the entry -// -// swagger:model HashedrekordV001SchemaSignature -type HashedrekordV001SchemaSignature struct { - - // Specifies the content of the signature inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // public key - PublicKey *HashedrekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` -} - -// Validate validates this hashedrekord v001 schema signature -func (m *HashedrekordV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this hashedrekord v001 schema signature based on the context it is used -func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." 
+ "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information -// -// swagger:model HashedrekordV001SchemaSignaturePublicKey -type HashedrekordV001SchemaSignaturePublicKey struct { - - // Specifies the content of the public key or code signing certificate inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` -} - -// Validate validates this hashedrekord v001 schema signature public key -func (m *HashedrekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this hashedrekord v001 schema signature public key based on context it is used -func (m *HashedrekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HashedrekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res HashedrekordV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go deleted file mode 100644 index d19b8bc8c9..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Helm Helm chart -// -// swagger:model helm -type Helm struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Helm) Kind() string { - return "helm" -} - -// SetKind sets the kind of this subtype -func (m *Helm) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Helm) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Helm - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Helm) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec HelmSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this helm -func (m *Helm) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Helm) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Helm) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this helm based on the context it is used -func (m *Helm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Helm) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Helm) UnmarshalBinary(b []byte) error { - var res Helm - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go deleted file mode 100644 index 0ab87df9ce..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// HelmSchema Helm Schema -// -// # Schema for Helm objects -// -// swagger:model helmSchema -type HelmSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go deleted file mode 100644 index 13c00597c6..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go +++ /dev/null @@ -1,662 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HelmV001Schema Helm v0.0.1 Schema -// -// # Schema for Helm object -// -// swagger:model helmV001Schema -type HelmV001Schema struct { - - // chart - // Required: true - Chart *HelmV001SchemaChart `json:"chart"` - - // public key - // Required: true - PublicKey *HelmV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this helm v001 schema -func (m *HelmV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateChart(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001Schema) validateChart(formats strfmt.Registry) error { - - if err := validate.Required("chart", "body", m.Chart); err != nil { - return err - } - - if m.Chart != nil { - if err := m.Chart.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart") - } - return err - } - } - - return nil -} - -func (m *HelmV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema based on the context it is used -func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateChart(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error { - - if m.Chart != nil { - - if err := m.Chart.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart") - } - return err - } - } - - return nil -} - -func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001Schema) UnmarshalBinary(b []byte) error { - var res HelmV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChart Information about the Helm chart associated with the entry -// -// swagger:model HelmV001SchemaChart -type HelmV001SchemaChart struct { - - // hash - Hash *HelmV001SchemaChartHash `json:"hash,omitempty"` - - // provenance - // Required: true - Provenance *HelmV001SchemaChartProvenance `json:"provenance"` -} - -// Validate validates this helm v001 schema chart -func (m *HelmV001SchemaChart) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateProvenance(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaChart) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *HelmV001SchemaChart) validateProvenance(formats strfmt.Registry) error { - - if err := validate.Required("chart"+"."+"provenance", "body", m.Provenance); err != nil { - return err - } - - if m.Provenance != nil { - if err := m.Provenance.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance") - } - return err - } - } - - return nil -} - -// ContextValidate validate this helm v001 schema chart based on the context it is used -func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateProvenance(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "hash") - } - return err - } - } - - return nil -} - -func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error { - - if m.Provenance != nil { - - if err := m.Provenance.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("chart" + "." + "provenance") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("chart" + "." + "provenance") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChart) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChart) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChart - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaChartHash Specifies the hash algorithm and value for the chart -// -// swagger:model HelmV001SchemaChartHash -type HelmV001SchemaChartHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the chart - // Required: true - Value *string `json:"value"` -} - -// Validate validates this helm v001 schema chart hash -func (m *HelmV001SchemaChartHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- }
- return nil
-}
-
-var helmV001SchemaChartHashTypeAlgorithmPropEnum []interface{}
-
-func init() {
- var res []string
- if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil {
- panic(err)
- }
- for _, v := range res {
- helmV001SchemaChartHashTypeAlgorithmPropEnum = append(helmV001SchemaChartHashTypeAlgorithmPropEnum, v)
- }
-}
-
-const (
-
- // HelmV001SchemaChartHashAlgorithmSha256 captures enum value "sha256"
- HelmV001SchemaChartHashAlgorithmSha256 string = "sha256"
-)
-
-// prop value enum
-func (m *HelmV001SchemaChartHash) validateAlgorithmEnum(path, location string, value string) error {
- if err := validate.EnumCase(path, location, value, helmV001SchemaChartHashTypeAlgorithmPropEnum, true); err != nil {
- return err
- }
- return nil
-}
-
-func (m *HelmV001SchemaChartHash) validateAlgorithm(formats strfmt.Registry) error {
-
- if err := validate.Required("chart"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil {
- return err
- }
-
- // value enum
- if err := m.validateAlgorithmEnum("chart"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *HelmV001SchemaChartHash) validateValue(formats strfmt.Registry) error {
-
- if err := validate.Required("chart"+"."+"hash"+"."+"value", "body", m.Value); err != nil {
- return err
- }
-
- return nil
-}
-
-// ContextValidate validate this helm v001 schema chart hash based on the context it is used
-func (m *HelmV001SchemaChartHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- var res []error
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *HelmV001SchemaChartHash) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *HelmV001SchemaChartHash) UnmarshalBinary(b []byte) error {
- var res HelmV001SchemaChartHash
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
-
-// HelmV001SchemaChartProvenance The provenance entry associated with the signed Helm Chart
-//
-// swagger:model HelmV001SchemaChartProvenance
-type HelmV001SchemaChartProvenance struct {
-
- // Specifies the content of the provenance file inline within the document
- // Format: byte
- Content strfmt.Base64 `json:"content,omitempty"`
-
- // signature
- Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"`
-}
-
-// Validate validates this helm v001 schema chart provenance
-func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateSignature(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registry) error {
- if swag.IsZero(m.Signature) { // not required
- return nil
- }
-
- if m.Signature != nil {
- if err := m.Signature.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("chart" + "." + "provenance" + "." + "signature")
- }
- return err
- }
- }
-
- return nil
-}
-
-// ContextValidate validate this helm v001 schema chart provenance based on the context it is used
-func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- var res []error
-
- if err := m.contextValidateSignature(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
-
- if m.Signature != nil {
-
- if swag.IsZero(m.Signature) { // not required
- return nil
- }
-
- if err := m.Signature.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("chart" + "." + "provenance" + "." + "signature")
- }
- return err
- }
- }
-
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *HelmV001SchemaChartProvenance) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *HelmV001SchemaChartProvenance) UnmarshalBinary(b []byte) error {
- var res HelmV001SchemaChartProvenance
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
-
-// HelmV001SchemaChartProvenanceSignature Information about the included signature in the provenance file
-//
-// swagger:model HelmV001SchemaChartProvenanceSignature
-type HelmV001SchemaChartProvenanceSignature struct {
-
- // Specifies the signature embedded within the provenance file
- // Required: true
- // Read Only: true
- // Format: byte
- Content strfmt.Base64 `json:"content"`
-}
-
-// Validate validates this helm v001 schema chart provenance signature
-func (m *HelmV001SchemaChartProvenanceSignature) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateContent(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *HelmV001SchemaChartProvenanceSignature) validateContent(formats strfmt.Registry) error {
-
- if err := validate.Required("chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil {
- return err
- }
-
- return nil
-}
-
-// ContextValidate validate this helm v001 schema chart provenance signature based on the context it is used
-func (m *HelmV001SchemaChartProvenanceSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- var res []error
-
- if err := m.contextValidateContent(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *HelmV001SchemaChartProvenanceSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "chart"+"."+"provenance"+"."+"signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaChartProvenanceSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaChartProvenanceSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// HelmV001SchemaPublicKey The public key that can verify the package signature -// -// swagger:model HelmV001SchemaPublicKey -type HelmV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this helm v001 schema public key -func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this helm v001 schema public key based on context it is used -func (m *HelmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HelmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HelmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res HelmV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go deleted file mode 100644 index c555eb2da6..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
- "context"
-
- "github.com/go-openapi/errors"
- "github.com/go-openapi/strfmt"
- "github.com/go-openapi/swag"
- "github.com/go-openapi/validate"
-)
-
-// InactiveShardLogInfo inactive shard log info
-//
-// swagger:model InactiveShardLogInfo
-type InactiveShardLogInfo struct {
-
- // The current hash value stored at the root of the merkle tree
- // Required: true
- // Pattern: ^[0-9a-fA-F]{64}$
- RootHash *string `json:"rootHash"`
-
- // The current signed tree head
- // Required: true
- SignedTreeHead *string `json:"signedTreeHead"`
-
- // The current treeID
- // Required: true
- // Pattern: ^[0-9]+$
- TreeID *string `json:"treeID"`
-
- // The current number of nodes in the merkle tree
- // Required: true
- // Minimum: 1
- TreeSize *int64 `json:"treeSize"`
-}
-
-// Validate validates this inactive shard log info
-func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateRootHash(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateSignedTreeHead(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateTreeID(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateTreeSize(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error {
-
- if err := validate.Required("rootHash", "body", m.RootHash); err != nil {
- return err
- }
-
- if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error {
-
- if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error {
-
- if err := validate.Required("treeID", "body", m.TreeID); err != nil {
- return err
- }
-
- if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error {
-
- if err := validate.Required("treeSize", "body", m.TreeSize); err != nil {
- return err
- }
-
- if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil {
- return err
- }
-
- return nil
-}
-
-// ContextValidate validates this inactive shard log info based on context it is used
-func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error {
- var res InactiveShardLogInfo
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
deleted file mode 100644
index 86f0d7b94e..0000000000
--- a/vendor/github.com/sigstore/rekor/pkg/generated/models/inclusion_proof.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Code generated by go-swagger; DO NOT EDIT.
-
-//
-// Copyright 2021 The Sigstore Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package models
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-import (
- "context"
- "strconv"
-
- "github.com/go-openapi/errors"
- "github.com/go-openapi/strfmt"
- "github.com/go-openapi/swag"
- "github.com/go-openapi/validate"
-)
-
-// InclusionProof inclusion proof
-//
-// swagger:model InclusionProof
-type InclusionProof struct {
-
- // The checkpoint (signed tree head) that the inclusion proof is based on
- // Required: true
- Checkpoint *string `json:"checkpoint"`
-
- // A list of hashes required to compute the inclusion proof, sorted in order from leaf to root
- // Required: true
- Hashes []string `json:"hashes"`
-
- // The index of the entry in the transparency log
- // Required: true
- // Minimum: 0
- LogIndex *int64 `json:"logIndex"`
-
- // The hash value stored at the root of the merkle tree at the time the proof was generated
- // Required: true
- // Pattern: ^[0-9a-fA-F]{64}$
- RootHash *string `json:"rootHash"`
-
- // The size of the merkle tree at the time the inclusion proof was generated
- // Required: true
- // Minimum: 1
- TreeSize *int64 `json:"treeSize"`
-}
-
-// Validate validates this inclusion proof
-func (m *InclusionProof) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateCheckpoint(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateHashes(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateLogIndex(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateRootHash(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateTreeSize(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *InclusionProof) validateCheckpoint(formats strfmt.Registry) error { - - if err := validate.Required("checkpoint", "body", m.Checkpoint); err != nil { - return err - } - - return nil -} - -func (m *InclusionProof) validateHashes(formats strfmt.Registry) error { - - if err := validate.Required("hashes", "body", m.Hashes); err != nil { - return err - } - - for i := 0; i < len(m.Hashes); i++ { - - if err := validate.Pattern("hashes"+"."+strconv.Itoa(i), "body", m.Hashes[i], `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - } - - return nil -} - -func (m *InclusionProof) validateLogIndex(formats strfmt.Registry) error { - - if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { - return err - } - - if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { - return err - } - - return nil -} - -func (m *InclusionProof) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *InclusionProof) validateTreeSize(formats strfmt.Registry) error { - - if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { - return err - } - - if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this inclusion proof based on context it is used -func (m *InclusionProof) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *InclusionProof) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *InclusionProof) UnmarshalBinary(b []byte) error { - var res InclusionProof - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go deleted file mode 100644 index 4f208de1d5..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Intoto Intoto object -// -// swagger:model intoto -type Intoto struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec IntotoSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Intoto) Kind() string { - return "intoto" -} - -// SetKind sets the kind of this subtype -func (m *Intoto) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Intoto) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec IntotoSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Intoto - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Intoto) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec IntotoSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this intoto -func (m *Intoto) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Intoto) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Intoto) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this intoto based on the context it is used -func (m *Intoto) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Intoto) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Intoto) UnmarshalBinary(b []byte) error { - var res Intoto - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go deleted file mode 100644 index a7fdaa6a6d..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IntotoSchema Intoto Schema -// -// # Intoto for Rekord objects -// -// swagger:model intotoSchema -type IntotoSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go deleted file mode 100644 index 6973c72990..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go +++ /dev/null @@ -1,514 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// IntotoV001Schema intoto v0.0.1 Schema -// -// # Schema for intoto object -// -// swagger:model intotoV001Schema -type IntotoV001Schema struct { - - // content - // Required: true - Content *IntotoV001SchemaContent `json:"content"` - - // The public key that can verify the signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` -} - -// Validate validates this intoto v001 schema -func (m *IntotoV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV001Schema) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("content", "body", m.Content); err != nil { - return err - } - - if m.Content != nil { - if err := m.Content.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -func (m *IntotoV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v001 schema based on the context it is used -func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- }
- return nil
-}
-
-func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
-
- if m.Content != nil {
-
- if err := m.Content.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content")
- }
- return err
- }
- }
-
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV001Schema) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV001Schema) UnmarshalBinary(b []byte) error {
- var res IntotoV001Schema
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
-
-// IntotoV001SchemaContent intoto v001 schema content
-//
-// swagger:model IntotoV001SchemaContent
-type IntotoV001SchemaContent struct {
-
- // envelope
- Envelope string `json:"envelope,omitempty"`
-
- // hash
- Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"`
-
- // payload hash
- PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"`
-}
-
-// Validate validates this intoto v001 schema content
-func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateHash(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validatePayloadHash(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error {
- if swag.IsZero(m.Hash) { // not required
- return nil
- }
-
- if m.Hash != nil {
- if err := m.Hash.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "hash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "hash")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error {
- if swag.IsZero(m.PayloadHash) { // not required
- return nil
- }
-
- if m.PayloadHash != nil {
- if err := m.PayloadHash.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "payloadHash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "payloadHash")
- }
- return err
- }
- }
-
- return nil
-}
-
-// ContextValidate validate this intoto v001 schema content based on the context it is used
-func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- var res []error
-
- if err := m.contextValidateHash(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
-
- if m.Hash != nil {
-
- if swag.IsZero(m.Hash) { // not required
- return nil
- }
-
- if err := m.Hash.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "hash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "hash")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
-
- if m.PayloadHash != nil {
-
- if swag.IsZero(m.PayloadHash) { // not required
- return nil
- }
-
- if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "payloadHash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "payloadHash")
- }
- return err
- }
- }
-
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV001SchemaContent) UnmarshalBinary(b []byte) error {
- var res IntotoV001SchemaContent
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
-
-// IntotoV001SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope; this is computed by the rekor server, client-provided values are ignored
-//
-// swagger:model IntotoV001SchemaContentHash
-type IntotoV001SchemaContentHash struct {
-
- // The hashing function used to compute the hash value
- // Required: true
- // Enum: ["sha256"]
- Algorithm *string `json:"algorithm"`
-
- // The hash value for the archive
- // Required: true
- Value *string `json:"value"`
-}
-
-// Validate validates this intoto v001 schema content hash
-func (m *IntotoV001SchemaContentHash) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validateAlgorithm(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateValue(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- } - return nil -} - -var intotoV001SchemaContentHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV001SchemaContentHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV001SchemaContentHashAlgorithmSha256 captures enum value "sha256" - IntotoV001SchemaContentHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV001SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV001SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV001SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV001SchemaContentHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v001 schema content hash based on the context it is used -func (m *IntotoV001SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV001SchemaContentHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error { - var res IntotoV001SchemaContentHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope; this is computed by the rekor server, client-provided values are ignored -// -// swagger:model IntotoV001SchemaContentPayloadHash -type IntotoV001SchemaContentPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the envelope's payload - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v001 schema content payload hash -func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" - IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used -func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { - var res IntotoV001SchemaContentPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go deleted file mode 100644 index 309073a1c7..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go +++ /dev/null @@ -1,757 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// IntotoV002Schema intoto v0.0.2 Schema -// -// # Schema for intoto object -// -// swagger:model intotoV002Schema -type IntotoV002Schema struct { - - // content - // Required: true - Content *IntotoV002SchemaContent `json:"content"` -} - -// Validate validates this intoto v002 schema -func (m *IntotoV002Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV002Schema) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("content", "body", m.Content); err != nil { - return err - } - - if m.Content != nil { - if err := m.Content.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -// ContextValidate validate this intoto v002 schema based on the context it is used -func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if m.Content != nil { - - if err := m.Content.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002Schema) UnmarshalBinary(b []byte) error { - var res IntotoV002Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContent intoto v002 schema content -// -// swagger:model IntotoV002SchemaContent -type IntotoV002SchemaContent struct { - - // envelope - // Required: true - Envelope *IntotoV002SchemaContentEnvelope `json:"envelope"` - - // hash - Hash *IntotoV002SchemaContentHash `json:"hash,omitempty"` - - // payload hash - PayloadHash *IntotoV002SchemaContentPayloadHash `json:"payloadHash,omitempty"` -} - -// Validate validates this intoto v002 schema content -func (m *IntotoV002SchemaContent) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEnvelope(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePayloadHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- }
- return nil
-}
-
-func (m *IntotoV002SchemaContent) validateEnvelope(formats strfmt.Registry) error {
-
- if err := validate.Required("content"+"."+"envelope", "body", m.Envelope); err != nil {
- return err
- }
-
- if m.Envelope != nil {
- if err := m.Envelope.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "envelope")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "envelope")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *IntotoV002SchemaContent) validateHash(formats strfmt.Registry) error {
- if swag.IsZero(m.Hash) { // not required
- return nil
- }
-
- if m.Hash != nil {
- if err := m.Hash.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "hash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "hash")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *IntotoV002SchemaContent) validatePayloadHash(formats strfmt.Registry) error {
- if swag.IsZero(m.PayloadHash) { // not required
- return nil
- }
-
- if m.PayloadHash != nil {
- if err := m.PayloadHash.Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "payloadHash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "payloadHash")
- }
- return err
- }
- }
-
- return nil
-}
-
-// ContextValidate validate this intoto v002 schema content based on the context it is used
-func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- var res []error
-
- if err := m.contextValidateEnvelope(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.contextValidateHash(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.contextValidatePayloadHash(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error {
-
- if m.Envelope != nil {
-
- if err := m.Envelope.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "envelope")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "envelope")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
-
- if m.Hash != nil {
-
- if swag.IsZero(m.Hash) { // not required
- return nil
- }
-
- if err := m.Hash.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "hash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "hash")
- }
- return err
- }
- }
-
- return nil
-}
-
-func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
-
- if m.PayloadHash != nil {
-
- if swag.IsZero(m.PayloadHash) { // not required
- return nil
- }
-
- if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "payloadHash")
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "payloadHash")
- }
- return err
- }
- }
-
- return nil
-}
-
-// MarshalBinary interface implementation
-func (m *IntotoV002SchemaContent) MarshalBinary() ([]byte, error) {
- if m == nil {
- return nil, nil
- }
- return swag.WriteJSON(m)
-}
-
-// UnmarshalBinary interface implementation
-func (m *IntotoV002SchemaContent) UnmarshalBinary(b []byte) error {
- var res IntotoV002SchemaContent
- if err := swag.ReadJSON(b, &res); err != nil {
- return err
- }
- *m = res
- return nil
-}
-
-// IntotoV002SchemaContentEnvelope dsse envelope
-//
-// swagger:model IntotoV002SchemaContentEnvelope
-type IntotoV002SchemaContentEnvelope struct {
-
- // payload of the envelope
- // Format: byte
- Payload strfmt.Base64 `json:"payload,omitempty"`
-
- // type describing the payload
- // Required: true
- PayloadType *string `json:"payloadType"`
-
- // collection of all signatures of the envelope's payload
- // Required: true
- // Min Items: 1
- Signatures []*IntotoV002SchemaContentEnvelopeSignaturesItems0 `json:"signatures"`
-}
-
-// Validate validates this intoto v002 schema content envelope
-func (m *IntotoV002SchemaContentEnvelope) Validate(formats strfmt.Registry) error {
- var res []error
-
- if err := m.validatePayloadType(formats); err != nil {
- res = append(res, err)
- }
-
- if err := m.validateSignatures(formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- }
- return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelope) validatePayloadType(formats strfmt.Registry) error {
-
- if err := validate.Required("content"+"."+"envelope"+"."+"payloadType", "body", m.PayloadType); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *IntotoV002SchemaContentEnvelope) validateSignatures(formats strfmt.Registry) error {
-
- if err := validate.Required("content"+"."+"envelope"+"."+"signatures", "body", m.Signatures); err != nil {
- return err
- }
-
- iSignaturesSize := int64(len(m.Signatures))
-
- if err := validate.MinItems("content"+"."+"envelope"+"."+"signatures", "body", iSignaturesSize, 1); err != nil {
- return err
- }
-
- for i := 0; i < len(m.Signatures); i++ {
- if swag.IsZero(m.Signatures[i]) { // not required
- continue
- }
-
- if m.Signatures[i] != nil {
- if err := m.Signatures[i].Validate(formats); err != nil {
- if ve, ok := err.(*errors.Validation); ok {
- return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
- } else if ce, ok := err.(*errors.CompositeError); ok {
- return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))
- }
- return err
- }
- }
-
- }
-
- return nil
-}
-
-// ContextValidate validate this intoto v002 schema content envelope based on the context it is used
-func (m *IntotoV002SchemaContentEnvelope) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
- var res []error
-
- if err := m.contextValidateSignatures(ctx, formats); err != nil {
- res = append(res, err)
- }
-
- if len(res) > 0 {
- return errors.CompositeValidationError(res...)
- } - return nil -} - -func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.Signatures); i++ { - - if m.Signatures[i] != nil { - - if swag.IsZero(m.Signatures[i]) { // not required - return nil - } - - if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelope) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelope) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentEnvelope - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentEnvelopeSignaturesItems0 a signature of the envelope's payload along with the public key for the signature -// -// swagger:model IntotoV002SchemaContentEnvelopeSignaturesItems0 -type IntotoV002SchemaContentEnvelopeSignaturesItems0 struct { - - // optional id of the key used to create the signature - Keyid string `json:"keyid,omitempty"` - - // public key that corresponds to this signature - // Required: true - // Format: byte - PublicKey *strfmt.Base64 `json:"publicKey"` - - // signature of the payload - // Required: true - // Format: byte - Sig *strfmt.Base64 `json:"sig"` -} - -// Validate validates this intoto v002 schema content envelope signatures items0 -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSig(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) validateSig(formats strfmt.Registry) error { - - if err := validate.Required("sig", "body", m.Sig); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this intoto v002 schema content envelope signatures items0 based on context it is used -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentEnvelopeSignaturesItems0) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentEnvelopeSignaturesItems0 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentHash Specifies the hash algorithm and value encompassing the entire signed envelope -// -// swagger:model IntotoV002SchemaContentHash -type IntotoV002SchemaContentHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the archive - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v002 schema content hash -func (m *IntotoV002SchemaContentHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV002SchemaContentHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV002SchemaContentHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV002SchemaContentHashAlgorithmSha256 captures enum value "sha256" - IntotoV002SchemaContentHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV002SchemaContentHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV002SchemaContentHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV002SchemaContentHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v002 schema content hash based on the context it is used -func (m *IntotoV002SchemaContentHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentHash) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// IntotoV002SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope -// -// swagger:model IntotoV002SchemaContentPayloadHash -type IntotoV002SchemaContentPayloadHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value of the payload - // Required: true - Value *string `json:"value"` -} - -// Validate validates this intoto v002 schema content payload hash -func (m *IntotoV002SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // IntotoV002SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" - IntotoV002SchemaContentPayloadHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, intotoV002SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *IntotoV002SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *IntotoV002SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this intoto v002 schema content payload hash based on the context it is used -func (m *IntotoV002SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *IntotoV002SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *IntotoV002SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { - var res IntotoV002SchemaContentPayloadHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go deleted file mode 100644 index 3df3d21b8a..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Jar Java Archive (JAR) -// -// swagger:model jar -type Jar struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Jar) Kind() string { - return "jar" -} - -// SetKind sets the kind of this subtype -func (m *Jar) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Jar) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Jar - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Jar) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec JarSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this jar -func (m *Jar) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Jar) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Jar) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this jar based on the context it is used -func (m *Jar) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Jar) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Jar) UnmarshalBinary(b []byte) error { - var res Jar - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go deleted file mode 100644 index e7b9a590ed..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// JarSchema JAR Schema -// -// # Schema for JAR objects -// -// swagger:model jarSchema -type JarSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go deleted file mode 100644 index 2d741f3c52..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go +++ /dev/null @@ -1,569 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// JarV001Schema JAR v0.0.1 Schema -// -// # Schema for JAR entries -// -// swagger:model jarV001Schema -type JarV001Schema struct { - - // archive - // Required: true - Archive *JarV001SchemaArchive `json:"archive"` - - // signature - Signature *JarV001SchemaSignature `json:"signature,omitempty"` -} - -// Validate validates this jar v001 schema -func (m *JarV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateArchive(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001Schema) validateArchive(formats strfmt.Registry) error { - - if err := validate.Required("archive", "body", m.Archive); err != nil { - return err - } - - if m.Archive != nil { - if err := m.Archive.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive") - } - return err - } - } - - return nil -} - -func (m *JarV001Schema) validateSignature(formats strfmt.Registry) error { - if swag.IsZero(m.Signature) { // not required - return nil - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema based on the context it is used -func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateArchive(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error { - - if m.Archive != nil { - - if err := m.Archive.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive") - } - return err - } - } - - return nil -} - -func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if swag.IsZero(m.Signature) { // not required - return nil - } - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001Schema) UnmarshalBinary(b []byte) error { - var res JarV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaArchive Information about the archive associated with the entry -// -// swagger:model JarV001SchemaArchive -type JarV001SchemaArchive struct { - - // Specifies the archive inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"` -} - -// Validate validates this jar v001 schema archive -func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema archive based on the context it is used -func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("archive" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("archive" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaArchive) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaArchive) UnmarshalBinary(b []byte) error { - var res JarV001SchemaArchive - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaArchiveHash Specifies the hash algorithm and value encompassing the entire signed archive -// -// swagger:model JarV001SchemaArchiveHash -type JarV001SchemaArchiveHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the archive - // Required: true - Value *string `json:"value"` -} - -// Validate validates this jar v001 schema archive hash -func (m *JarV001SchemaArchiveHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var jarV001SchemaArchiveHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - jarV001SchemaArchiveHashTypeAlgorithmPropEnum = append(jarV001SchemaArchiveHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // JarV001SchemaArchiveHashAlgorithmSha256 captures enum value "sha256" - JarV001SchemaArchiveHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *JarV001SchemaArchiveHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, jarV001SchemaArchiveHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *JarV001SchemaArchiveHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("archive"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("archive"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaArchiveHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("archive"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this jar v001 schema archive hash based on context it is used -func (m *JarV001SchemaArchiveHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaArchiveHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaArchiveHash) UnmarshalBinary(b []byte) error { - var res JarV001SchemaArchiveHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaSignature Information about the included signature in the JAR file -// -// swagger:model JarV001SchemaSignature -type JarV001SchemaSignature struct { - - // Specifies the PKCS7 signature embedded within the JAR file - // Required: 
true - // Read Only: true - // Format: byte - Content strfmt.Base64 `json:"content"` - - // public key - // Required: true - PublicKey *JarV001SchemaSignaturePublicKey `json:"publicKey"` -} - -// Validate validates this jar v001 schema signature -func (m *JarV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this jar v001 schema signature based on the context it is used -func (m *JarV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateContent(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "signature"+"."+"content", "body", strfmt.Base64(m.Content)); err != nil { - return err - } - - return nil -} - -func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." 
+ "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res JarV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// JarV001SchemaSignaturePublicKey The X509 certificate containing the public key JAR which verifies the signature of the JAR -// -// swagger:model JarV001SchemaSignaturePublicKey -type JarV001SchemaSignaturePublicKey struct { - - // Specifies the content of the X509 certificate containing the public key used to verify the signature - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this jar v001 schema signature public key -func (m *JarV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *JarV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this jar v001 schema signature public key based on the context it is used -func (m *JarV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *JarV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *JarV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res JarV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go deleted file mode 100644 index ee32ded414..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go +++ /dev/null @@ -1,445 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// LogEntry log entry -// -// swagger:model LogEntry -type LogEntry map[string]LogEntryAnon - -// Validate validates this log entry -func (m LogEntry) Validate(formats strfmt.Registry) error { - var res []error - - for k := range m { - - if swag.IsZero(m[k]) { // not required - continue - } - if val, ok := m[k]; ok { - if err := val.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName(k) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName(k) - } - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContextValidate validate this log entry based on the context it is used -func (m LogEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - for k := range m { - - if val, ok := m[k]; ok { - if err := val.ContextValidate(ctx, formats); err != nil { - return err - } - } - - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// LogEntryAnon log entry anon -// -// swagger:model LogEntryAnon -type LogEntryAnon struct { - - // attestation - Attestation *LogEntryAnonAttestation `json:"attestation,omitempty"` - - // body - // Required: true - Body interface{} `json:"body"` - - // The time the entry was added to the log as a Unix timestamp in seconds - // Required: true - IntegratedTime *int64 `json:"integratedTime"` - - // This is the SHA256 hash of the DER-encoded public key for the log at the time the entry was included in the log - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - LogID *string `json:"logID"` - - // log index - // Required: true - // Minimum: 0 - LogIndex *int64 `json:"logIndex"` - - // verification - Verification *LogEntryAnonVerification `json:"verification,omitempty"` -} - -// Validate validates this log entry anon -func (m *LogEntryAnon) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAttestation(formats); err != nil { - res = append(res, err) - } - - if err := m.validateBody(formats); err != nil { - res = append(res, err) - } - - if err := m.validateIntegratedTime(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndex(formats); err != nil { - res = append(res, err) - } - - if err := m.validateVerification(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *LogEntryAnon) validateAttestation(formats strfmt.Registry) error { - if swag.IsZero(m.Attestation) { // not required - return nil - } - - if m.Attestation != nil { - if err := m.Attestation.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("attestation") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("attestation") - } - return err - } - } - - return nil -} - -func (m *LogEntryAnon) validateBody(formats strfmt.Registry) error { - - if m.Body == nil { - return errors.Required("body", "body", nil) - } - - return nil -} - -func (m *LogEntryAnon) validateIntegratedTime(formats strfmt.Registry) error { - - if err := validate.Required("integratedTime", "body", m.IntegratedTime); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateLogID(formats strfmt.Registry) error { - - if err := validate.Required("logID", "body", m.LogID); err != nil { - return err - } - - if err := validate.Pattern("logID", "body", *m.LogID, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateLogIndex(formats strfmt.Registry) error { - - if err := validate.Required("logIndex", "body", m.LogIndex); err != nil { - return err - } - - if err := validate.MinimumInt("logIndex", "body", *m.LogIndex, 0, false); err != nil { - return err - } - - return nil -} - -func (m *LogEntryAnon) validateVerification(formats strfmt.Registry) error { - if swag.IsZero(m.Verification) { // not required - return nil - } - - if m.Verification != nil { - if err := m.Verification.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification") - } - return err - } - } - - return nil -} - -// ContextValidate validate this log entry anon based on the context it is used -func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateAttestation(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateVerification(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error { - - if m.Attestation != nil { - - if swag.IsZero(m.Attestation) { // not required - return nil - } - - if err := m.Attestation.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("attestation") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("attestation") - } - return err - } - } - - return nil -} - -func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error { - - if m.Verification != nil { - - if swag.IsZero(m.Verification) { // not required - return nil - } - - if err := m.Verification.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnon) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnon) UnmarshalBinary(b []byte) error { - var res LogEntryAnon - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// LogEntryAnonAttestation log entry anon attestation -// -// swagger:model LogEntryAnonAttestation -type LogEntryAnonAttestation struct { - - // data - // Format: byte - Data strfmt.Base64 `json:"data,omitempty"` -} - -// Validate validates this log entry anon attestation -func (m *LogEntryAnonAttestation) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this log entry anon attestation based on context it is used -func (m *LogEntryAnonAttestation) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnonAttestation) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnonAttestation) UnmarshalBinary(b []byte) error { - var res LogEntryAnonAttestation - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// LogEntryAnonVerification log entry anon verification -// -// swagger:model LogEntryAnonVerification -type LogEntryAnonVerification struct { - - // inclusion proof - InclusionProof *InclusionProof `json:"inclusionProof,omitempty"` - - // Signature over the logID, logIndex, body and integratedTime. - // Format: byte - SignedEntryTimestamp strfmt.Base64 `json:"signedEntryTimestamp,omitempty"` -} - -// Validate validates this log entry anon verification -func (m *LogEntryAnonVerification) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateInclusionProof(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogEntryAnonVerification) validateInclusionProof(formats strfmt.Registry) error { - if swag.IsZero(m.InclusionProof) { // not required - return nil - } - - if m.InclusionProof != nil { - if err := m.InclusionProof.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification" + "." 
+ "inclusionProof") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification" + "." + "inclusionProof") - } - return err - } - } - - return nil -} - -// ContextValidate validate this log entry anon verification based on the context it is used -func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateInclusionProof(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error { - - if m.InclusionProof != nil { - - if swag.IsZero(m.InclusionProof) { // not required - return nil - } - - if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("verification" + "." + "inclusionProof") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("verification" + "." + "inclusionProof") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogEntryAnonVerification) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogEntryAnonVerification) UnmarshalBinary(b []byte) error { - var res LogEntryAnonVerification - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go deleted file mode 100644 index cb57b27f51..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// LogInfo log info -// -// swagger:model LogInfo -type LogInfo struct { - - // inactive shards - InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"` - - // The current hash value stored at the root of the merkle tree - // Required: true - // Pattern: ^[0-9a-fA-F]{64}$ - RootHash *string `json:"rootHash"` - - // The current signed tree head - // Required: true - SignedTreeHead *string `json:"signedTreeHead"` - - // The current treeID - // Required: true - // Pattern: ^[0-9]+$ - TreeID *string `json:"treeID"` - - // The current number of nodes in the merkle tree - // Required: true - // Minimum: 1 - TreeSize *int64 `json:"treeSize"` -} - -// Validate validates this log info -func (m *LogInfo) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateInactiveShards(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRootHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignedTreeHead(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeID(formats); err != nil { - res = append(res, err) - } - - if err := m.validateTreeSize(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error { - if swag.IsZero(m.InactiveShards) { // not required - return nil - } - - for i := 0; i < len(m.InactiveShards); i++ { - if swag.IsZero(m.InactiveShards[i]) { // not required - continue - } - - if m.InactiveShards[i] != nil { - if err := m.InactiveShards[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("inactiveShards" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -func (m *LogInfo) validateRootHash(formats strfmt.Registry) error { - - if err := validate.Required("rootHash", "body", m.RootHash); err != nil { - return err - } - - if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error { - - if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateTreeID(formats strfmt.Registry) error { - - if err := validate.Required("treeID", "body", m.TreeID); err != nil { - return err - } - - if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { - return err - } - - return nil -} - -func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error { - - if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { - return err - } - - if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this log info based on the context it is used -func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateInactiveShards(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.InactiveShards); i++ { - - if m.InactiveShards[i] != nil { - - if swag.IsZero(m.InactiveShards[i]) { // not required - return nil - } - - if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *LogInfo) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LogInfo) UnmarshalBinary(b []byte) error { - var res LogInfo - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go deleted file mode 100644 index 5b734a5fff..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go +++ /dev/null @@ -1,195 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/validate" -) - -// ProposedEntry proposed entry -// -// swagger:discriminator ProposedEntry kind -type ProposedEntry interface { - runtime.Validatable - runtime.ContextValidatable - - // kind - // Required: true - Kind() string - SetKind(string) - - // AdditionalProperties in base type shoud be handled just like regular properties - // At this moment, the base type property is pushed down to the subtype -} - -type proposedEntry struct { - kindField string -} - -// Kind gets the kind of this polymorphic type -func (m *proposedEntry) Kind() string { - return "ProposedEntry" -} - -// SetKind sets the kind of this polymorphic type -func (m *proposedEntry) SetKind(val string) { -} - -// UnmarshalProposedEntrySlice unmarshals polymorphic slices of ProposedEntry -func UnmarshalProposedEntrySlice(reader io.Reader, consumer runtime.Consumer) ([]ProposedEntry, error) { - var elements []json.RawMessage - if err := consumer.Consume(reader, &elements); err != nil { - return nil, err - } - - var result []ProposedEntry - for _, element := range elements { - obj, err := unmarshalProposedEntry(element, consumer) - if err != nil { - return nil, err - } - result = append(result, obj) - } - return result, nil -} - -// UnmarshalProposedEntry unmarshals polymorphic ProposedEntry -func UnmarshalProposedEntry(reader io.Reader, consumer runtime.Consumer) (ProposedEntry, error) { - // we need to read this twice, so first into a buffer - data, err := io.ReadAll(reader) - if err != nil { - return nil, err - } - return unmarshalProposedEntry(data, consumer) -} - -func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEntry, error) { - buf := bytes.NewBuffer(data) - buf2 := bytes.NewBuffer(data) - - // the first time this is read is to fetch the value of the kind property. 
- var getType struct { - Kind string `json:"kind"` - } - if err := consumer.Consume(buf, &getType); err != nil { - return nil, err - } - - if err := validate.RequiredString("kind", "body", getType.Kind); err != nil { - return nil, err - } - - // The value of kind is used to determine which type to create and unmarshal the data into - switch getType.Kind { - case "ProposedEntry": - var result proposedEntry - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "alpine": - var result Alpine - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "cose": - var result Cose - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "dsse": - var result DSSE - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "hashedrekord": - var result Hashedrekord - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "helm": - var result Helm - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "intoto": - var result Intoto - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "jar": - var result Jar - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rekord": - var result Rekord - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rfc3161": - var result Rfc3161 - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "rpm": - var result Rpm - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - case "tuf": - var result TUF - if err := consumer.Consume(buf2, &result); err != nil { - return nil, err - } - return &result, nil - } - return nil, errors.New(422, "invalid kind value: %q", getType.Kind) -} - -// Validate validates this proposed entry -func (m *proposedEntry) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this proposed entry based on context it is used -func (m *proposedEntry) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go deleted file mode 100644 index 81c8ff0545..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rekord Rekord object -// -// swagger:model rekord -type Rekord struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rekord) Kind() string { - return "rekord" -} - -// SetKind sets the kind of this subtype -func (m *Rekord) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rekord) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rekord - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rekord) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RekordSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rekord -func (m *Rekord) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rekord) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rekord) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rekord based on the context it is used -func (m *Rekord) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rekord) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rekord) UnmarshalBinary(b []byte) error { - var res Rekord - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go deleted file mode 100644 index e85442ae97..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// RekordSchema Rekor Schema -// -// # Schema for Rekord objects -// -// swagger:model rekordSchema -type RekordSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go deleted file mode 100644 index aaaad9d7b4..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// RekordV001Schema Rekor v0.0.1 Schema -// -// # Schema for Rekord object -// -// swagger:model rekordV001Schema -type RekordV001Schema struct { - - // data - // Required: true - Data *RekordV001SchemaData `json:"data"` - - // signature - // Required: true - Signature *RekordV001SchemaSignature `json:"signature"` -} - -// Validate validates this rekord v001 schema -func (m *RekordV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateData(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSignature(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err - } - - if m.Data != nil { - if err := m.Data.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *RekordV001Schema) validateSignature(formats strfmt.Registry) error { - - if err := validate.Required("signature", "body", m.Signature); err != nil { - return err - } - - if m.Signature != nil { - if err := m.Signature.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema based on the context it is used -func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateData(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSignature(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { - - if m.Data != nil { - - if err := m.Data.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data") - } - return err - } - } - - return nil -} - -func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { - - if m.Signature != nil { - - if err := m.Signature.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001Schema) UnmarshalBinary(b []byte) error { - var res RekordV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaData Information about the content associated with the entry -// -// swagger:model RekordV001SchemaData -type RekordV001SchemaData struct { - - // Specifies the content inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *RekordV001SchemaDataHash `json:"hash,omitempty"` -} - -// Validate validates this rekord v001 schema data -func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema data based on the context it is used -func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("data" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("data" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaData) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaData) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaData - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaDataHash Specifies the hash algorithm and value for the content -// -// swagger:model RekordV001SchemaDataHash -type RekordV001SchemaDataHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the content - // Required: true - Value *string `json:"value"` -} - -// Validate validates this rekord v001 schema data hash -func (m *RekordV001SchemaDataHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var rekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rekordV001SchemaDataHashTypeAlgorithmPropEnum = append(rekordV001SchemaDataHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // RekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" - RekordV001SchemaDataHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *RekordV001SchemaDataHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rekordV001SchemaDataHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RekordV001SchemaDataHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("data"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("data"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validate this rekord v001 schema data hash based on the context it is used -func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaDataHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaDataHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaSignature Information about the detached signature associated with the entry -// -// swagger:model RekordV001SchemaSignature -type RekordV001SchemaSignature struct { - - // Specifies the content of the signature inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` - - // Specifies the format of the signature - // Required: true - // Enum: ["pgp","minisign","x509","ssh"] - Format *string `json:"format"` - - // public key - // Required: true - PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"` -} - -// Validate validates this rekord v001 schema signature -func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if err := m.validateFormat(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -var rekordV001SchemaSignatureTypeFormatPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["pgp","minisign","x509","ssh"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rekordV001SchemaSignatureTypeFormatPropEnum = append(rekordV001SchemaSignatureTypeFormatPropEnum, v) - } -} - -const ( - - // RekordV001SchemaSignatureFormatPgp captures enum value "pgp" - RekordV001SchemaSignatureFormatPgp string = "pgp" - - // RekordV001SchemaSignatureFormatMinisign captures enum value "minisign" - RekordV001SchemaSignatureFormatMinisign string = "minisign" - - // RekordV001SchemaSignatureFormatX509 captures enum value "x509" - RekordV001SchemaSignatureFormatX509 string = "x509" - - // RekordV001SchemaSignatureFormatSSH captures enum value "ssh" - RekordV001SchemaSignatureFormatSSH string = "ssh" -) - -// prop value enum -func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rekordV001SchemaSignatureTypeFormatPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil { - return err - } - - // value enum - if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil { - return err - } - - return nil -} - -func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := 
err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rekord v001 schema signature based on the context it is used -func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("signature" + "." + "publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("signature" + "." + "publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaSignature) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaSignature - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RekordV001SchemaSignaturePublicKey The public key that can verify the signature -// -// swagger:model RekordV001SchemaSignaturePublicKey -type RekordV001SchemaSignaturePublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rekord v001 schema signature public key -func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rekord v001 schema signature public key based on context it is used -func (m *RekordV001SchemaSignaturePublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RekordV001SchemaSignaturePublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RekordV001SchemaSignaturePublicKey) UnmarshalBinary(b []byte) error { - var res RekordV001SchemaSignaturePublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go deleted file mode 100644 index ef8d42e7a2..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. 
- -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rfc3161 RFC3161 Timestamp -// -// swagger:model rfc3161 -type Rfc3161 struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rfc3161) Kind() string { - return "rfc3161" -} - -// SetKind sets the kind of this subtype -func (m *Rfc3161) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rfc3161) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rfc3161 - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rfc3161) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec Rfc3161Schema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rfc3161 -func (m *Rfc3161) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rfc3161) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rfc3161 based on the context it is used -func (m *Rfc3161) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161) UnmarshalBinary(b []byte) error { - var res Rfc3161 - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go deleted file mode 100644 index 826013a28d..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Rfc3161Schema Timestamp Schema -// -// # Schema for RFC 3161 timestamp objects -// -// swagger:model rfc3161Schema -type Rfc3161Schema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go deleted file mode 100644 index c3a50c8492..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rfc3161V001Schema Timestamp v0.0.1 Schema -// -// # Schema for RFC3161 entries -// -// swagger:model rfc3161V001Schema -type Rfc3161V001Schema struct { - - // tsr - // Required: true - Tsr *Rfc3161V001SchemaTsr `json:"tsr"` -} - -// Validate validates this rfc3161 v001 schema -func (m *Rfc3161V001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateTsr(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161V001Schema) validateTsr(formats strfmt.Registry) error { - - if err := validate.Required("tsr", "body", m.Tsr); err != nil { - return err - } - - if m.Tsr != nil { - if err := m.Tsr.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("tsr") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("tsr") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rfc3161 v001 schema based on the context it is used -func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateTsr(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error { - - if m.Tsr != nil { - - if err := m.Tsr.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("tsr") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("tsr") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161V001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161V001Schema) UnmarshalBinary(b []byte) error { - var res Rfc3161V001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// Rfc3161V001SchemaTsr Information about the tsr file associated with the entry -// -// swagger:model Rfc3161V001SchemaTsr -type Rfc3161V001SchemaTsr struct { - - // Specifies the tsr file content inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rfc3161 v001 schema tsr -func (m *Rfc3161V001SchemaTsr) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *Rfc3161V001SchemaTsr) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("tsr"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rfc3161 v001 schema tsr based on context it is used -func (m *Rfc3161V001SchemaTsr) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *Rfc3161V001SchemaTsr) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rfc3161V001SchemaTsr) UnmarshalBinary(b []byte) error { - var res Rfc3161V001SchemaTsr - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go deleted file mode 100644 index 8b1f10c77e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// Rpm RPM package -// -// swagger:model rpm -type Rpm struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *Rpm) Kind() string { - return "rpm" -} - -// SetKind sets the kind of this subtype -func (m *Rpm) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *Rpm) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result Rpm - - if base.Kind != result.Kind() { - /* Not the type we're looking for. */ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m Rpm) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec RpmSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this rpm -func (m *Rpm) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *Rpm) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *Rpm) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this rpm based on the context it is used -func (m *Rpm) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *Rpm) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *Rpm) UnmarshalBinary(b []byte) error { - var res Rpm - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go deleted file mode 100644 index 5cb378366f..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// RpmSchema RPM Schema -// -// # Schema for RPM objects -// -// swagger:model rpmSchema -type RpmSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go deleted file mode 100644 index 394eece414..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go +++ /dev/null @@ -1,450 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// RpmV001Schema RPM v0.0.1 Schema -// -// # Schema for RPM entries -// -// swagger:model rpmV001Schema -type RpmV001Schema struct { - - // package - // Required: true - Package *RpmV001SchemaPackage `json:"package"` - - // public key - // Required: true - PublicKey *RpmV001SchemaPublicKey `json:"publicKey"` -} - -// Validate validates this rpm v001 schema -func (m *RpmV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePackage(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001Schema) validatePackage(formats strfmt.Registry) error { - - if err := validate.Required("package", "body", m.Package); err != nil { - return err - } - - if m.Package != nil { - if err := m.Package.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *RpmV001Schema) validatePublicKey(formats strfmt.Registry) error { - - if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { - return err - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rpm v001 schema based on the context it is used -func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePackage(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { - - if m.Package != nil { - - if err := m.Package.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package") - } - return err - } - } - - return nil -} - -func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001Schema) UnmarshalBinary(b []byte) error { - var res RpmV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPackage Information about the package associated with the entry -// -// swagger:model RpmV001SchemaPackage -type RpmV001SchemaPackage struct { - - // Specifies the package inline within the document - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // hash - Hash *RpmV001SchemaPackageHash `json:"hash,omitempty"` - - // Values of the RPM headers - // Read Only: true - Headers map[string]string `json:"headers,omitempty"` -} - -// Validate validates this rpm v001 schema package -func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if m.Hash != nil { - if err := m.Hash.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." + "hash") - } - return err - } - } - - return nil -} - -// ContextValidate validate this rpm v001 schema package based on the context it is used -func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHash(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateHeaders(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { - - if m.Hash != nil { - - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := m.Hash.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("package" + "." + "hash") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("package" + "." 
+ "hash") - } - return err - } - } - - return nil -} - -func (m *RpmV001SchemaPackage) contextValidateHeaders(ctx context.Context, formats strfmt.Registry) error { - - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPackage) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPackage) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPackage - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPackageHash Specifies the hash algorithm and value for the package -// -// swagger:model RpmV001SchemaPackageHash -type RpmV001SchemaPackageHash struct { - - // The hashing function used to compute the hash value - // Required: true - // Enum: ["sha256"] - Algorithm *string `json:"algorithm"` - - // The hash value for the package - // Required: true - Value *string `json:"value"` -} - -// Validate validates this rpm v001 schema package hash -func (m *RpmV001SchemaPackageHash) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAlgorithm(formats); err != nil { - res = append(res, err) - } - - if err := m.validateValue(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -var rpmV001SchemaPackageHashTypeAlgorithmPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - rpmV001SchemaPackageHashTypeAlgorithmPropEnum = append(rpmV001SchemaPackageHashTypeAlgorithmPropEnum, v) - } -} - -const ( - - // RpmV001SchemaPackageHashAlgorithmSha256 captures enum value "sha256" - RpmV001SchemaPackageHashAlgorithmSha256 string = "sha256" -) - -// prop value enum -func (m *RpmV001SchemaPackageHash) validateAlgorithmEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, rpmV001SchemaPackageHashTypeAlgorithmPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *RpmV001SchemaPackageHash) validateAlgorithm(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"algorithm", "body", m.Algorithm); err != nil { - return err - } - - // value enum - if err := m.validateAlgorithmEnum("package"+"."+"hash"+"."+"algorithm", "body", *m.Algorithm); err != nil { - return err - } - - return nil -} - -func (m *RpmV001SchemaPackageHash) validateValue(formats strfmt.Registry) error { - - if err := validate.Required("package"+"."+"hash"+"."+"value", "body", m.Value); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rpm v001 schema package hash based on context it is used -func (m *RpmV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPackageHash) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPackageHash - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// RpmV001SchemaPublicKey The PGP public key that can verify the RPM signature -// -// swagger:model RpmV001SchemaPublicKey -type 
RpmV001SchemaPublicKey struct { - - // Specifies the content of the public key inline within the document - // Required: true - // Format: byte - Content *strfmt.Base64 `json:"content"` -} - -// Validate validates this rpm v001 schema public key -func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this rpm v001 schema public key based on context it is used -func (m *RpmV001SchemaPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *RpmV001SchemaPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *RpmV001SchemaPublicKey) UnmarshalBinary(b []byte) error { - var res RpmV001SchemaPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go deleted file mode 100644 index 0f66abb5b6..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go +++ /dev/null @@ -1,341 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// SearchIndex search index -// -// swagger:model SearchIndex -type SearchIndex struct { - - // email - // Format: email - Email strfmt.Email `json:"email,omitempty"` - - // hash - // Pattern: ^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$ - Hash string `json:"hash,omitempty"` - - // operator - // Enum: ["and","or"] - Operator string `json:"operator,omitempty"` - - // public key - PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"` -} - -// Validate validates this search index -func (m *SearchIndex) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEmail(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHash(formats); err != nil { - res = append(res, err) - } - - if err := m.validateOperator(formats); err != nil { - res = append(res, err) - } - - if err := m.validatePublicKey(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchIndex) validateEmail(formats strfmt.Registry) error { - if swag.IsZero(m.Email) { // not required - return nil - } - - if err := validate.FormatOf("email", "body", "email", m.Email.String(), formats); err != nil { - return err - } - - return nil -} - -func (m *SearchIndex) validateHash(formats strfmt.Registry) error { - if swag.IsZero(m.Hash) { // not required - return nil - } - - if err := validate.Pattern("hash", "body", m.Hash, `^(sha512:)?[0-9a-fA-F]{128}$|^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$`); err != nil { - return err - } - - return nil -} - -var searchIndexTypeOperatorPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v) - } -} - -const ( - - // SearchIndexOperatorAnd captures enum value "and" - SearchIndexOperatorAnd string = "and" - - // SearchIndexOperatorOr captures enum value "or" - SearchIndexOperatorOr string = "or" -) - -// prop value enum -func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *SearchIndex) validateOperator(formats strfmt.Registry) error { - if swag.IsZero(m.Operator) { // not required - return nil - } - - // value enum - if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { - return err - } - - return nil -} - -func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if m.PublicKey != nil { - if err := m.PublicKey.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// ContextValidate validate this search index based on the context it is used -func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err 
:= m.contextValidatePublicKey(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { - - if m.PublicKey != nil { - - if swag.IsZero(m.PublicKey) { // not required - return nil - } - - if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("publicKey") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("publicKey") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *SearchIndex) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchIndex) UnmarshalBinary(b []byte) error { - var res SearchIndex - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// SearchIndexPublicKey search index public key -// -// swagger:model SearchIndexPublicKey -type SearchIndexPublicKey struct { - - // content - // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // format - // Required: true - // Enum: ["pgp","x509","minisign","ssh","tuf"] - Format *string `json:"format"` - - // url - // Format: uri - URL strfmt.URI `json:"url,omitempty"` -} - -// Validate validates this search index public key -func (m *SearchIndexPublicKey) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateFormat(formats); err != nil { - res = append(res, err) - } - - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -var searchIndexPublicKeyTypeFormatPropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["pgp","x509","minisign","ssh","tuf"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - searchIndexPublicKeyTypeFormatPropEnum = append(searchIndexPublicKeyTypeFormatPropEnum, v) - } -} - -const ( - - // SearchIndexPublicKeyFormatPgp captures enum value "pgp" - SearchIndexPublicKeyFormatPgp string = "pgp" - - // SearchIndexPublicKeyFormatX509 captures enum value "x509" - SearchIndexPublicKeyFormatX509 string = "x509" - - // SearchIndexPublicKeyFormatMinisign captures enum value "minisign" - SearchIndexPublicKeyFormatMinisign string = "minisign" - - // SearchIndexPublicKeyFormatSSH captures enum value "ssh" - SearchIndexPublicKeyFormatSSH string = "ssh" - - // SearchIndexPublicKeyFormatTUF captures enum value "tuf" - SearchIndexPublicKeyFormatTUF string = "tuf" -) - -// prop value enum -func (m *SearchIndexPublicKey) validateFormatEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, searchIndexPublicKeyTypeFormatPropEnum, true); err != nil { - return err - } - return nil -} - -func (m *SearchIndexPublicKey) validateFormat(formats strfmt.Registry) error { - - if err := validate.Required("publicKey"+"."+"format", "body", m.Format); err != nil { - return err - } - - // value enum - if err := m.validateFormatEnum("publicKey"+"."+"format", "body", *m.Format); err != nil { - return err - } - - return nil -} - -func (m *SearchIndexPublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this search index public key based on context it is used -func (m *SearchIndexPublicKey) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *SearchIndexPublicKey) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchIndexPublicKey) UnmarshalBinary(b []byte) error { - var res SearchIndexPublicKey - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go deleted file mode 100644 index 425ec8b348..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - "io" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// SearchLogQuery search log query -// -// swagger:model SearchLogQuery -type SearchLogQuery struct { - entriesField []ProposedEntry - - // entry u UI ds - // Max Items: 10 - // Min Items: 1 - EntryUUIDs []string `json:"entryUUIDs"` - - // log indexes - // Max Items: 10 - // Min Items: 1 - LogIndexes []*int64 `json:"logIndexes"` -} - -// Entries gets the entries of this base type -func (m *SearchLogQuery) Entries() []ProposedEntry { - return m.entriesField -} - -// SetEntries sets the entries of this base type -func (m *SearchLogQuery) SetEntries(val []ProposedEntry) { - m.entriesField = val -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *SearchLogQuery) UnmarshalJSON(raw []byte) error { - var data struct { - Entries json.RawMessage `json:"entries"` - - EntryUUIDs []string `json:"entryUUIDs"` - - LogIndexes []*int64 `json:"logIndexes"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var propEntries []ProposedEntry - if string(data.Entries) != "null" { - entries, err := UnmarshalProposedEntrySlice(bytes.NewBuffer(data.Entries), runtime.JSONConsumer()) - if err != nil && err != io.EOF { - return err - } - propEntries = entries - } - - var result SearchLogQuery - - // entries - result.entriesField = propEntries - - // entryUUIDs - result.EntryUUIDs = data.EntryUUIDs - - // logIndexes - result.LogIndexes = data.LogIndexes - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m SearchLogQuery) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - EntryUUIDs []string `json:"entryUUIDs"` - - LogIndexes []*int64 `json:"logIndexes"` - }{ - - EntryUUIDs: m.EntryUUIDs, - - LogIndexes: m.LogIndexes, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Entries []ProposedEntry `json:"entries"` - }{ - - Entries: m.entriesField, - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this search log query -func (m *SearchLogQuery) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEntries(formats); err != nil { - res = append(res, err) - } - - if err := m.validateEntryUUIDs(formats); err != nil { - res = append(res, err) - } - - if err := m.validateLogIndexes(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchLogQuery) validateEntries(formats strfmt.Registry) error { - if swag.IsZero(m.Entries()) { // not required - return nil - } - - iEntriesSize := int64(len(m.Entries())) - - if err := validate.MinItems("entries", "body", iEntriesSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("entries", "body", iEntriesSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.Entries()); i++ { - - if err := m.entriesField[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("entries" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("entries" + "." + strconv.Itoa(i)) - } - return err - } - - } - - return nil -} - -func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error { - if swag.IsZero(m.EntryUUIDs) { // not required - return nil - } - - iEntryUUIDsSize := int64(len(m.EntryUUIDs)) - - if err := validate.MinItems("entryUUIDs", "body", iEntryUUIDsSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("entryUUIDs", "body", iEntryUUIDsSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.EntryUUIDs); i++ { - - if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil { - return err - } - - } - - return nil -} - -func (m *SearchLogQuery) validateLogIndexes(formats strfmt.Registry) error { - if swag.IsZero(m.LogIndexes) { // not required - return nil - } - - iLogIndexesSize := int64(len(m.LogIndexes)) - - if err := validate.MinItems("logIndexes", "body", iLogIndexesSize, 1); err != nil { - return err - } - - if err := validate.MaxItems("logIndexes", "body", iLogIndexesSize, 10); err != nil { - return err - } - - for i := 0; i < len(m.LogIndexes); i++ { - if swag.IsZero(m.LogIndexes[i]) { // not required - continue - } - - if err := validate.MinimumInt("logIndexes"+"."+strconv.Itoa(i), "body", *m.LogIndexes[i], 0, false); err != nil { - return err - } - - } - - return nil -} - -// ContextValidate validate this search log query based on the context it is used -func (m *SearchLogQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEntries(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.Entries()); i++ { - - if swag.IsZero(m.entriesField[i]) { // not required - return nil - } - - if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("entries" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("entries" + "." + strconv.Itoa(i)) - } - return err - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *SearchLogQuery) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SearchLogQuery) UnmarshalBinary(b []byte) error { - var res SearchLogQuery - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go deleted file mode 100644 index a5f6eff0f7..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf.go +++ /dev/null @@ -1,210 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// TUF TUF metadata -// -// swagger:model tuf -type TUF struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` -} - -// Kind gets the kind of this subtype -func (m *TUF) Kind() string { - return "tuf" -} - -// SetKind sets the kind of this subtype -func (m *TUF) SetKind(val string) { -} - -// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure -func (m *TUF) UnmarshalJSON(raw []byte) error { - var data struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` - } - buf := bytes.NewBuffer(raw) - dec := json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&data); err != nil { - return err - } - - var base struct { - /* Just the base type fields. Used for unmashalling polymorphic types.*/ - - Kind string `json:"kind"` - } - buf = bytes.NewBuffer(raw) - dec = json.NewDecoder(buf) - dec.UseNumber() - - if err := dec.Decode(&base); err != nil { - return err - } - - var result TUF - - if base.Kind != result.Kind() { - /* Not the type we're looking for. 
*/ - return errors.New(422, "invalid kind value: %q", base.Kind) - } - - result.APIVersion = data.APIVersion - result.Spec = data.Spec - - *m = result - - return nil -} - -// MarshalJSON marshals this object with a polymorphic type to a JSON structure -func (m TUF) MarshalJSON() ([]byte, error) { - var b1, b2, b3 []byte - var err error - b1, err = json.Marshal(struct { - - // api version - // Required: true - // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - APIVersion *string `json:"apiVersion"` - - // spec - // Required: true - Spec TUFSchema `json:"spec"` - }{ - - APIVersion: m.APIVersion, - - Spec: m.Spec, - }) - if err != nil { - return nil, err - } - b2, err = json.Marshal(struct { - Kind string `json:"kind"` - }{ - - Kind: m.Kind(), - }) - if err != nil { - return nil, err - } - - return swag.ConcatJSON(b1, b2, b3), nil -} - -// Validate validates this tuf -func (m *TUF) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateAPIVersion(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSpec(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUF) validateAPIVersion(formats strfmt.Registry) error { - - if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { - return err - } - - if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { - return err - } - - return nil -} - -func (m *TUF) validateSpec(formats strfmt.Registry) error { - - if m.Spec == nil { - return errors.Required("spec", "body", nil) - } - - return nil -} - -// ContextValidate validate this tuf based on the context it is used -func (m *TUF) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// MarshalBinary interface implementation -func (m *TUF) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUF) UnmarshalBinary(b []byte) error { - var res TUF - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go deleted file mode 100644 index 37dca8b68e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// TUFSchema TUF Schema -// -// # Schema for TUF metadata objects -// -// swagger:model tufSchema -type TUFSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go deleted file mode 100644 index 021e0ce7d3..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go +++ /dev/null @@ -1,304 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// TUFV001Schema TUF v0.0.1 Schema -// -// # Schema for TUF metadata entries -// -// swagger:model tufV001Schema -type TUFV001Schema struct { - - // metadata - // Required: true - Metadata *TUFV001SchemaMetadata `json:"metadata"` - - // root - // Required: true - Root *TUFV001SchemaRoot `json:"root"` - - // TUF specification version - // Read Only: true - SpecVersion string `json:"spec_version,omitempty"` -} - -// Validate validates this tuf v001 schema -func (m *TUFV001Schema) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateMetadata(formats); err != nil { - res = append(res, err) - } - - if err := m.validateRoot(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001Schema) validateMetadata(formats strfmt.Registry) error { - - if err := validate.Required("metadata", "body", m.Metadata); err != nil { - return err - } - - if m.Metadata != nil { - if err := m.Metadata.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("metadata") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("metadata") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) validateRoot(formats strfmt.Registry) error { - - if err := validate.Required("root", "body", m.Root); err != nil { - return err - } - - if m.Root != nil { - if err := m.Root.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("root") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("root") - } - return err - } - } - - return nil -} - -// ContextValidate validate this tuf v001 schema based on the context it is used -func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateMetadata(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateRoot(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSpecVersion(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { - - if m.Metadata != nil { - - if err := m.Metadata.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("metadata") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("metadata") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error { - - if m.Root != nil { - - if err := m.Root.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("root") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("root") - } - return err - } - } - - return nil -} - -func (m *TUFV001Schema) contextValidateSpecVersion(ctx context.Context, formats strfmt.Registry) error { - - if err := validate.ReadOnly(ctx, "spec_version", "body", string(m.SpecVersion)); err != nil { - return err - } - - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001Schema) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001Schema) UnmarshalBinary(b []byte) error { - var res TUFV001Schema - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// TUFV001SchemaMetadata TUF metadata -// -// swagger:model TUFV001SchemaMetadata -type TUFV001SchemaMetadata struct { - - // Specifies the metadata inline within the document - // Required: true - Content interface{} `json:"content"` -} - -// Validate validates this TUF v001 schema metadata -func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001SchemaMetadata) validateContent(formats strfmt.Registry) error { - - if m.Content == nil { - return errors.Required("metadata"+"."+"content", "body", nil) - } - - return nil -} - -// ContextValidate validates this TUF v001 schema metadata based on context it is used -func (m *TUFV001SchemaMetadata) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001SchemaMetadata) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error { - var res TUFV001SchemaMetadata - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} - -// TUFV001SchemaRoot root metadata containing about the public keys used to sign the manifest -// -// swagger:model TUFV001SchemaRoot -type TUFV001SchemaRoot struct { - - // Specifies the metadata inline within the document - // Required: true - Content interface{} `json:"content"` -} - -// Validate validates this TUF v001 schema root -func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateContent(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error { - - if m.Content == nil { - return errors.Required("root"+"."+"content", "body", nil) - } - - return nil -} - -// ContextValidate validates this TUF v001 schema root based on context it is used -func (m *TUFV001SchemaRoot) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *TUFV001SchemaRoot) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *TUFV001SchemaRoot) UnmarshalBinary(b []byte) error { - var res TUFV001SchemaRoot - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/smallstep/pkcs7/pkcs7.go b/vendor/github.com/smallstep/pkcs7/pkcs7.go index f6c6dfbbb5..dd5b18380a 100644 --- a/vendor/github.com/smallstep/pkcs7/pkcs7.go +++ b/vendor/github.com/smallstep/pkcs7/pkcs7.go @@ -12,6 +12,7 @@ import ( "encoding/asn1" "errors" "fmt" + "io" "sort" "sync" @@ -26,9 +27,15 @@ type PKCS7 struct { Certificates []*x509.Certificate CRLs []pkix.CertificateList Signers []signerInfo + Hasher Hasher raw interface{} } +// Hasher is an interface defining a custom hash calculator. 
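+// When the Hasher field on a PKCS7 is non-nil, digest verification uses
+// it to compute the content hash instead of hashing in-process (see
+// calculateHash in verify.go below), which lets callers plug in streaming
+// or externally computed digests.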
+type Hasher interface { + Hash(crypto.Hash, io.Reader) ([]byte, error) +} + type contentInfo struct { ContentType asn1.ObjectIdentifier Content asn1.RawValue `asn1:"explicit,optional,tag:0"` diff --git a/vendor/github.com/smallstep/pkcs7/sign.go b/vendor/github.com/smallstep/pkcs7/sign.go index 31c3654c51..74ce50d802 100644 --- a/vendor/github.com/smallstep/pkcs7/sign.go +++ b/vendor/github.com/smallstep/pkcs7/sign.go @@ -11,9 +11,54 @@ import ( "errors" "fmt" "math/big" + "sync" "time" ) +func init() { + defaultMessageDigestAlgorithm.oid = OIDDigestAlgorithmSHA1 +} + +var defaultMessageDigestAlgorithm struct { + sync.RWMutex + oid asn1.ObjectIdentifier +} + +// SetDefaultDigestAlgorithm sets the default digest algorithm +// to be used for signing operations on [SignedData]. +// +// This must be called before creating a new instance of [SignedData] +// using [NewSignedData]. +// +// When this function is not called, the default digest algorithm is SHA1. +func SetDefaultDigestAlgorithm(d asn1.ObjectIdentifier) error { + defaultMessageDigestAlgorithm.Lock() + defer defaultMessageDigestAlgorithm.Unlock() + + switch { + case d.Equal(OIDDigestAlgorithmSHA1), + d.Equal(OIDDigestAlgorithmSHA224), d.Equal(OIDDigestAlgorithmSHA256), + d.Equal(OIDDigestAlgorithmSHA384), d.Equal(OIDDigestAlgorithmSHA512), + d.Equal(OIDDigestAlgorithmDSA), d.Equal(OIDDigestAlgorithmDSASHA1), + d.Equal(OIDDigestAlgorithmECDSASHA1), d.Equal(OIDDigestAlgorithmECDSASHA256), + d.Equal(OIDDigestAlgorithmECDSASHA384), d.Equal(OIDDigestAlgorithmECDSASHA512): + break + default: + return fmt.Errorf("unsupported message digest algorithm %v", d) + } + + defaultMessageDigestAlgorithm.oid = d + + return nil +} + +func defaultMessageDigestAlgorithmOID() asn1.ObjectIdentifier { + defaultMessageDigestAlgorithm.RLock() + defer defaultMessageDigestAlgorithm.RUnlock() + + return defaultMessageDigestAlgorithm.oid +} + // SignedData is an opaque data structure for creating signed data payloads type SignedData struct { sd signedData @@ -39,7 +84,7 @@ func NewSignedData(data []byte) (*SignedData, error) { ContentInfo: ci, Version: 1, } - return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil + return &SignedData{sd: sd, data: data, digestOid: defaultMessageDigestAlgorithmOID()}, nil } // SignerInfoConfig are optional values to include when adding a signer diff --git a/vendor/github.com/smallstep/pkcs7/verify.go b/vendor/github.com/smallstep/pkcs7/verify.go index 7525f918b1..f9ad34bbab 100644 --- a/vendor/github.com/smallstep/pkcs7/verify.go +++ b/vendor/github.com/smallstep/pkcs7/verify.go @@ -1,6 +1,8 @@ package pkcs7 import ( + "bytes" + "crypto" "crypto/subtle" "crypto/x509" "crypto/x509/pkix" @@ -89,9 +91,10 @@ func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPo if err != nil { return err } - h := hash.New() - h.Write(p7.Content) - computed := h.Sum(nil) + computed, err := calculateHash(p7.Hasher, hash, p7.Content) + if err != nil { + return err + } if subtle.ConstantTimeCompare(digest, computed) != 1 { return &MessageDigestMismatchError{ ExpectedDigest: digest, @@ -145,9 +148,10 @@ func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (e if err != nil { return err } - h := hash.New() - h.Write(p7.Content) - computed := h.Sum(nil) + computed, err := calculateHash(p7.Hasher, hash, p7.Content) + if err != nil { + return err + } if subtle.ConstantTimeCompare(digest, computed) != 1 { return &MessageDigestMismatchError{ ExpectedDigest: digest, @@ -363,3 +367,19 @@ 
func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, } return errors.New("pkcs7: attribute type not in attributes") } + +func calculateHash(hasher Hasher, hashFunc crypto.Hash, content []byte) (computed []byte, err error) { + if hasher != nil { + computed, err = hasher.Hash(hashFunc, bytes.NewReader(content)) + } else { + if !hashFunc.Available() { + return nil, fmt.Errorf("hash function %v not available", hashFunc) + } + + h := hashFunc.New() + _, _ = h.Write(content) + computed = h.Sum(nil) + } + + return +} diff --git a/vendor/github.com/sourcegraph/conc/Makefile b/vendor/github.com/sourcegraph/conc/Makefile new file mode 100644 index 0000000000..3e0720a123 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/Makefile @@ -0,0 +1,24 @@ +.DEFAULT_GOAL := help + +GO_BIN ?= $(shell go env GOPATH)/bin + +.PHONY: help +help: + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +$(GO_BIN)/golangci-lint: + @echo "==> Installing golangci-lint within "${GO_BIN}"" + @go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +.PHONY: lint +lint: $(GO_BIN)/golangci-lint ## Run linting on Go files + @echo "==> Linting Go source files" + @golangci-lint run -v --fix -c .golangci.yml ./... + +.PHONY: test +test: ## Run tests + go test -race -v ./... -coverprofile ./coverage.txt + +.PHONY: bench +bench: ## Run benchmarks. See https://pkg.go.dev/cmd/go#hdr-Testing_flags + go test ./... -bench . -benchtime 5s -timeout 0 -run=XXX -cpu 1 -benchmem diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go deleted file mode 100644 index 7087e32a8f..0000000000 --- a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.20 -// +build !go1.20 - -package multierror - -import "go.uber.org/multierr" - -var ( - Join = multierr.Combine -) diff --git a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go deleted file mode 100644 index 39cff829ac..0000000000 --- a/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.20 -// +build go1.20 - -package multierror - -import "errors" - -var ( - Join = errors.Join -) diff --git a/vendor/github.com/sourcegraph/conc/iter/iter.go b/vendor/github.com/sourcegraph/conc/iter/iter.go deleted file mode 100644 index 124b4f9400..0000000000 --- a/vendor/github.com/sourcegraph/conc/iter/iter.go +++ /dev/null @@ -1,85 +0,0 @@ -package iter - -import ( - "runtime" - "sync/atomic" - - "github.com/sourcegraph/conc" -) - -// defaultMaxGoroutines returns the default maximum number of -// goroutines to use within this package. -func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } - -// Iterator can be used to configure the behaviour of ForEach -// and ForEachIdx. The zero value is safe to use with reasonable -// defaults. -// -// Iterator is also safe for reuse and concurrent use. -type Iterator[T any] struct { - // MaxGoroutines controls the maximum number of goroutines - // to use on this Iterator's methods. - // - // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). - MaxGoroutines int -} - -// ForEach executes f in parallel over each element in input. 
-// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// ForEach always uses at most runtime.GOMAXPROCS goroutines. -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. For -// a configurable goroutine limit, use a custom Iterator. -func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } - -// ForEach executes f in parallel over each element in input, -// using up to the Iterator's configured maximum number of -// goroutines. -// -// It is safe to mutate the input parameter, which makes it -// possible to map in place. -// -// It takes roughly 2µs to start up the goroutines and adds -// an overhead of roughly 50ns per element of input. -func (iter Iterator[T]) ForEach(input []T, f func(*T)) { - iter.ForEachIdx(input, func(_ int, t *T) { - f(t) - }) -} - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } - -// ForEachIdx is the same as ForEach except it also provides the -// index of the element to the callback. -func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { - if iter.MaxGoroutines == 0 { - // iter is a value receiver and is hence safe to mutate - iter.MaxGoroutines = defaultMaxGoroutines() - } - - numInput := len(input) - if iter.MaxGoroutines > numInput { - // No more concurrent tasks than the number of input items. - iter.MaxGoroutines = numInput - } - - var idx atomic.Int64 - // Create the task outside the loop to avoid extra closure allocations. - task := func() { - i := int(idx.Add(1) - 1) - for ; i < numInput; i = int(idx.Add(1) - 1) { - f(i, &input[i]) - } - } - - var wg conc.WaitGroup - for i := 0; i < iter.MaxGoroutines; i++ { - wg.Go(task) - } - wg.Wait() -} diff --git a/vendor/github.com/sourcegraph/conc/iter/map.go b/vendor/github.com/sourcegraph/conc/iter/map.go deleted file mode 100644 index efbe6bfaf1..0000000000 --- a/vendor/github.com/sourcegraph/conc/iter/map.go +++ /dev/null @@ -1,65 +0,0 @@ -package iter - -import ( - "sync" - - "github.com/sourcegraph/conc/internal/multierror" -) - -// Mapper is an Iterator with a result type R. It can be used to configure -// the behaviour of Map and MapErr. The zero value is safe to use with -// reasonable defaults. -// -// Mapper is also safe for reuse and concurrent use. -type Mapper[T, R any] Iterator[T] - -// Map applies f to each element of input, returning the mapped result. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. -func Map[T, R any](input []T, f func(*T) R) []R { - return Mapper[T, R]{}.Map(input, f) -} - -// Map applies f to each element of input, returning the mapped result. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { - res := make([]R, len(input)) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - res[i] = f(t) - }) - return res -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable -// goroutine limit, use a custom Mapper. 
-func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { - return Mapper[T, R]{}.MapErr(input, f) -} - -// MapErr applies f to each element of the input, returning the mapped result -// and a combined error of all returned errors. -// -// Map uses up to the configured Mapper's maximum number of goroutines. -func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { - var ( - res = make([]R, len(input)) - errMux sync.Mutex - errs error - ) - Iterator[T](m).ForEachIdx(input, func(i int, t *T) { - var err error - res[i], err = f(t) - if err != nil { - errMux.Lock() - // TODO: use stdlib errors once multierrors land in go 1.20 - errs = multierror.Join(errs, err) - errMux.Unlock() - } - }) - return res, errs -} diff --git a/vendor/github.com/sourcegraph/conc/pool/context_pool.go b/vendor/github.com/sourcegraph/conc/pool/context_pool.go new file mode 100644 index 0000000000..85c34e5aef --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/context_pool.go @@ -0,0 +1,104 @@ +package pool + +import ( + "context" +) + +// ContextPool is a pool that runs tasks that take a context. +// A new ContextPool should be created with `New().WithContext(ctx)`. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ContextPool struct { + errorPool ErrorPool + + ctx context.Context + cancel context.CancelFunc + + cancelOnError bool +} + +// Go submits a task. If it returns an error, the error will be +// collected and returned by Wait(). If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ContextPool) Go(f func(ctx context.Context) error) { + p.errorPool.Go(func() error { + if p.cancelOnError { + // If we are cancelling on error, then we also want to cancel if a + // panic is raised. To do this, we need to recover, cancel, and then + // re-throw the caught panic. + defer func() { + if r := recover(); r != nil { + p.cancel() + panic(r) + } + }() + } + + err := f(p.ctx) + if err != nil && p.cancelOnError { + // Leaky abstraction warning: We add the error directly because + // otherwise, canceling could cause another goroutine to exit and + // return an error before this error was added, which breaks the + // expectations of WithFirstError(). + p.errorPool.addErr(err) + p.cancel() + return nil + } + return err + }) +} + +// Wait cleans up all spawned goroutines, propagates any panics, and +// returns an error if any of the tasks errored. +func (p *ContextPool) Wait() error { + // Make sure we call cancel after pool is done to avoid memory leakage. + defer p.cancel() + return p.errorPool.Wait() +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +// This is particularly useful for (*ContextPool).WithCancelOnError(), +// where all errors after the first are likely to be context.Canceled. +func (p *ContextPool) WithFirstError() *ContextPool { + p.panicIfInitialized() + p.errorPool.WithFirstError() + return p +} + +// WithCancelOnError configures the pool to cancel its context as soon as +// any task returns an error or panics. By default, the pool's context is not +// canceled until the parent context is canceled. +// +// In this case, all errors returned from the pool after the first will +// likely be context.Canceled - you may want to also use +// (*ContextPool).WithFirstError() to configure the pool to only return +// the first error. 
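+//
+// A minimal usage sketch (doWork stands in for the caller's own task):
+//
+//	p := pool.New().WithContext(ctx).WithCancelOnError()
+//	for i := 0; i < 3; i++ {
+//		p.Go(func(ctx context.Context) error { return doWork(ctx) })
+//	}
+//	err := p.Wait() // the first failure cancels ctx for the remaining tasks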
+func (p *ContextPool) WithCancelOnError() *ContextPool { + p.panicIfInitialized() + p.cancelOnError = true + return p +} + +// WithFailFast is an alias for the combination of WithFirstError and +// WithCancelOnError. By default, the errors from all tasks are returned and +// the pool's context is not canceled until the parent context is canceled. +func (p *ContextPool) WithFailFast() *ContextPool { + p.panicIfInitialized() + p.WithFirstError() + p.WithCancelOnError() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ContextPool) WithMaxGoroutines(n int) *ContextPool { + p.panicIfInitialized() + p.errorPool.WithMaxGoroutines(n) + return p +} + +func (p *ContextPool) panicIfInitialized() { + p.errorPool.panicIfInitialized() +} diff --git a/vendor/github.com/sourcegraph/conc/pool/error_pool.go b/vendor/github.com/sourcegraph/conc/pool/error_pool.go new file mode 100644 index 0000000000..e1789e61b6 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/error_pool.go @@ -0,0 +1,100 @@ +package pool + +import ( + "context" + "errors" + "sync" +) + +// ErrorPool is a pool that runs tasks that may return an error. +// Errors are collected and returned by Wait(). +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +// +// A new ErrorPool should be created using `New().WithErrors()`. +type ErrorPool struct { + pool Pool + + onlyFirstError bool + + mu sync.Mutex + errs []error +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ErrorPool) Go(f func() error) { + p.pool.Go(func() { + p.addErr(f()) + }) +} + +// Wait cleans up any spawned goroutines, propagating any panics and +// returning any errors from tasks. +func (p *ErrorPool) Wait() error { + p.pool.Wait() + + errs := p.errs + p.errs = nil // reset errs + + if len(errs) == 0 { + return nil + } else if p.onlyFirstError { + return errs[0] + } else { + return errors.Join(errs...) + } +} + +// WithContext converts the pool to a ContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ErrorPool) WithContext(ctx context.Context) *ContextPool { + p.panicIfInitialized() + ctx, cancel := context.WithCancel(ctx) + return &ContextPool{ + errorPool: p.deref(), + ctx: ctx, + cancel: cancel, + } +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +func (p *ErrorPool) WithFirstError() *ErrorPool { + p.panicIfInitialized() + p.onlyFirstError = true + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ErrorPool) WithMaxGoroutines(n int) *ErrorPool { + p.panicIfInitialized() + p.pool.WithMaxGoroutines(n) + return p +} + +// deref is a helper that creates a shallow copy of the pool with the same +// settings. We don't want to just dereference the pointer because that makes +// the copylock lint angry. 
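+// Only configuration is carried over: the underlying pool's limiter and
+// the onlyFirstError flag. Collected errors are not copied.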
+func (p *ErrorPool) deref() ErrorPool { + return ErrorPool{ + pool: p.pool.deref(), + onlyFirstError: p.onlyFirstError, + } +} + +func (p *ErrorPool) panicIfInitialized() { + p.pool.panicIfInitialized() +} + +func (p *ErrorPool) addErr(err error) { + if err != nil { + p.mu.Lock() + p.errs = append(p.errs, err) + p.mu.Unlock() + } +} diff --git a/vendor/github.com/sourcegraph/conc/pool/pool.go b/vendor/github.com/sourcegraph/conc/pool/pool.go new file mode 100644 index 0000000000..8f4494efb1 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/pool.go @@ -0,0 +1,174 @@ +package pool + +import ( + "context" + "sync" + + "github.com/sourcegraph/conc" +) + +// New creates a new Pool. +func New() *Pool { + return &Pool{} +} + +// Pool is a pool of goroutines used to execute tasks concurrently. +// +// Tasks are submitted with Go(). Once all your tasks have been submitted, you +// must call Wait() to clean up any spawned goroutines and propagate any +// panics. +// +// Goroutines are started lazily, so creating a new pool is cheap. There will +// never be more goroutines spawned than there are tasks submitted. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +// +// Pool is efficient, but not zero cost. It should not be used for very short +// tasks. Startup and teardown come with an overhead of around 1µs, and each +// task has an overhead of around 300ns. +type Pool struct { + handle conc.WaitGroup + limiter limiter + tasks chan func() + initOnce sync.Once +} + +// Go submits a task to be run in the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *Pool) Go(f func()) { + p.init() + + if p.limiter == nil { + // No limit on the number of goroutines. + select { + case p.tasks <- f: + // A goroutine was available to handle the task. + default: + // No goroutine was available to handle the task. + // Spawn a new one and send it the task. + p.handle.Go(func() { + p.worker(f) + }) + } + } else { + select { + case p.limiter <- struct{}{}: + // If we are below our limit, spawn a new worker rather + // than waiting for one to become available. + p.handle.Go(func() { + p.worker(f) + }) + case p.tasks <- f: + // A worker is available and has accepted the task. + return + } + } + +} + +// Wait cleans up spawned goroutines, propagating any panics that were +// raised by a tasks. +func (p *Pool) Wait() { + p.init() + + close(p.tasks) + + // After Wait() returns, reset the struct so tasks will be reinitialized on + // next use. This better matches the behavior of sync.WaitGroup + defer func() { p.initOnce = sync.Once{} }() + + p.handle.Wait() +} + +// MaxGoroutines returns the maximum size of the pool. +func (p *Pool) MaxGoroutines() int { + return p.limiter.limit() +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *Pool) WithMaxGoroutines(n int) *Pool { + p.panicIfInitialized() + if n < 1 { + panic("max goroutines in a pool must be greater than zero") + } + p.limiter = make(limiter, n) + return p +} + +// init ensures that the pool is initialized before use. This makes the +// zero value of the pool usable. +func (p *Pool) init() { + p.initOnce.Do(func() { + p.tasks = make(chan func()) + }) +} + +// panicIfInitialized will trigger a panic if a configuration method is called +// after the pool has started any goroutines for the first time. 
In the case that +// new settings are needed, a new pool should be created. +func (p *Pool) panicIfInitialized() { + if p.tasks != nil { + panic("pool can not be reconfigured after calling Go() for the first time") + } +} + +// WithErrors converts the pool to an ErrorPool so the submitted tasks can +// return errors. +func (p *Pool) WithErrors() *ErrorPool { + p.panicIfInitialized() + return &ErrorPool{ + pool: p.deref(), + } +} + +// deref is a helper that creates a shallow copy of the pool with the same +// settings. We don't want to just dereference the pointer because that makes +// the copylock lint angry. +func (p *Pool) deref() Pool { + p.panicIfInitialized() + return Pool{ + limiter: p.limiter, + } +} + +// WithContext converts the pool to a ContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *Pool) WithContext(ctx context.Context) *ContextPool { + p.panicIfInitialized() + ctx, cancel := context.WithCancel(ctx) + return &ContextPool{ + errorPool: p.WithErrors().deref(), + ctx: ctx, + cancel: cancel, + } +} + +func (p *Pool) worker(initialFunc func()) { + // The only time this matters is if the task panics. + // This makes it possible to spin up new workers in that case. + defer p.limiter.release() + + if initialFunc != nil { + initialFunc() + } + + for f := range p.tasks { + f() + } +} + +type limiter chan struct{} + +func (l limiter) limit() int { + return cap(l) +} + +func (l limiter) release() { + if l != nil { + <-l + } +} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go new file mode 100644 index 0000000000..6bc30dd63c --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/result_context_pool.go @@ -0,0 +1,85 @@ +package pool + +import ( + "context" +) + +// ResultContextPool is a pool that runs tasks that take a context and return a +// result. The context passed to the task will be canceled if any of the tasks +// return an error, which makes its functionality different than just capturing +// a context with the task closure. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ResultContextPool[T any] struct { + contextPool ContextPool + agg resultAggregator[T] + collectErrored bool +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ResultContextPool[T]) Go(f func(context.Context) (T, error)) { + idx := p.agg.nextIndex() + p.contextPool.Go(func(ctx context.Context) error { + res, err := f(ctx) + p.agg.save(idx, res, err != nil) + return err + }) +} + +// Wait cleans up all spawned goroutines, propagates any panics, and +// returns an error if any of the tasks errored. +func (p *ResultContextPool[T]) Wait() ([]T, error) { + err := p.contextPool.Wait() + results := p.agg.collect(p.collectErrored) + p.agg = resultAggregator[T]{} + return results, err +} + +// WithCollectErrored configures the pool to still collect the result of a task +// even if the task returned an error. By default, the result of tasks that errored +// are ignored and only the error is collected. 
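+// When enabled, the value returned alongside the error (typically the
+// zero value of T) is kept at the task's position in the results.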
+func (p *ResultContextPool[T]) WithCollectErrored() *ResultContextPool[T] { + p.panicIfInitialized() + p.collectErrored = true + return p +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +func (p *ResultContextPool[T]) WithFirstError() *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithFirstError() + return p +} + +// WithCancelOnError configures the pool to cancel its context as soon as +// any task returns an error. By default, the pool's context is not +// canceled until the parent context is canceled. +func (p *ResultContextPool[T]) WithCancelOnError() *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithCancelOnError() + return p +} + +// WithFailFast is an alias for the combination of WithFirstError and +// WithCancelOnError. By default, the errors from all tasks are returned and +// the pool's context is not canceled until the parent context is canceled. +func (p *ResultContextPool[T]) WithFailFast() *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithFailFast() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ResultContextPool[T]) WithMaxGoroutines(n int) *ResultContextPool[T] { + p.panicIfInitialized() + p.contextPool.WithMaxGoroutines(n) + return p +} + +func (p *ResultContextPool[T]) panicIfInitialized() { + p.contextPool.panicIfInitialized() +} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go new file mode 100644 index 0000000000..832cd9bb47 --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/result_error_pool.go @@ -0,0 +1,80 @@ +package pool + +import ( + "context" +) + +// ResultErrorPool is a pool that executes tasks that return a generic result +// type and an error. Tasks are executed in the pool with Go(), then the +// results of the tasks are returned by Wait(). +// +// The order of the results is guaranteed to be the same as the order the +// tasks were submitted. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +type ResultErrorPool[T any] struct { + errorPool ErrorPool + agg resultAggregator[T] + collectErrored bool +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ResultErrorPool[T]) Go(f func() (T, error)) { + idx := p.agg.nextIndex() + p.errorPool.Go(func() error { + res, err := f() + p.agg.save(idx, res, err != nil) + return err + }) +} + +// Wait cleans up any spawned goroutines, propagating any panics and +// returning the results and any errors from tasks. +func (p *ResultErrorPool[T]) Wait() ([]T, error) { + err := p.errorPool.Wait() + results := p.agg.collect(p.collectErrored) + p.agg = resultAggregator[T]{} // reset for reuse + return results, err +} + +// WithCollectErrored configures the pool to still collect the result of a task +// even if the task returned an error. By default, the result of tasks that errored +// are ignored and only the error is collected. +func (p *ResultErrorPool[T]) WithCollectErrored() *ResultErrorPool[T] { + p.panicIfInitialized() + p.collectErrored = true + return p +} + +// WithContext converts the pool to a ResultContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. 
+// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ResultErrorPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] { + p.panicIfInitialized() + return &ResultContextPool[T]{ + contextPool: *p.errorPool.WithContext(ctx), + } +} + +// WithFirstError configures the pool to only return the first error +// returned by a task. By default, Wait() will return a combined error. +func (p *ResultErrorPool[T]) WithFirstError() *ResultErrorPool[T] { + p.panicIfInitialized() + p.errorPool.WithFirstError() + return p +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. +func (p *ResultErrorPool[T]) WithMaxGoroutines(n int) *ResultErrorPool[T] { + p.panicIfInitialized() + p.errorPool.WithMaxGoroutines(n) + return p +} + +func (p *ResultErrorPool[T]) panicIfInitialized() { + p.errorPool.panicIfInitialized() +} diff --git a/vendor/github.com/sourcegraph/conc/pool/result_pool.go b/vendor/github.com/sourcegraph/conc/pool/result_pool.go new file mode 100644 index 0000000000..f73a77261e --- /dev/null +++ b/vendor/github.com/sourcegraph/conc/pool/result_pool.go @@ -0,0 +1,142 @@ +package pool + +import ( + "context" + "sort" + "sync" +) + +// NewWithResults creates a new ResultPool for tasks with a result of type T. +// +// The configuration methods (With*) will panic if they are used after calling +// Go() for the first time. +func NewWithResults[T any]() *ResultPool[T] { + return &ResultPool[T]{ + pool: *New(), + } +} + +// ResultPool is a pool that executes tasks that return a generic result type. +// Tasks are executed in the pool with Go(), then the results of the tasks are +// returned by Wait(). +// +// The order of the results is guaranteed to be the same as the order the +// tasks were submitted. +type ResultPool[T any] struct { + pool Pool + agg resultAggregator[T] +} + +// Go submits a task to the pool. If all goroutines in the pool +// are busy, a call to Go() will block until the task can be started. +func (p *ResultPool[T]) Go(f func() T) { + idx := p.agg.nextIndex() + p.pool.Go(func() { + p.agg.save(idx, f(), false) + }) +} + +// Wait cleans up all spawned goroutines, propagating any panics, and returning +// a slice of results from tasks that did not panic. +func (p *ResultPool[T]) Wait() []T { + p.pool.Wait() + results := p.agg.collect(true) + p.agg = resultAggregator[T]{} // reset for reuse + return results +} + +// MaxGoroutines returns the maximum size of the pool. +func (p *ResultPool[T]) MaxGoroutines() int { + return p.pool.MaxGoroutines() +} + +// WithErrors converts the pool to an ResultErrorPool so the submitted tasks +// can return errors. +func (p *ResultPool[T]) WithErrors() *ResultErrorPool[T] { + p.panicIfInitialized() + return &ResultErrorPool[T]{ + errorPool: *p.pool.WithErrors(), + } +} + +// WithContext converts the pool to a ResultContextPool for tasks that should +// run under the same context, such that they each respect shared cancellation. +// For example, WithCancelOnError can be configured on the returned pool to +// signal that all goroutines should be cancelled upon the first error. +func (p *ResultPool[T]) WithContext(ctx context.Context) *ResultContextPool[T] { + p.panicIfInitialized() + return &ResultContextPool[T]{ + contextPool: *p.pool.WithContext(ctx), + } +} + +// WithMaxGoroutines limits the number of goroutines in a pool. +// Defaults to unlimited. Panics if n < 1. 
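+// Tasks submitted beyond the limit block in Go() until a worker frees up.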
+func (p *ResultPool[T]) WithMaxGoroutines(n int) *ResultPool[T] { + p.panicIfInitialized() + p.pool.WithMaxGoroutines(n) + return p +} + +func (p *ResultPool[T]) panicIfInitialized() { + p.pool.panicIfInitialized() +} + +// resultAggregator is a utility type that lets us safely append from multiple +// goroutines. The zero value is valid and ready to use. +type resultAggregator[T any] struct { + mu sync.Mutex + len int + results []T + errored []int +} + +// nextIndex reserves a slot for a result. The returned value should be passed +// to save() when adding a result to the aggregator. +func (r *resultAggregator[T]) nextIndex() int { + r.mu.Lock() + defer r.mu.Unlock() + + nextIdx := r.len + r.len += 1 + return nextIdx +} + +func (r *resultAggregator[T]) save(i int, res T, errored bool) { + r.mu.Lock() + defer r.mu.Unlock() + + if i >= len(r.results) { + old := r.results + r.results = make([]T, r.len) + copy(r.results, old) + } + + r.results[i] = res + + if errored { + r.errored = append(r.errored, i) + } +} + +// collect returns the set of aggregated results. +func (r *resultAggregator[T]) collect(collectErrored bool) []T { + if !r.mu.TryLock() { + panic("collect should not be called until all goroutines have exited") + } + + if collectErrored || len(r.errored) == 0 { + return r.results + } + + filtered := r.results[:0] + sort.Ints(r.errored) + for i, e := range r.errored { + if i == 0 { + filtered = append(filtered, r.results[:e]...) + } else { + filtered = append(filtered, r.results[r.errored[i-1]+1:e]...) + } + } + return filtered +} diff --git a/vendor/github.com/spf13/afero/.editorconfig b/vendor/github.com/spf13/afero/.editorconfig index 4492e9f9fe..a85749f190 100644 --- a/vendor/github.com/spf13/afero/.editorconfig +++ b/vendor/github.com/spf13/afero/.editorconfig @@ -10,3 +10,6 @@ trim_trailing_whitespace = true [*.go] indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/vendor/github.com/spf13/afero/.golangci.yaml b/vendor/github.com/spf13/afero/.golangci.yaml index 806289a250..4f359b81af 100644 --- a/vendor/github.com/spf13/afero/.golangci.yaml +++ b/vendor/github.com/spf13/afero/.golangci.yaml @@ -1,18 +1,48 @@ -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/afero) +version: "2" + +run: + timeout: 10m linters: - disable-all: true - enable: - - gci - - gofmt - - gofumpt - - staticcheck - -issues: - exclude-dirs: - - gcsfs/internal/stiface + enable: + - govet + - ineffassign + - misspell + - nolintlint + # - revive + - staticcheck + - unused + + disable: + - errcheck + # - staticcheck + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + + exclusions: + paths: + - gcsfs/internal/stiface + +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + - golines + + settings: + gci: + sections: + - standard + - default + - localmodule + + exclusions: + paths: + - gcsfs/internal/stiface diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 86f1545543..ef67e9a77e 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -1,479 +1,474 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) +afero logo-sm -A FileSystem Abstraction System for Go -[![GitHub Workflow 
Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) -[![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero?style=flat-square)](https://goreportcard.com/report/github.com/spf13/afero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) +[![GoDoc](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero)](https://goreportcard.com/report/github.com/spf13/afero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square") -# Overview -Afero is a filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. +# Afero: The Universal Filesystem Abstraction for Go -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with, while retaining all the power -and benefit of the os and ioutil packages. +Afero is a powerful and extensible filesystem abstraction system for Go. It provides a single, unified API for interacting with diverse filesystems—including the local disk, memory, archives, and network storage. -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. +Afero acts as a drop-in replacement for the standard `os` package, enabling you to write modular code that is agnostic to the underlying storage, dramatically simplifies testing, and allows for sophisticated architectural patterns through filesystem composition. -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. +## Why Afero? +Afero elevates filesystem interaction beyond simple file reading and writing, offering solutions for testability, flexibility, and advanced architecture. 
-## Afero Features +🔑 **Key Features:** -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware -* Wrapper for go 1.16 filesystem abstraction `io/fs.FS` +* **Universal API:** Write your code once. Run it against the local OS, in-memory storage, ZIP/TAR archives, or remote systems (SFTP, GCS). +* **Ultimate Testability:** Utilize `MemMapFs`, a fully concurrent-safe, read/write in-memory filesystem. Write fast, isolated, and reliable unit tests without touching the physical disk or worrying about cleanup. +* **Powerful Composition:** Afero's hidden superpower. Layer filesystems on top of each other to create sophisticated behaviors: + * **Sandboxing:** Use `CopyOnWriteFs` to create temporary scratch spaces that isolate changes from the base filesystem. + * **Caching:** Use `CacheOnReadFs` to automatically layer a fast cache (like memory) over a slow backend (like a network drive). + * **Security Jails:** Use `BasePathFs` to restrict application access to a specific subdirectory (chroot). +* **`os` Package Compatibility:** Afero mirrors the functions in the standard `os` package, making adoption and refactoring seamless. +* **`io/fs` Compatibility:** Fully compatible with the Go standard library's `io/fs` interfaces. -# Using Afero +## Installation -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero +```bash +go get github.com/spf13/afero +``` -Next include Afero in your application. ```go import "github.com/spf13/afero" ``` -## Step 2: Declare a backend +## Quick Start: The Power of Abstraction + +The core of Afero is the `afero.Fs` interface. By designing your functions to accept this interface rather than calling `os.*` functions directly, your code instantly becomes more flexible and testable. + +### 1. Refactor Your Code + +Change functions that rely on the `os` package to accept `afero.Fs`. -First define a package variable and set it to a pointer to a filesystem. ```go -var AppFs = afero.NewMemMapFs() +// Before: Coupled to the OS and difficult to test +// func ProcessConfiguration(path string) error { +// data, err := os.ReadFile(path) +// ... +// } -or +import "github.com/spf13/afero" -var AppFs = afero.NewOsFs() +// After: Decoupled, flexible, and testable +func ProcessConfiguration(fs afero.Fs, path string) error { + // Use Afero utility functions which mirror os/ioutil + data, err := afero.ReadFile(fs, path) + // ... process the data + return err +} ``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. 
-## Step 3: Use it like you would the OS package +### 2. Usage in Production -Throughout your application use any function and method like you normally -would. +In your production environment, inject the `OsFs` backend, which wraps the standard operating system calls. -So if my application before had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: ```go -AppFs.Open("/tmp/foo") +func main() { + // Use the real OS filesystem + AppFs := afero.NewOsFs() + ProcessConfiguration(AppFs, "/etc/myapp.conf") +} ``` -`AppFs` being the variable we defined above. +### 3. Usage in Testing +In your tests, inject `MemMapFs`. This provides a blazing-fast, isolated, in-memory filesystem that requires no disk I/O and no cleanup. -## List of all available functions - -File System Methods Available: ```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error +func TestProcessConfiguration(t *testing.T) { + // Use the in-memory filesystem + AppFs := afero.NewMemMapFs() + + // Pre-populate the memory filesystem for the test + configPath := "/test/config.json" + afero.WriteFile(AppFs, configPath, []byte(`{"feature": true}`), 0644) + + // Run the test entirely in memory + err := ProcessConfiguration(AppFs, configPath) + if err != nil { + t.Fatal(err) + } +} ``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. -## Using Afero's utility functions +## Afero's Superpower: Composition -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. +Afero's most unique feature is its ability to combine filesystems. This allows you to build complex behaviors out of simple components, keeping your application logic clean. -The afero utilities support all afero compatible backends. +### Example 1: Sandboxing with Copy-on-Write -The list of utilities includes: +Create a temporary environment where an application can "modify" system files without affecting the actual disk. 
```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) +// 1. The base layer is the real OS, made read-only for safety. +baseFs := afero.NewReadOnlyFs(afero.NewOsFs()) -They are available under two different approaches to use. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. +// 2. The overlay layer is a temporary in-memory filesystem for changes. +overlayFs := afero.NewMemMapFs() -### Calling utilities directly +// 3. Combine them. Reads fall through to the base; writes only hit the overlay. +sandboxFs := afero.NewCopyOnWriteFs(baseFs, overlayFs) -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs,"", "ioutil-test") +// The application can now "modify" /etc/hosts, but the changes are isolated in memory. +afero.WriteFile(sandboxFs, "/etc/hosts", []byte("127.0.0.1 sandboxed-app"), 0644) +// The real /etc/hosts on disk is untouched. ``` -### Calling via Afero +### Example 2: Caching a Slow Filesystem -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` +Improve performance by layering a fast cache (like memory) over a slow backend (like a network drive or cloud storage). -## Using Afero for Testing +```go +import "time" -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. +// Assume 'remoteFs' is a slow backend (e.g., SFTP or GCS) +var remoteFs afero.Fs -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far more easier to do -* No test cleanup needed +// 'cacheFs' is a fast in-memory backend +cacheFs := afero.NewMemMapFs() -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs() during testing you -can set it to afero.NewMemMapFs(). +// Create the caching layer. Cache items for 5 minutes upon first read. +cachedFs := afero.NewCacheOnReadFs(remoteFs, cacheFs, 5*time.Minute) -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that Tests are order -independent, with no test relying on the state left by an earlier test. 
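// Note: per CacheOnReadFs semantics, a cache duration of 0 means "forever":
// once a file lands in the cache layer, it is never re-requested from the base.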
+// The first read is slow (fetches from remote, then caches) +data1, _ := afero.ReadFile(cachedFs, "data.json") -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} +// The second read is instant (serves from memory cache) +data2, _ := afero.ReadFile(cachedFs, "data.json") ``` -# Available Backends +### Example 3: Security Jails (chroot) + +Restrict an application component's access to a specific subdirectory. -## Operating System Native +```go +osFs := afero.NewOsFs() -### OsFs +// Create a filesystem rooted at /home/user/public +// The application cannot access anything above this directory. +jailedFs := afero.NewBasePathFs(osFs, "/home/user/public") -The first is simply a wrapper around the native OS calls. This makes it -very easy to use as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed. +// To the application, this is reading "/" +// In reality, it's reading "/home/user/public/" +dirInfo, err := afero.ReadDir(jailedFs, "/") -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) +// Attempts to access parent directories fail +_, err = jailedFs.Open("../secrets.txt") // Returns an error ``` -## Memory Backed Storage +## Real-World Use Cases -### MemMapFs +### Build Cloud-Agnostic Applications -Afero also provides a fully atomic memory backed filesystem perfect for use in -mocking and to speed up unnecessary disk io when persistence isn’t -necessary. It is fully concurrent and will work within go routines -safely. +Write applications that seamlessly work with different storage backends: ```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` +type DocumentProcessor struct { + fs afero.Fs +} + +func NewDocumentProcessor(fs afero.Fs) *DocumentProcessor { + return &DocumentProcessor{fs: fs} +} -#### InMemoryFile +func (p *DocumentProcessor) Process(inputPath, outputPath string) error { + // This code works whether fs is local disk, cloud storage, or memory + content, err := afero.ReadFile(p.fs, inputPath) + if err != nil { + return err + } + + processed := processContent(content) + return afero.WriteFile(p.fs, outputPath, processed, 0644) +} -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. +// Use with local filesystem +processor := NewDocumentProcessor(afero.NewOsFs()) -## Network Interfaces +// Use with Google Cloud Storage +processor := NewDocumentProcessor(gcsFS) -### SftpFs +// Use with in-memory filesystem for testing +processor := NewDocumentProcessor(afero.NewMemMapFs()) +``` -Afero has experimental support for secure file transfer protocol (sftp). Which can -be used to perform file operations over a encrypted channel. +### Treating Archives as Filesystems -### GCSFs +Read files directly from `.zip` or `.tar` archives without unpacking them to disk first. -Afero has experimental support for Google Cloud Storage (GCS). 
You can either set the -`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in -`NewGcsFS` to configure access to your GCS bucket. +```go +import ( + "archive/zip" + "github.com/spf13/afero/zipfs" +) -Some known limitations of the existing implementation: -* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. -* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that's is left for another version. -* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. +// Assume 'zipReader' is a *zip.Reader initialized from a file or memory +var zipReader *zip.Reader +// Create a read-only ZipFs +archiveFS := zipfs.New(zipReader) -## Filtering Backends +// Read a file from within the archive using the standard Afero API +content, err := afero.ReadFile(archiveFS, "/docs/readme.md") +``` -### BasePathFs +### Serving Any Filesystem over HTTP -The BasePathFs restricts all operations to a given path within an Fs. -The given file name to the operations on this Fs will be prepended with -the base path before calling the source Fs. +Use `HttpFs` to expose any Afero filesystem—even one created dynamically in memory—through a standard Go web server. ```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` +import ( + "net/http" + "github.com/spf13/afero" +) -### ReadOnlyFs +func main() { + memFS := afero.NewMemMapFs() + afero.WriteFile(memFS, "index.html", []byte("

          <h1>Hello from Memory!</h1>
          "), 0644) -A thin wrapper around the source Fs providing a read only view. + // Wrap the memory filesystem to make it compatible with http.FileServer. + httpFS := afero.NewHttpFs(memFS) -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM + http.Handle("/", http.FileServer(httpFS.Dir("/"))) + http.ListenAndServe(":8080", nil) +} ``` -# RegexpFs +### Testing Made Simple -A filtered view on file names, any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. +One of Afero's greatest strengths is making filesystem-dependent code easily testable: ```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` +func SaveUserData(fs afero.Fs, userID string, data []byte) error { + filename := fmt.Sprintf("users/%s.json", userID) + return afero.WriteFile(fs, filename, data, 0644) +} -### HttpFs +func TestSaveUserData(t *testing.T) { + // Create a clean, fast, in-memory filesystem for testing + testFS := afero.NewMemMapFs() + + userData := []byte(`{"name": "John", "email": "john@example.com"}`) + err := SaveUserData(testFS, "123", userData) + + if err != nil { + t.Fatalf("SaveUserData failed: %v", err) + } + + // Verify the file was saved correctly + saved, err := afero.ReadFile(testFS, "users/123.json") + if err != nil { + t.Fatalf("Failed to read saved file: %v", err) + } + + if string(saved) != string(userData) { + t.Errorf("Data mismatch: got %s, want %s", saved, userData) + } +} +``` -Afero provides an http compatible backend which can wrap any of the existing -backends. +**Benefits of testing with Afero:** +- ⚡ **Fast** - No disk I/O, tests run in memory +- 🔄 **Reliable** - Each test starts with a clean slate +- 🧹 **No cleanup** - Memory is automatically freed +- 🔒 **Safe** - Can't accidentally modify real files +- 🏃 **Parallel** - Tests can run concurrently without conflicts + +## Backend Reference + +| Type | Backend | Constructor | Description | Status | +| :--- | :--- | :--- | :--- | :--- | +| **Core** | **OsFs** | `afero.NewOsFs()` | Interacts with the real operating system filesystem. Use in production. | ✅ Official | +| | **MemMapFs** | `afero.NewMemMapFs()` | A fast, atomic, concurrent-safe, in-memory filesystem. Ideal for testing. | ✅ Official | +| **Composition** | **CopyOnWriteFs**| `afero.NewCopyOnWriteFs(base, overlay)` | A read-only base with a writable overlay. Ideal for sandboxing. | ✅ Official | +| | **CacheOnReadFs**| `afero.NewCacheOnReadFs(base, cache, ttl)` | Lazily caches files from a slow base into a fast layer on first read. | ✅ Official | +| | **BasePathFs** | `afero.NewBasePathFs(source, path)` | Restricts operations to a subdirectory (chroot/jail). | ✅ Official | +| | **ReadOnlyFs** | `afero.NewReadOnlyFs(source)` | Provides a read-only view, preventing any modifications. | ✅ Official | +| | **RegexpFs** | `afero.NewRegexpFs(source, regexp)` | Filters a filesystem, only showing files that match a regex. | ✅ Official | +| **Utility** | **HttpFs** | `afero.NewHttpFs(source)` | Wraps any Afero filesystem to be served via `http.FileServer`. | ✅ Official | +| **Archives** | **ZipFs** | `zipfs.New(zipReader)` | Read-only access to files within a ZIP archive. | ✅ Official | +| | **TarFs** | `tarfs.New(tarReader)` | Read-only access to files within a TAR archive. 
| ✅ Official | +| **Network** | **GcsFs** | `gcsfs.NewGcsFs(...)` | Google Cloud Storage backend. | ⚡ Experimental | +| | **SftpFs** | `sftpfs.New(...)` | SFTP backend. | ⚡ Experimental | +| **3rd Party Cloud** | **S3Fs** | [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) | Production-ready S3 backend built on official AWS SDK. | 🔹 3rd Party | +| | **MinioFs** | [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) | MinIO object storage backend with S3 compatibility. | 🔹 3rd Party | +| | **DriveFs** | [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) | Google Drive backend with streaming support. | 🔹 3rd Party | +| | **DropboxFs** | [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) | Dropbox backend with streaming support. | 🔹 3rd Party | +| **3rd Party Specialized** | **GitFs** | [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) | Git repository filesystem (read-only, Afero compatible). | 🔹 3rd Party | +| | **DockerFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Docker container filesystem access. | 🔹 3rd Party | +| | **GitHubFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | GitHub repository and releases filesystem. | 🔹 3rd Party | +| | **FilterFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Filesystem filtering with predicates. | 🔹 3rd Party | +| | **IgnoreFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | .gitignore-aware filtering filesystem. | 🔹 3rd Party | +| | **FUSEFs** | [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) | Generic FUSE implementation using any Afero backend. | 🔹 3rd Party | + +## Afero vs. `io/fs` (Go 1.16+) + +Go 1.16 introduced the `io/fs` package, which provides a standard abstraction for **read-only** filesystems. + +Afero complements `io/fs` by focusing on different needs: + +* **Use `io/fs` when:** You only need to read files and want to conform strictly to the standard library interfaces. +* **Use Afero when:** + * Your application needs to **create, write, modify, or delete** files. + * You need to test complex read/write interactions (e.g., renaming, concurrent writes). + * You need advanced compositional features (Copy-on-Write, Caching, etc.). + +Afero is fully compatible with `io/fs`. You can wrap any Afero filesystem to satisfy the `fs.FS` interface using `afero.NewIOFS`: -The Http package requires a slightly specific version of Open which -returns an http.File type. +```go +import "io/fs" -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. +// Create an Afero filesystem (writable) +var myAferoFs afero.Fs = afero.NewMemMapFs() -```go -httpFs := afero.NewHttpFs() -fileserver := http.FileServer(httpFs.Dir()) -http.Handle("/", fileserver) +// Convert it to a standard library fs.FS (read-only view) +var myIoFs fs.FS = afero.NewIOFS(myAferoFs) ``` -## Composite Backends +## Third-Party Backends & Ecosystem -Afero provides the ability have two filesystems (or more) act as a single -file system. +The Afero community has developed numerous backends and tools that extend the library's capabilities. Below are curated, well-maintained options organized by maturity and reliability. -### CacheOnReadFs +### Featured Community Backends -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. 
Subsequent reads will be pulled from the overlay -directly permitting the request is within the cache duration of when it was -created in the overlay. +These are mature, reliable backends that we can confidently recommend for production use: -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles like `Write()` or `Truncate()` to the overlay first. +#### **Amazon S3** - [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) +Production-ready S3 backend built on the official AWS SDK for Go. -To writing files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). +```go +import "github.com/fclairamb/afero-s3" -Cache files in the layer for the given time.Duration, a cache duration of 0 -means "forever" meaning the file will not be re-requested from the base ever. +s3fs := s3.NewFs(bucket, session) +``` -A read-only base will make the overlay also read-only but still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. +#### **MinIO** - [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) +MinIO object storage backend providing S3-compatible object storage with deduplication and optimization features. ```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) +import "github.com/cpyun/afero-minio" + +minioFs := miniofs.NewMinioFs(ctx, "minio://endpoint/bucket") ``` -### CopyOnWriteFs() +### Community & Specialized Backends -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top. +#### Cloud Storage -Read operations will first look in the overlay and if not found there, will -serve the file from the base. +- **Google Drive** - [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) + Streaming support; no write-seeking or POSIX permissions; no files listing cache -Changes to the file system will only be made in the overlay. +- **Dropbox** - [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) + Streaming support; no write-seeking or POSIX permissions -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). +#### Version Control Systems -Removing and Renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. +- **Git Repositories** - [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) + Read-only filesystem abstraction for Git repositories. Works with bare repositories and provides filesystem view of any git reference. Uses go-git for repository access. -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) +#### Container and Remote Systems - fh, _ = ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` +- **Docker Containers** - [`unmango/aferox`](https://github.com/unmango/aferox) + Access Docker container filesystems as if they were local filesystems + +- **GitHub API** - [`unmango/aferox`](https://github.com/unmango/aferox) + Turn GitHub repositories, releases, and assets into browsable filesystems -In this example all write operations will only occur in memory (MemMapFs) -leaving the base filesystem (OsFs) untouched. 
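To make the copy-on-write guarantee concrete, here is a small self-contained sketch (using an in-memory base purely for illustration) showing that writes through the union never reach the base filesystem:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Stand-in for the real base filesystem.
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/etc/motd", []byte("original"), 0644)

	// Read-only base + writable in-memory overlay.
	ufs := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), afero.NewMemMapFs())

	// Modifying the file through the union copies it up into the overlay.
	afero.WriteFile(ufs, "/etc/motd", []byte("changed"), 0644)

	unionView, _ := afero.ReadFile(ufs, "/etc/motd")
	baseView, _ := afero.ReadFile(base, "/etc/motd")

	fmt.Println(string(unionView)) // changed
	fmt.Println(string(baseView))  // original
}
```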
+#### FUSE Integration +- **Generic FUSE** - [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) + Mount any Afero filesystem as a FUSE filesystem, allowing any Afero backend to be used as a real mounted filesystem -## Desired/possible backends +#### Specialized Filesystems -The following is a short list of possible backends we hope someone will -implement: +- **FAT32 Support** - [`aligator/GoFAT`](https://github.com/aligator/GoFAT) + Pure Go FAT filesystem implementation (currently read-only) -* SSH -* S3 +### Interface Adapters & Utilities -# About the project +**Cross-Interface Compatibility:** +- [`jfontan/go-billy-desfacer`](https://github.com/jfontan/go-billy-desfacer) - Adapter between Afero and go-billy interfaces (for go-git compatibility) +- [`Maldris/go-billy-afero`](https://github.com/Maldris/go-billy-afero) - Alternative wrapper for using Afero with go-billy +- [`c4milo/afero2billy`](https://github.com/c4milo/afero2billy) - Another Afero to billy filesystem adapter -## What's in the name +**Working Directory Management:** +- [`carolynvs/aferox`](https://github.com/carolynvs/aferox) - Working directory-aware filesystem wrapper -Afero comes from the latin roots Ad-Facere. +**Advanced Filtering:** +- [`unmango/aferox`](https://github.com/unmango/aferox) includes multiple specialized filesystems: + - **FilterFs** - Predicate-based file filtering + - **IgnoreFs** - .gitignore-aware filtering + - **WriterFs** - Dump writes to io.Writer for debugging -**"Ad"** is a prefix meaning "to". +#### Developer Tools & Utilities -**"Facere"** is a form of the root "faciō" making "make or do". +**nhatthm Utility Suite** - Essential tools for Afero development: +- [`nhatthm/aferocopy`](https://github.com/nhatthm/aferocopy) - Copy files between any Afero filesystems +- [`nhatthm/aferomock`](https://github.com/nhatthm/aferomock) - Mocking toolkit for testing +- [`nhatthm/aferoassert`](https://github.com/nhatthm/aferoassert) - Assertion helpers for filesystem testing -The literal meaning of afero is "to make" or "to do" which seems very fitting -for a library that allows one to make files and directories and do things with them. +### Ecosystem Showcase -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept but as a noun it means "something that is made or done" or "an -object of a particular type". +**Windows Virtual Drives** - [`balazsgrill/potatodrive`](https://github.com/balazsgrill/potatodrive) +Mount any Afero filesystem as a Windows drive letter. Brilliant demonstration of Afero's power! -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. +### Modern Asset Embedding (Go 1.16+) -## Release Notes +Instead of third-party tools, use Go's native `//go:embed` with Afero: -See the [Releases Page](https://github.com/spf13/afero/releases). +```go +import ( + "embed" + "github.com/spf13/afero" +) + +//go:embed assets/* +var assetsFS embed.FS + +func main() { + // Convert embedded files to Afero filesystem + fs := afero.FromIOFS(assetsFS) + + // Use like any other Afero filesystem + content, _ := afero.ReadFile(fs, "assets/config.json") +} +``` ## Contributing -1. Fork it +We welcome contributions! The project is mature, but we are actively looking for contributors to help implement and stabilize network/cloud backends. 
+ +* 🔥 **Microsoft Azure Blob Storage** +* 🔒 **Modern Encryption Backend** - Built on secure, contemporary crypto (not legacy EncFS) +* 🐙 **Canonical go-git Adapter** - Unified solution for Git integration +* 📡 **SSH/SCP Backend** - Secure remote file operations +* Stabilization of existing experimental backends (GCS, SFTP) + +To contribute: +1. Fork the repository 2. Create your feature branch (`git checkout -b my-new-feature`) 3. Commit your changes (`git commit -am 'Add some feature'`) 4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releasing - -As of version 1.14.0, Afero moved implementations with third-party libraries to -their own submodules. - -Releasing a new version now requires a few steps: - -``` -VERSION=X.Y.Z -git tag -a v$VERSION -m "Release $VERSION" -git push origin v$VERSION - -cd gcsfs -go get github.com/spf13/afero@v$VERSION -go mod tidy -git commit -am "Update afero to v$VERSION" -git tag -a gcsfs/v$VERSION -m "Release gcsfs $VERSION" -git push origin gcsfs/v$VERSION -cd .. - -cd sftpfs -go get github.com/spf13/afero@v$VERSION -go mod tidy -git commit -am "Update afero to v$VERSION" -git tag -a sftpfs/v$VERSION -m "Release sftpfs $VERSION" -git push origin sftpfs/v$VERSION -cd .. - -git push -``` +5. Create a new Pull Request -TODO: move these instructions to a Makefile or something +## 📄 License -## Contributors +Afero is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) for details. -Names in no particular order: +## 🔗 Additional Resources -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) +- [📖 Full API Documentation](https://pkg.go.dev/github.com/spf13/afero) +- [🎯 Examples Repository](https://github.com/spf13/afero/tree/master/examples) +- [📋 Release Notes](https://github.com/spf13/afero/releases) +- [❓ GitHub Discussions](https://github.com/spf13/afero/discussions) -## License +--- -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) +*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.* diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index 184d6dd702..aba2879ebb 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -34,7 +34,8 @@ func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { _, err := u.base.Stat(name) if err != nil { if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || + oerr.Err == syscall.ENOTDIR { return false, nil } } @@ -237,7 +238,11 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return u.layer.OpenFile(name, flag, perm) } - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: syscall.ENOTDIR, + } // ...or os.ErrNotExist? 
} if b { return u.base.OpenFile(name, flag, perm) diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index b13155ca4a..57ba5673ec 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -137,7 +137,7 @@ type readDirFile struct { var _ fs.ReadDirFile = readDirFile{} func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) + items, err := r.Readdir(n) if err != nil { return nil, err } @@ -161,7 +161,12 @@ var _ Fs = FromIOFS{} func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } +func (f FromIOFS) Mkdir( + name string, + perm os.FileMode, +) error { + return notImplemented("mkdir", name) +} func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { return notImplemented("mkdirall", path) diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go index 89c1bfc0a7..2dcbdb1f09 100644 --- a/vendor/github.com/spf13/afero/lstater.go +++ b/vendor/github.com/spf13/afero/lstater.go @@ -19,9 +19,9 @@ import ( // Lstater is an optional interface in Afero. It is only implemented by the // filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem. // Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not. type Lstater interface { LstatIfPossible(name string) (os.FileInfo, bool, error) } diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 62fe4498e1..c77fcd40e9 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -150,7 +150,11 @@ func (f *File) Sync() error { func (f *File) Readdir(count int) (res []os.FileInfo, err error) { if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + return nil, &os.PathError{ + Op: "readdir", + Path: f.fileData.name, + Err: errors.New("not a dir"), + } } var outLength int64 @@ -236,7 +240,11 @@ func (f *File) Truncate(size int64) error { return ErrFileClosed } if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return &os.PathError{ + Op: "truncate", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } if size < 0 { return ErrOutOfRange @@ -273,7 +281,11 @@ func (f *File) Write(b []byte) (n int, err error) { return 0, ErrFileClosed } if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + return 0, &os.PathError{ + Op: "write", + Path: f.fileData.name, + Err: errors.New("file handle is read only"), + } } n = len(b) cur := atomic.LoadInt64(&f.at) @@ -285,7 +297,9 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) + f.fileData.data = append( + f.fileData.data, + append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) f.fileData.data = append(f.fileData.data, tail...) 
} else { f.fileData.data = append(f.fileData.data[:cur], b...) diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 62dd6c93c8..2e2253f55c 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -92,7 +92,8 @@ func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { func (f *UnionFile) Write(s []byte) (n int, err error) { if f.Layer != nil { n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + if err == nil && + f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? _, err = f.Base.Write(s) } return n, err @@ -157,7 +158,7 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err // return a single view of the overlayed directories. // At the end of the directory view, the error is io.EOF if c > 0. func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger + merge := f.Merger if merge == nil { merge = defaultUnionMergeDirsFn } diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 9e4cba2746..2317688388 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -113,11 +113,11 @@ func GetTempDir(fs Fs, subPath string) string { if subPath != "" { // preserve windows backslash :-( if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) + subPath = strings.ReplaceAll(subPath, "\\", "____") } dir = dir + UnicodeSanitize((subPath)) if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) + dir = strings.ReplaceAll(dir, "____", "\\") } if exists, _ := Exists(fs, dir); exists { diff --git a/vendor/github.com/spf13/cast/.editorconfig b/vendor/github.com/spf13/cast/.editorconfig new file mode 100644 index 0000000000..a85749f190 --- /dev/null +++ b/vendor/github.com/spf13/cast/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/vendor/github.com/spf13/cast/.golangci.yaml b/vendor/github.com/spf13/cast/.golangci.yaml new file mode 100644 index 0000000000..e00fd47aa2 --- /dev/null +++ b/vendor/github.com/spf13/cast/.golangci.yaml @@ -0,0 +1,39 @@ +version: "2" + +run: + timeout: 10m + +linters: + enable: + - errcheck + - govet + - ineffassign + - misspell + - nolintlint + # - revive + - unused + + disable: + - staticcheck + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + +formatters: + enable: + - gci + - gofmt + # - gofumpt + - goimports + # - golines + + settings: + gci: + sections: + - standard + - default + - localmodule diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index 1be666a456..c58eccb3fd 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,9 +1,9 @@ # cast -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) 
-[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/cast) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/spf13/cast?style=flat-square&color=61CFDD) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/spf13/cast/badge?style=flat-square)](https://deps.dev/go/github.com%252Fspf13%252Fcast) Easy and safe casting from one type to another in Go @@ -73,3 +73,7 @@ the code for a complete set. var eight interface{} = 8 cast.ToInt(eight) // 8 cast.ToInt(nil) // 0 + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/spf13/cast/alias.go b/vendor/github.com/spf13/cast/alias.go new file mode 100644 index 0000000000..855d60005d --- /dev/null +++ b/vendor/github.com/spf13/cast/alias.go @@ -0,0 +1,69 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. +package cast + +import ( + "reflect" + "slices" +) + +var kindNames = []string{ + reflect.String: "string", + reflect.Bool: "bool", + reflect.Int: "int", + reflect.Int8: "int8", + reflect.Int16: "int16", + reflect.Int32: "int32", + reflect.Int64: "int64", + reflect.Uint: "uint", + reflect.Uint8: "uint8", + reflect.Uint16: "uint16", + reflect.Uint32: "uint32", + reflect.Uint64: "uint64", + reflect.Float32: "float32", + reflect.Float64: "float64", +} + +var kinds = map[reflect.Kind]func(reflect.Value) any{ + reflect.String: func(v reflect.Value) any { return v.String() }, + reflect.Bool: func(v reflect.Value) any { return v.Bool() }, + reflect.Int: func(v reflect.Value) any { return int(v.Int()) }, + reflect.Int8: func(v reflect.Value) any { return int8(v.Int()) }, + reflect.Int16: func(v reflect.Value) any { return int16(v.Int()) }, + reflect.Int32: func(v reflect.Value) any { return int32(v.Int()) }, + reflect.Int64: func(v reflect.Value) any { return v.Int() }, + reflect.Uint: func(v reflect.Value) any { return uint(v.Uint()) }, + reflect.Uint8: func(v reflect.Value) any { return uint8(v.Uint()) }, + reflect.Uint16: func(v reflect.Value) any { return uint16(v.Uint()) }, + reflect.Uint32: func(v reflect.Value) any { return uint32(v.Uint()) }, + reflect.Uint64: func(v reflect.Value) any { return v.Uint() }, + reflect.Float32: func(v reflect.Value) any { return float32(v.Float()) }, + reflect.Float64: func(v reflect.Value) any { return v.Float() }, +} + +// resolveAlias attempts to resolve a named type to its underlying basic type (if possible). +// +// Pointers are expected to be indirected by this point. 
+func resolveAlias(i any) (any, bool) { + if i == nil { + return nil, false + } + + t := reflect.TypeOf(i) + + // Not a named type + if t.Name() == "" || slices.Contains(kindNames, t.Name()) { + return i, false + } + + resolve, ok := kinds[t.Kind()] + if !ok { // Not a supported kind + return i, false + } + + v := reflect.ValueOf(i) + + return resolve(v), true +} diff --git a/vendor/github.com/spf13/cast/basic.go b/vendor/github.com/spf13/cast/basic.go new file mode 100644 index 0000000000..fa330e207a --- /dev/null +++ b/vendor/github.com/spf13/cast/basic.go @@ -0,0 +1,131 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "html/template" + "strconv" + "time" +) + +// ToBoolE casts any value to a bool type. +func ToBoolE(i any) (bool, error) { + i, _ = indirect(i) + + switch b := i.(type) { + case bool: + return b, nil + case nil: + return false, nil + case int: + return b != 0, nil + case int8: + return b != 0, nil + case int16: + return b != 0, nil + case int32: + return b != 0, nil + case int64: + return b != 0, nil + case uint: + return b != 0, nil + case uint8: + return b != 0, nil + case uint16: + return b != 0, nil + case uint32: + return b != 0, nil + case uint64: + return b != 0, nil + case float32: + return b != 0, nil + case float64: + return b != 0, nil + case time.Duration: + return b != 0, nil + case string: + return strconv.ParseBool(b) + case json.Number: + v, err := ToInt64E(b) + if err == nil { + return v != 0, nil + } + + return false, fmt.Errorf(errorMsg, i, i, false) + default: + if i, ok := resolveAlias(i); ok { + return ToBoolE(i) + } + + return false, fmt.Errorf(errorMsg, i, i, false) + } +} + +// ToStringE casts any value to a string type. 
+func ToStringE(i any) (string, error) { + switch s := i.(type) { + case string: + return s, nil + case bool: + return strconv.FormatBool(s), nil + case float64: + return strconv.FormatFloat(s, 'f', -1, 64), nil + case float32: + return strconv.FormatFloat(float64(s), 'f', -1, 32), nil + case int: + return strconv.Itoa(s), nil + case int8: + return strconv.FormatInt(int64(s), 10), nil + case int16: + return strconv.FormatInt(int64(s), 10), nil + case int32: + return strconv.FormatInt(int64(s), 10), nil + case int64: + return strconv.FormatInt(s, 10), nil + case uint: + return strconv.FormatUint(uint64(s), 10), nil + case uint8: + return strconv.FormatUint(uint64(s), 10), nil + case uint16: + return strconv.FormatUint(uint64(s), 10), nil + case uint32: + return strconv.FormatUint(uint64(s), 10), nil + case uint64: + return strconv.FormatUint(s, 10), nil + case json.Number: + return s.String(), nil + case []byte: + return string(s), nil + case template.HTML: + return string(s), nil + case template.URL: + return string(s), nil + case template.JS: + return string(s), nil + case template.CSS: + return string(s), nil + case template.HTMLAttr: + return string(s), nil + case nil: + return "", nil + case fmt.Stringer: + return s.String(), nil + case error: + return s.Error(), nil + default: + if i, ok := indirect(i); ok { + return ToStringE(i) + } + + if i, ok := resolveAlias(i); ok { + return ToStringE(i) + } + + return "", fmt.Errorf(errorMsg, i, i, "") + } +} diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go index 0cfe9418de..8d85539b35 100644 --- a/vendor/github.com/spf13/cast/cast.go +++ b/vendor/github.com/spf13/cast/cast.go @@ -8,169 +8,77 @@ package cast import "time" -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. 
-func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. -func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} +const errorMsg = "unable to cast %#v of type %T to %T" +const errorMsgWith = "unable to cast %#v of type %T to %T: %w" -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} +// Basic is a type parameter constraint for functions accepting basic types. +// +// It represents the supported basic types this package can cast to. +type Basic interface { + string | bool | Number | time.Time | time.Duration +} + +// ToE casts any value to a [Basic] type. 
+func ToE[T Basic](i any) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case string: + v, err = ToStringE(i) + case bool: + v, err = ToBoolE(i) + case int: + v, err = toNumberE[int](i, parseInt[int]) + case int8: + v, err = toNumberE[int8](i, parseInt[int8]) + case int16: + v, err = toNumberE[int16](i, parseInt[int16]) + case int32: + v, err = toNumberE[int32](i, parseInt[int32]) + case int64: + v, err = toNumberE[int64](i, parseInt[int64]) + case uint: + v, err = toUnsignedNumberE[uint](i, parseUint[uint]) + case uint8: + v, err = toUnsignedNumberE[uint8](i, parseUint[uint8]) + case uint16: + v, err = toUnsignedNumberE[uint16](i, parseUint[uint16]) + case uint32: + v, err = toUnsignedNumberE[uint32](i, parseUint[uint32]) + case uint64: + v, err = toUnsignedNumberE[uint64](i, parseUint[uint64]) + case float32: + v, err = toNumberE[float32](i, parseFloat[float32]) + case float64: + v, err = toNumberE[float64](i, parseFloat[float64]) + case time.Time: + v, err = ToTimeE(i) + case time.Duration: + v, err = ToDurationE(i) + } + + if err != nil { + return t, err + } + + return v.(T), nil +} + +// Must is a helper that wraps a call to a cast function and panics if the error is non-nil. +func Must[T any](i any, err error) T { + if err != nil { + panic(err) + } + + return i.(T) +} + +// To casts any value to a [Basic] type. +func To[T Basic](i any) T { + v, _ := ToE[T](i) -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) return v } diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 4181a2e758..0000000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1510 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -type float64EProvider interface { - Float64() (float64, error) -} - -type float64Provider interface { - Float64() float64 -} - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. -func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. 
-func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case float64EProvider: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - case float64Provider: - d = time.Duration(s.Float64()) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - return b != 0, nil - case int64: - return b != 0, nil - case int32: - return b != 0, nil - case int16: - return b != 0, nil - case int8: - return b != 0, nil - case uint: - return b != 0, nil - case uint64: - return b != 0, nil - case uint32: - return b != 0, nil - case uint16: - return b != 0, nil - case uint8: - return b != 0, nil - case float64: - return b != 0, nil - case float32: - return b != 0, nil - case time.Duration: - return b != 0, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. -func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case float64Provider: - return s.Float64(), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. 
-func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64EProvider: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case float64Provider: - return float32(s.Float64()), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. -func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. 
-func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. -func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. 
-func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. -func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. 
-func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. -func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. 
-func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - errorType := reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. 
-func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - m := map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - m := map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - m := map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - m := map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - m := map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - m := map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, -} - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. 
-func toInt(v interface{}) (int, bool) { - switch v := v.(type) { - case int: - return v, true - case time.Weekday: - return int(v), true - case time.Month: - return int(v), true - default: - return 0, false - } -} - -func trimZeroDecimal(s string) string { - var foundZero bool - for i := len(s); i > 0; i-- { - switch s[i-1] { - case '.': - if foundZero { - return s[:i-1] - } - case '0': - foundZero = true - default: - return s - } - } - return s -} diff --git a/vendor/github.com/spf13/cast/indirect.go b/vendor/github.com/spf13/cast/indirect.go new file mode 100644 index 0000000000..093345f737 --- /dev/null +++ b/vendor/github.com/spf13/cast/indirect.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "reflect" +) + +// From html/template/content.go +// Copyright 2011 The Go Authors. All rights reserved. +// indirect returns the value, after dereferencing as many times +// as necessary to reach the base type (or nil). +func indirect(i any) (any, bool) { + if i == nil { + return nil, false + } + + if t := reflect.TypeOf(i); t.Kind() != reflect.Ptr { + // Avoid creating a reflect.Value if it's not a pointer. + return i, false + } + + v := reflect.ValueOf(i) + + for v.Kind() == reflect.Ptr || (v.Kind() == reflect.Interface && v.Elem().Kind() == reflect.Ptr) { + if v.IsNil() { + return nil, true + } + + v = v.Elem() + } + + return v.Interface(), true +} diff --git a/vendor/github.com/spf13/cast/internal/time.go b/vendor/github.com/spf13/cast/internal/time.go new file mode 100644 index 0000000000..906e9aece3 --- /dev/null +++ b/vendor/github.com/spf13/cast/internal/time.go @@ -0,0 +1,79 @@ +package internal + +import ( + "fmt" + "time" +) + +//go:generate stringer -type=TimeFormatType + +type TimeFormatType int + +const ( + TimeFormatNoTimezone TimeFormatType = iota + TimeFormatNamedTimezone + TimeFormatNumericTimezone + TimeFormatNumericAndNamedTimezone + TimeFormatTimeOnly +) + +type TimeFormat struct { + Format string + Typ TimeFormatType +} + +func (f TimeFormat) HasTimezone() bool { + // We don't include the formats with only named timezones, see + // https://github.com/golang/go/issues/19694#issuecomment-289103522 + return f.Typ >= TimeFormatNumericTimezone && f.Typ <= TimeFormatNumericAndNamedTimezone +} + +var TimeFormats = []TimeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", TimeFormatNoTimezone}, + {time.RFC3339, TimeFormatNumericTimezone}, + {"2006-01-02T15:04:05", TimeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, TimeFormatNumericTimezone}, + {time.RFC1123, TimeFormatNamedTimezone}, + {time.RFC822Z, TimeFormatNumericTimezone}, + {time.RFC822, TimeFormatNamedTimezone}, + {time.RFC850, TimeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", TimeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", TimeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", TimeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", TimeFormatNoTimezone}, + {time.ANSIC, TimeFormatNoTimezone}, + {time.UnixDate, TimeFormatNamedTimezone}, + {time.RubyDate, TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", TimeFormatNumericTimezone}, + {"02 Jan 2006", TimeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", TimeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", TimeFormatNumericTimezone}, + {time.Kitchen, TimeFormatTimeOnly}, + {time.Stamp, TimeFormatTimeOnly}, + {time.StampMilli, TimeFormatTimeOnly}, + {time.StampMicro, TimeFormatTimeOnly}, + {time.StampNano, TimeFormatTimeOnly}, +} + +func ParseDateWith(s string, location *time.Location, formats []TimeFormat) (d time.Time, e error) { + for _, format := range formats { + if d, e = time.Parse(format.Format, s); e == nil { + + // Some time formats have a zone name, but no offset, so it gets + // put in that zone name (not the default one passed in to us), but + // without that zone's offset. So set the location manually. + if format.Typ <= TimeFormatNamedTimezone { + if location == nil { + location = time.Local + } + year, month, day := d.Date() + hour, min, sec := d.Clock() + d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) + } + + return + } + } + return d, fmt.Errorf("unable to parse date: %s", s) +} diff --git a/vendor/github.com/spf13/cast/internal/timeformattype_string.go b/vendor/github.com/spf13/cast/internal/timeformattype_string.go new file mode 100644 index 0000000000..60a29a862b --- /dev/null +++ b/vendor/github.com/spf13/cast/internal/timeformattype_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=TimeFormatType"; DO NOT EDIT. + +package internal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TimeFormatNoTimezone-0] + _ = x[TimeFormatNamedTimezone-1] + _ = x[TimeFormatNumericTimezone-2] + _ = x[TimeFormatNumericAndNamedTimezone-3] + _ = x[TimeFormatTimeOnly-4] +} + +const _TimeFormatType_name = "TimeFormatNoTimezoneTimeFormatNamedTimezoneTimeFormatNumericTimezoneTimeFormatNumericAndNamedTimezoneTimeFormatTimeOnly" + +var _TimeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} + +func (i TimeFormatType) String() string { + if i < 0 || i >= TimeFormatType(len(_TimeFormatType_index)-1) { + return "TimeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _TimeFormatType_name[_TimeFormatType_index[i]:_TimeFormatType_index[i+1]] +} diff --git a/vendor/github.com/spf13/cast/map.go b/vendor/github.com/spf13/cast/map.go new file mode 100644 index 0000000000..858d4ee43b --- /dev/null +++ b/vendor/github.com/spf13/cast/map.go @@ -0,0 +1,212 @@ +// Copyright © 2014 Steve Francia . 
+// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "fmt" + "reflect" +) + +func toMapE[K comparable, V any](i any, keyFn func(any) K, valFn func(any) V) (map[K]V, error) { + m := map[K]V{} + + if i == nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[K]V: + return v, nil + + case map[K]any: + for k, val := range v { + m[k] = valFn(val) + } + + return m, nil + + case map[any]V: + for k, val := range v { + m[keyFn(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[keyFn(k)] = valFn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + return m, err + + default: + return m, fmt.Errorf(errorMsg, i, i, m) + } +} + +func toStringMapE[T any](i any, fn func(any) T) (map[string]T, error) { + return toMapE(i, ToString, fn) +} + +// ToStringMapStringE casts any value to a map[string]string type. +func ToStringMapStringE(i any) (map[string]string, error) { + return toStringMapE(i, ToString) +} + +// ToStringMapStringSliceE casts any value to a map[string][]string type. +func ToStringMapStringSliceE(i any) (map[string][]string, error) { + m := map[string][]string{} + + switch v := i.(type) { + case map[string][]string: + return v, nil + case map[string][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[string]string: + for k, val := range v { + m[ToString(k)] = []string{val} + } + case map[string]any: + for k, val := range v { + switch vt := val.(type) { + case []any: + m[ToString(k)] = ToStringSlice(vt) + case []string: + m[ToString(k)] = vt + default: + m[ToString(k)] = []string{ToString(val)} + } + } + return m, nil + case map[any][]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]string: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any][]any: + for k, val := range v { + m[ToString(k)] = ToStringSlice(val) + } + return m, nil + case map[any]any: + for k, val := range v { + key, err := ToStringE(k) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + value, err := ToStringSliceE(val) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + m[key] = value + } + case string: + err := jsonStringToObject(v, &m) + return m, err + default: + return m, fmt.Errorf(errorMsg, i, i, m) + } + + return m, nil +} + +// ToStringMapBoolE casts any value to a map[string]bool type. +func ToStringMapBoolE(i any) (map[string]bool, error) { + return toStringMapE(i, ToBool) +} + +// ToStringMapE casts any value to a map[string]any type. 
+func ToStringMapE(i any) (map[string]any, error) { + fn := func(i any) any { return i } + + return toStringMapE(i, fn) +} + +func toStringMapIntE[T int | int64](i any, fn func(any) T, fnE func(any) (T, error)) (map[string]T, error) { + m := map[string]T{} + + if i == nil { + return nil, fmt.Errorf(errorMsg, i, i, m) + } + + switch v := i.(type) { + case map[string]T: + return v, nil + + case map[string]any: + for k, val := range v { + m[k] = fn(val) + } + + return m, nil + + case map[any]T: + for k, val := range v { + m[ToString(k)] = val + } + + return m, nil + + case map[any]any: + for k, val := range v { + m[ToString(k)] = fn(val) + } + + return m, nil + + case string: + err := jsonStringToObject(v, &m) + return m, err + } + + if reflect.TypeOf(i).Kind() != reflect.Map { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + mVal := reflect.ValueOf(m) + v := reflect.ValueOf(i) + + for _, keyVal := range v.MapKeys() { + val, err := fnE(v.MapIndex(keyVal).Interface()) + if err != nil { + return m, fmt.Errorf(errorMsg, i, i, m) + } + + mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) + } + + return m, nil +} + +// ToStringMapIntE casts any value to a map[string]int type. +func ToStringMapIntE(i any) (map[string]int, error) { + return toStringMapIntE(i, ToInt, ToIntE) +} + +// ToStringMapInt64E casts any value to a map[string]int64 type. +func ToStringMapInt64E(i any) (map[string]int64, error) { + return toStringMapIntE(i, ToInt64, ToInt64E) +} + +// jsonStringToObject attempts to unmarshall a string as JSON into +// the object passed as pointer. +func jsonStringToObject(s string, v any) error { + data := []byte(s) + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/spf13/cast/number.go b/vendor/github.com/spf13/cast/number.go new file mode 100644 index 0000000000..a58dc4d1ed --- /dev/null +++ b/vendor/github.com/spf13/cast/number.go @@ -0,0 +1,549 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +var errNegativeNotAllowed = errors.New("unable to cast negative value") + +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + +// Number is a type parameter constraint for functions accepting number types. +// +// It represents the supported number types this package can cast to. +type Number interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64 +} + +type integer interface { + int | int8 | int16 | int32 | int64 +} + +type unsigned interface { + uint | uint8 | uint16 | uint32 | uint64 +} + +type float interface { + float32 | float64 +} + +// ToNumberE casts any value to a [Number] type. 
+func ToNumberE[T Number](i any) (T, error) { + var t T + + switch any(t).(type) { + case int: + return toNumberE[T](i, parseNumber[T]) + case int8: + return toNumberE[T](i, parseNumber[T]) + case int16: + return toNumberE[T](i, parseNumber[T]) + case int32: + return toNumberE[T](i, parseNumber[T]) + case int64: + return toNumberE[T](i, parseNumber[T]) + case uint: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint8: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint16: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint32: + return toUnsignedNumberE[T](i, parseNumber[T]) + case uint64: + return toUnsignedNumberE[T](i, parseNumber[T]) + case float32: + return toNumberE[T](i, parseNumber[T]) + case float64: + return toNumberE[T](i, parseNumber[T]) + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +// ToNumber casts any value to a [Number] type. +func ToNumber[T Number](i any) T { + v, _ := ToNumberE[T](i) + + return v +} + +// toNumber's semantics differ from other "to" functions. +// It returns false as the second parameter if the conversion fails. +// This is to signal other callers that they should proceed with their own conversions. +func toNumber[T Number](i any) (T, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true + case int: + return T(s), true + case int8: + return T(s), true + case int16: + return T(s), true + case int32: + return T(s), true + case int64: + return T(s), true + case uint: + return T(s), true + case uint8: + return T(s), true + case uint16: + return T(s), true + case uint32: + return T(s), true + case uint64: + return T(s), true + case float32: + return T(s), true + case float64: + return T(s), true + case bool: + if s { + return 1, true + } + + return 0, true + case nil: + return 0, true + case time.Weekday: + return T(s), true + case time.Month: + return T(s), true + } + + return 0, false +} + +func toNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, ok := toNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + return T(s.Float64()), nil + default: + if i, ok := resolveAlias(i); ok { + return toNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func toUnsignedNumber[T Number](i any) (T, bool, bool) { + i, _ = indirect(i) + + switch s := i.(type) { + case T: + return s, true, true + case int: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int8: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int16: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case int64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case uint: + return T(s), true, true 
+ case uint8: + return T(s), true, true + case uint16: + return T(s), true, true + case uint32: + return T(s), true, true + case uint64: + return T(s), true, true + case float32: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case float64: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case bool: + if s { + return 1, true, true + } + + return 0, true, true + case nil: + return 0, true, true + case time.Weekday: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + case time.Month: + if s < 0 { + return 0, false, false + } + + return T(s), true, true + } + + return 0, true, false +} + +func toUnsignedNumberE[T Number](i any, parseFn func(string) (T, error)) (T, error) { + n, valid, ok := toUnsignedNumber[T](i) + if ok { + return n, nil + } + + i, _ = indirect(i) + + if !valid { + return 0, errNegativeNotAllowed + } + + switch s := i.(type) { + case string: + if s == "" { + return 0, nil + } + + v, err := parseFn(s) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case json.Number: + if s == "" { + return 0, nil + } + + v, err := parseFn(string(s)) + if err != nil { + return 0, fmt.Errorf(errorMsgWith, i, i, n, err) + } + + return v, nil + case float64EProvider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v, err := s.Float64() + if err != nil { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + case float64Provider: + if _, ok := any(n).(float64); !ok { + return 0, fmt.Errorf(errorMsg, i, i, n) + } + + v := s.Float64() + + if v < 0 { + return 0, errNegativeNotAllowed + } + + return T(v), nil + default: + if i, ok := resolveAlias(i); ok { + return toUnsignedNumberE(i, parseFn) + } + + return 0, fmt.Errorf(errorMsg, i, i, n) + } +} + +func parseNumber[T Number](s string) (T, error) { + var t T + + switch any(t).(type) { + case int: + v, err := parseInt[int](s) + + return T(v), err + case int8: + v, err := parseInt[int8](s) + + return T(v), err + case int16: + v, err := parseInt[int16](s) + + return T(v), err + case int32: + v, err := parseInt[int32](s) + + return T(v), err + case int64: + v, err := parseInt[int64](s) + + return T(v), err + case uint: + v, err := parseUint[uint](s) + + return T(v), err + case uint8: + v, err := parseUint[uint8](s) + + return T(v), err + case uint16: + v, err := parseUint[uint16](s) + + return T(v), err + case uint32: + v, err := parseUint[uint32](s) + + return T(v), err + case uint64: + v, err := parseUint[uint64](s) + + return T(v), err + case float32: + v, err := strconv.ParseFloat(s, 32) + + return T(v), err + case float64: + v, err := strconv.ParseFloat(s, 64) + + return T(v), err + + default: + return 0, fmt.Errorf("unknown number type: %T", t) + } +} + +func parseInt[T integer](s string) (T, error) { + v, err := strconv.ParseInt(trimDecimal(s), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseUint[T unsigned](s string) (T, error) { + v, err := strconv.ParseUint(strings.TrimLeft(trimDecimal(s), "+"), 0, 0) + if err != nil { + return 0, err + } + + return T(v), nil +} + +func parseFloat[T float](s string) (T, error) { + var t T + + var v any + var err error + + switch any(t).(type) { + case float32: + n, e := strconv.ParseFloat(s, 32) + + v = float32(n) + err = e + case float64: + n, e := strconv.ParseFloat(s, 64) + + v = float64(n) + err = e + } + + return v.(T), err +} + +// ToFloat64E casts an 
interface to a float64 type. +func ToFloat64E(i any) (float64, error) { + return toNumberE[float64](i, parseFloat[float64]) +} + +// ToFloat32E casts an interface to a float32 type. +func ToFloat32E(i any) (float32, error) { + return toNumberE[float32](i, parseFloat[float32]) +} + +// ToInt64E casts an interface to an int64 type. +func ToInt64E(i any) (int64, error) { + return toNumberE[int64](i, parseInt[int64]) +} + +// ToInt32E casts an interface to an int32 type. +func ToInt32E(i any) (int32, error) { + return toNumberE[int32](i, parseInt[int32]) +} + +// ToInt16E casts an interface to an int16 type. +func ToInt16E(i any) (int16, error) { + return toNumberE[int16](i, parseInt[int16]) +} + +// ToInt8E casts an interface to an int8 type. +func ToInt8E(i any) (int8, error) { + return toNumberE[int8](i, parseInt[int8]) +} + +// ToIntE casts an interface to an int type. +func ToIntE(i any) (int, error) { + return toNumberE[int](i, parseInt[int]) +} + +// ToUintE casts an interface to a uint type. +func ToUintE(i any) (uint, error) { + return toUnsignedNumberE[uint](i, parseUint[uint]) +} + +// ToUint64E casts an interface to a uint64 type. +func ToUint64E(i any) (uint64, error) { + return toUnsignedNumberE[uint64](i, parseUint[uint64]) +} + +// ToUint32E casts an interface to a uint32 type. +func ToUint32E(i any) (uint32, error) { + return toUnsignedNumberE[uint32](i, parseUint[uint32]) +} + +// ToUint16E casts an interface to a uint16 type. +func ToUint16E(i any) (uint16, error) { + return toUnsignedNumberE[uint16](i, parseUint[uint16]) +} + +// ToUint8E casts an interface to a uint type. +func ToUint8E(i any) (uint8, error) { + return toUnsignedNumberE[uint8](i, parseUint[uint8]) +} + +func trimZeroDecimal(s string) string { + var foundZero bool + for i := len(s); i > 0; i-- { + switch s[i-1] { + case '.': + if foundZero { + return s[:i-1] + } + case '0': + foundZero = true + default: + return s + } + } + return s +} + +var stringNumberRe = regexp.MustCompile(`^([-+]?\d*)(\.\d*)?$`) + +// see [BenchmarkDecimal] for details about the implementation +func trimDecimal(s string) string { + if !strings.Contains(s, ".") { + return s + } + + matches := stringNumberRe.FindStringSubmatch(s) + if matches != nil { + // matches[1] is the captured integer part with sign + s = matches[1] + + // handle special cases + switch s { + case "-", "+": + s += "0" + case "": + s = "0" + } + + return s + } + + return s +} diff --git a/vendor/github.com/spf13/cast/slice.go b/vendor/github.com/spf13/cast/slice.go new file mode 100644 index 0000000000..e6a8328c60 --- /dev/null +++ b/vendor/github.com/spf13/cast/slice.go @@ -0,0 +1,106 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "fmt" + "reflect" + "strings" +) + +// ToSliceE casts any value to a []any type. 
+func ToSliceE(i any) ([]any, error) { + i, _ = indirect(i) + + var s []any + + switch v := i.(type) { + case []any: + // TODO: use slices.Clone + return append(s, v...), nil + case []map[string]any: + for _, u := range v { + s = append(s, u) + } + + return s, nil + default: + return s, fmt.Errorf(errorMsg, i, i, s) + } +} + +func toSliceE[T Basic](i any) ([]T, error) { + v, ok, err := toSliceEOk[T](i) + if err != nil { + return nil, err + } + + if !ok { + return nil, fmt.Errorf(errorMsg, i, i, []T{}) + } + + return v, nil +} + +func toSliceEOk[T Basic](i any) ([]T, bool, error) { + i, _ = indirect(i) + if i == nil { + return nil, true, fmt.Errorf(errorMsg, i, i, []T{}) + } + + switch v := i.(type) { + case []T: + // TODO: clone slice + return v, true, nil + } + + kind := reflect.TypeOf(i).Kind() + switch kind { + case reflect.Slice, reflect.Array: + s := reflect.ValueOf(i) + a := make([]T, s.Len()) + + for j := 0; j < s.Len(); j++ { + val, err := ToE[T](s.Index(j).Interface()) + if err != nil { + return nil, true, fmt.Errorf(errorMsg, i, i, []T{}) + } + + a[j] = val + } + + return a, true, nil + default: + return nil, false, nil + } +} + +// ToStringSliceE casts any value to a []string type. +func ToStringSliceE(i any) ([]string, error) { + if a, ok, err := toSliceEOk[string](i); ok { + if err != nil { + return nil, err + } + + return a, nil + } + + var a []string + + switch v := i.(type) { + case string: + return strings.Fields(v), nil + case any: + str, err := ToStringE(v) + if err != nil { + return nil, fmt.Errorf(errorMsg, i, i, a) + } + + return []string{str}, nil + default: + return nil, fmt.Errorf(errorMsg, i, i, a) + } +} diff --git a/vendor/github.com/spf13/cast/time.go b/vendor/github.com/spf13/cast/time.go new file mode 100644 index 0000000000..744cd5accd --- /dev/null +++ b/vendor/github.com/spf13/cast/time.go @@ -0,0 +1,116 @@ +// Copyright © 2014 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package cast + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "github.com/spf13/cast/internal" +) + +// ToTimeE any value to a [time.Time] type. +func ToTimeE(i any) (time.Time, error) { + return ToTimeInDefaultLocationE(i, time.UTC) +} + +// ToTimeInDefaultLocationE casts an empty interface to [time.Time], +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func ToTimeInDefaultLocationE(i any, location *time.Location) (tim time.Time, err error) { + i, _ = indirect(i) + + switch v := i.(type) { + case time.Time: + return v, nil + case string: + return StringToDateInDefaultLocation(v, location) + case json.Number: + // Originally this used ToInt64E, but adding string float conversion broke ToTime. + // the behavior of ToTime would have changed if we continued using it. + // For now, using json.Number's own Int64 method should be good enough to preserve backwards compatibility. 
+ v = json.Number(trimZeroDecimal(string(v))) + s, err1 := v.Int64() + if err1 != nil { + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } + return time.Unix(s, 0), nil + case int: + return time.Unix(int64(v), 0), nil + case int32: + return time.Unix(int64(v), 0), nil + case int64: + return time.Unix(v, 0), nil + case uint: + return time.Unix(int64(v), 0), nil + case uint32: + return time.Unix(int64(v), 0), nil + case uint64: + return time.Unix(int64(v), 0), nil + case nil: + return time.Time{}, nil + default: + return time.Time{}, fmt.Errorf(errorMsg, i, i, time.Time{}) + } +} + +// ToDurationE casts any value to a [time.Duration] type. +func ToDurationE(i any) (time.Duration, error) { + i, _ = indirect(i) + + switch s := i.(type) { + case time.Duration: + return s, nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + v, err := ToInt64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " int64", "time.Duration")) + } + + return time.Duration(v), nil + case float32, float64, float64EProvider, float64Provider: + v, err := ToFloat64E(s) + if err != nil { + // TODO: once there is better error handling, this should be easier + return 0, errors.New(strings.ReplaceAll(err.Error(), " float64", "time.Duration")) + } + + return time.Duration(v), nil + case string: + if !strings.ContainsAny(s, "nsuµmh") { + return time.ParseDuration(s + "ns") + } + + return time.ParseDuration(s) + case nil: + return time.Duration(0), nil + default: + if i, ok := resolveAlias(i); ok { + return ToDurationE(i) + } + + return 0, fmt.Errorf(errorMsg, i, i, time.Duration(0)) + } +} + +// StringToDate attempts to parse a string into a [time.Time] type using a +// predefined list of formats. +// +// If no suitable format is found, an error is returned. +func StringToDate(s string) (time.Time, error) { + return internal.ParseDateWith(s, time.UTC, internal.TimeFormats) +} + +// StringToDateInDefaultLocation casts an empty interface to a [time.Time], +// interpreting inputs without a timezone to be in the given location, +// or the local timezone if nil. +func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { + return internal.ParseDateWith(s, location, internal.TimeFormats) +} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82ce..0000000000 --- a/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/vendor/github.com/spf13/cast/zz_generated.go b/vendor/github.com/spf13/cast/zz_generated.go new file mode 100644 index 0000000000..ce3ec0f78f --- /dev/null +++ b/vendor/github.com/spf13/cast/zz_generated.go @@ -0,0 +1,261 @@ +// Code generated by cast generator. DO NOT EDIT. + +package cast + +import "time" + +// ToBool casts any value to a(n) bool type. +func ToBool(i any) bool { + v, _ := ToBoolE(i) + return v +} + +// ToString casts any value to a(n) string type. +func ToString(i any) string { + v, _ := ToStringE(i) + return v +} + +// ToTime casts any value to a(n) time.Time type. +func ToTime(i any) time.Time { + v, _ := ToTimeE(i) + return v +} + +// ToTimeInDefaultLocation casts any value to a(n) time.Time type. +func ToTimeInDefaultLocation(i any, location *time.Location) time.Time { + v, _ := ToTimeInDefaultLocationE(i, location) + return v +} + +// ToDuration casts any value to a(n) time.Duration type. +func ToDuration(i any) time.Duration { + v, _ := ToDurationE(i) + return v +} + +// ToInt casts any value to a(n) int type. +func ToInt(i any) int { + v, _ := ToIntE(i) + return v +} + +// ToInt8 casts any value to a(n) int8 type. +func ToInt8(i any) int8 { + v, _ := ToInt8E(i) + return v +} + +// ToInt16 casts any value to a(n) int16 type. +func ToInt16(i any) int16 { + v, _ := ToInt16E(i) + return v +} + +// ToInt32 casts any value to a(n) int32 type. +func ToInt32(i any) int32 { + v, _ := ToInt32E(i) + return v +} + +// ToInt64 casts any value to a(n) int64 type. +func ToInt64(i any) int64 { + v, _ := ToInt64E(i) + return v +} + +// ToUint casts any value to a(n) uint type. +func ToUint(i any) uint { + v, _ := ToUintE(i) + return v +} + +// ToUint8 casts any value to a(n) uint8 type. +func ToUint8(i any) uint8 { + v, _ := ToUint8E(i) + return v +} + +// ToUint16 casts any value to a(n) uint16 type. +func ToUint16(i any) uint16 { + v, _ := ToUint16E(i) + return v +} + +// ToUint32 casts any value to a(n) uint32 type. +func ToUint32(i any) uint32 { + v, _ := ToUint32E(i) + return v +} + +// ToUint64 casts any value to a(n) uint64 type. +func ToUint64(i any) uint64 { + v, _ := ToUint64E(i) + return v +} + +// ToFloat32 casts any value to a(n) float32 type. +func ToFloat32(i any) float32 { + v, _ := ToFloat32E(i) + return v +} + +// ToFloat64 casts any value to a(n) float64 type. +func ToFloat64(i any) float64 { + v, _ := ToFloat64E(i) + return v +} + +// ToStringMapString casts any value to a(n) map[string]string type. +func ToStringMapString(i any) map[string]string { + v, _ := ToStringMapStringE(i) + return v +} + +// ToStringMapStringSlice casts any value to a(n) map[string][]string type. +func ToStringMapStringSlice(i any) map[string][]string { + v, _ := ToStringMapStringSliceE(i) + return v +} + +// ToStringMapBool casts any value to a(n) map[string]bool type. 
+func ToStringMapBool(i any) map[string]bool { + v, _ := ToStringMapBoolE(i) + return v +} + +// ToStringMapInt casts any value to a(n) map[string]int type. +func ToStringMapInt(i any) map[string]int { + v, _ := ToStringMapIntE(i) + return v +} + +// ToStringMapInt64 casts any value to a(n) map[string]int64 type. +func ToStringMapInt64(i any) map[string]int64 { + v, _ := ToStringMapInt64E(i) + return v +} + +// ToStringMap casts any value to a(n) map[string]any type. +func ToStringMap(i any) map[string]any { + v, _ := ToStringMapE(i) + return v +} + +// ToSlice casts any value to a(n) []any type. +func ToSlice(i any) []any { + v, _ := ToSliceE(i) + return v +} + +// ToBoolSlice casts any value to a(n) []bool type. +func ToBoolSlice(i any) []bool { + v, _ := ToBoolSliceE(i) + return v +} + +// ToStringSlice casts any value to a(n) []string type. +func ToStringSlice(i any) []string { + v, _ := ToStringSliceE(i) + return v +} + +// ToIntSlice casts any value to a(n) []int type. +func ToIntSlice(i any) []int { + v, _ := ToIntSliceE(i) + return v +} + +// ToInt64Slice casts any value to a(n) []int64 type. +func ToInt64Slice(i any) []int64 { + v, _ := ToInt64SliceE(i) + return v +} + +// ToUintSlice casts any value to a(n) []uint type. +func ToUintSlice(i any) []uint { + v, _ := ToUintSliceE(i) + return v +} + +// ToFloat64Slice casts any value to a(n) []float64 type. +func ToFloat64Slice(i any) []float64 { + v, _ := ToFloat64SliceE(i) + return v +} + +// ToDurationSlice casts any value to a(n) []time.Duration type. +func ToDurationSlice(i any) []time.Duration { + v, _ := ToDurationSliceE(i) + return v +} + +// ToBoolSliceE casts any value to a(n) []bool type. +func ToBoolSliceE(i any) ([]bool, error) { + return toSliceE[bool](i) +} + +// ToDurationSliceE casts any value to a(n) []time.Duration type. +func ToDurationSliceE(i any) ([]time.Duration, error) { + return toSliceE[time.Duration](i) +} + +// ToIntSliceE casts any value to a(n) []int type. +func ToIntSliceE(i any) ([]int, error) { + return toSliceE[int](i) +} + +// ToInt8SliceE casts any value to a(n) []int8 type. +func ToInt8SliceE(i any) ([]int8, error) { + return toSliceE[int8](i) +} + +// ToInt16SliceE casts any value to a(n) []int16 type. +func ToInt16SliceE(i any) ([]int16, error) { + return toSliceE[int16](i) +} + +// ToInt32SliceE casts any value to a(n) []int32 type. +func ToInt32SliceE(i any) ([]int32, error) { + return toSliceE[int32](i) +} + +// ToInt64SliceE casts any value to a(n) []int64 type. +func ToInt64SliceE(i any) ([]int64, error) { + return toSliceE[int64](i) +} + +// ToUintSliceE casts any value to a(n) []uint type. +func ToUintSliceE(i any) ([]uint, error) { + return toSliceE[uint](i) +} + +// ToUint8SliceE casts any value to a(n) []uint8 type. +func ToUint8SliceE(i any) ([]uint8, error) { + return toSliceE[uint8](i) +} + +// ToUint16SliceE casts any value to a(n) []uint16 type. +func ToUint16SliceE(i any) ([]uint16, error) { + return toSliceE[uint16](i) +} + +// ToUint32SliceE casts any value to a(n) []uint32 type. +func ToUint32SliceE(i any) ([]uint32, error) { + return toSliceE[uint32](i) +} + +// ToUint64SliceE casts any value to a(n) []uint64 type. +func ToUint64SliceE(i any) ([]uint64, error) { + return toSliceE[uint64](i) +} + +// ToFloat32SliceE casts any value to a(n) []float32 type. +func ToFloat32SliceE(i any) ([]float32, error) { + return toSliceE[float32](i) +} + +// ToFloat64SliceE casts any value to a(n) []float64 type. 
+func ToFloat64SliceE(i any) ([]float64, error) { + return toSliceE[float64](i) +} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 2c8f4808c1..6acf8ab1ea 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -12,14 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "2" + run: - deadline: 5m + timeout: 5m + +formatters: + enable: + - gofmt + - goimports linters: - disable-all: true + default: none enable: #- bodyclose - # - deadcode ! deprecated since v1.49.0; replaced by 'unused' #- depguard #- dogsled #- dupl @@ -30,28 +36,24 @@ linters: - goconst - gocritic #- gocyclo - - gofmt - - goimports - #- gomnd #- goprintffuncname - gosec - - gosimple - govet - ineffassign #- lll - misspell + #- mnd #- nakedret #- noctx - nolintlint #- rowserrcheck - #- scopelint - staticcheck - #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - - stylecheck - #- typecheck - unconvert #- unparam - unused - # - varcheck ! deprecated since v1.49.0; replaced by 'unused' #- whitespace - fast: false + exclusions: + presets: + - common-false-positives + - legacy + - std-error-handling diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 71757151c3..8416275f48 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,8 +1,14 @@ - -![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) +
Cobra is a library for creating powerful modern CLI applications. +Visit [Cobra.dev](https://cobra.dev) for extensive documentation + + Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. @@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) + +Supported by: + +[Warp sponsorship banner](https://www.warp.dev/cobra) + +### [Warp, the AI terminal for devs](https://www.warp.dev/cobra) +[Try Cobra in Warp today](https://www.warp.dev/cobra)
          # Overview diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md new file mode 100644 index 0000000000..54e60c28c1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/SECURITY.md @@ -0,0 +1,105 @@ +# Security Policy + +## Reporting a Vulnerability + +The `cobra` maintainers take security issues seriously and +we appreciate your efforts to _**responsibly**_ disclose your findings. +We will make every effort to swiftly respond and address concerns. + +To report a security vulnerability: + +1. **DO NOT** create a public GitHub issue for the vulnerability! +2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability! +3. Send an email to `cobra-security@googlegroups.com`. +4. Include the following details in your report: + - Description of the vulnerability + - Steps to reproduce + - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.) + - Any potential mitigations you've already identified +5. Allow up to 7 days for an initial response. + You should receive an acknowledgment of your report and an estimated timeline for a fix. +6. (Optional) If you have a fix and would like to contribute your patch, please work + directly with the maintainers via `cobra-security@googlegroups.com` to + coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change. + +## Response Process + +When a security vulnerability report is received, the `cobra` maintainers will: + +1. Confirm receipt of the vulnerability report within 7 days. +2. Assess the report to determine if it constitutes a security vulnerability. +3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it. +4. Develop and test a fix. +5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter. +6. Create a new GitHub Security Advisory to inform the broader Go ecosystem + +## Disclosure Policy + +The `cobra` maintainers follow a coordinated disclosure process: + +1. Security vulnerabilities will be addressed as quickly as possible. +2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities + that are within `cobra` itself. +3. Once a fix is ready, the maintainers will: + - Release a new version containing the fix. + - Update the security advisory with details about the vulnerability. + - Credit the reporter (unless they wish to remain anonymous). + - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter). + - Announce the vulnerability through appropriate channels + (GitHub Security Advisory, mailing lists, GitHub Releases, etc.) + +## Supported Versions + +Security fixes will typically only be released for the most recent major release. + +## Upstream Security Issues + +`cobra` generally will not accept vulnerability reports that originate in upstream +dependencies. I.e., if there is a problem in Go code that `cobra` depends on, +it is best to engage that project's maintainers and owners. + +This security policy primarily pertains only to `cobra` itself but if you believe you've +identified a problem that originates in an upstream dependency and is being widely +distributed by `cobra`, please follow the disclosure procedure above: the `cobra` +maintainers will work with you to determine the severity and ecosystem impact. 
+ +## Security Updates and CVEs + +Information about known security vulnerabilities and CVEs affecting `cobra` will +be published as GitHub Security Advisories at +https://github.com/spf13/cobra/security/advisories. + +All users are encouraged to watch the repository and upgrade promptly when +security releases are published. + +## `cobra` Security Best Practices for Users + +When using `cobra` in your CLIs, the `cobra` maintainers recommend the following: + +1. Always use the latest version of `cobra`. +2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management. +3. Always use the latest possible version of Go. + +## Security Best Practices for Contributors + +When contributing to `cobra`: + +1. Be mindful of security implications when adding new features or modifying existing ones. +2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI + (like Kubernetes, Docker, Prometheus, etc. etc.) +3. Write tests that explicitly cover edge cases and potential issues. +4. If you discover a security issue while working on `cobra`, please report it + following the process above rather than opening a public pull request or issue that + addresses the vulnerability. +5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa), + [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification), + etc. + +## Acknowledgments + +The `cobra` maintainers would like to thank all security researchers and +community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!! + +--- + +*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.* diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index dbb2c298ba..78088db69c 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -39,7 +39,7 @@ const ( ) // FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist +type FParseErrWhitelist flag.ParseErrorsAllowlist // Group Structure to manage groups for commands type Group struct { @@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, c.Printf("Unknown help topic %#q\n", args) CheckErr(c.Root().Usage()) } else { + // FLow the context down to be used in help text + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown CheckErr(cmd.Help()) @@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error { c.mergePersistentFlags() // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist) err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages). 
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) } if c.HasHelpSubCommands() { - fmt.Fprintf(w, "\n\nAdditional help topcis:") + fmt.Fprintf(w, "\n\nAdditional help topics:") for _, subcmd := range c.Commands() { if subcmd.IsAdditionalHelpTopicCommand() { fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index a1752f7631..d3607c2d2f 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -115,6 +115,13 @@ type CompletionOptions struct { DisableDescriptions bool // HiddenDefaultCmd makes the default 'completion' command hidden HiddenDefaultCmd bool + // DefaultShellCompDirective sets the ShellCompDirective that is returned + // if no special directive can be determined + DefaultShellCompDirective *ShellCompDirective +} + +func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) { + receiver.DefaultShellCompDirective = &directive } // Completion is a string that can be used for completions @@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo // Error while attempting to parse flags if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + if _, ok := flagErr.(*flagCompError); !ok || flagCompletion { return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } @@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo } } else { directive = ShellCompDirectiveDefault + // check current and parent commands for a custom DefaultShellCompDirective + for cmd := finalCmd; cmd != nil; cmd = cmd.parent { + if cmd.CompletionOptions.DefaultShellCompDirective != nil { + directive = *cmd.CompletionOptions.DefaultShellCompDirective + break + } + } + if flag == nil { foundLocalNonPersistentFlag := false // If TraverseChildren is true on the root command we don't check for @@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script. // shell completion for it (prog __complete completion '') subCmd, cmdArgs, err := c.Find(args) if err != nil || subCmd.Name() != compCmdName && - !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) { // The completion command is not being called or being completed so we remove it. 
c.RemoveCommand(completionCmd) return diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go index 2138f24882..560bc20c75 100644 --- a/vendor/github.com/spf13/cobra/doc/man_docs.go +++ b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -212,7 +212,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { manPrintOptions(buf, cmd) if len(cmd.Example) > 0 { buf.WriteString("# EXAMPLE\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example) } if hasSeeAlso(cmd) { buf.WriteString("# SEE ALSO\n") @@ -240,7 +240,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte { buf.WriteString(strings.Join(seealsos, ", ") + "\n") } if !cmd.DisableAutoGenTag { - buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) + fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")) } return buf.Bytes() } diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go index 12592223ba..6eae7ccfb6 100644 --- a/vendor/github.com/spf13/cobra/doc/md_docs.go +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -69,12 +69,12 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) } if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.UseLine()) } if len(cmd.Example) > 0 { buf.WriteString("### Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example) } if err := printOptions(buf, cmd, name); err != nil { @@ -87,7 +87,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) pname := parent.CommandPath() link := pname + markdownExtension link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -105,7 +105,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) cname := name + " " + child.Name() link := cname + markdownExtension link = strings.ReplaceAll(link, " ", "_") - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) } buf.WriteString("\n") } diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go index c33acc2baa..4901ca9801 100644 --- a/vendor/github.com/spf13/cobra/doc/rest_docs.go +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -82,13 +82,13 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, str buf.WriteString("\n" + long + "\n\n") if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "::\n\n %s\n\n", cmd.UseLine()) } if len(cmd.Example) > 0 { buf.WriteString("Examples\n") buf.WriteString("~~~~~~~~\n\n") - buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) + fmt.Fprintf(buf, "::\n\n%s\n\n", indentString(cmd.Example, " ")) } if err := printOptionsReST(buf, cmd, name); err != nil { @@ -101,7 +101,7 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler 
func(string, str
 	parent := cmd.Parent()
 	pname := parent.CommandPath()
 	ref = strings.ReplaceAll(pname, " ", "_")
-	buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short))
+	fmt.Fprintf(buf, "* %s \t - %s\n", linkHandler(pname, ref), parent.Short)
 	cmd.VisitParents(func(c *cobra.Command) {
 		if c.DisableAutoGenTag {
 			cmd.DisableAutoGenTag = c.DisableAutoGenTag
@@ -118,7 +118,7 @@ func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, str
 		}
 		cname := name + " " + child.Name()
 		ref = strings.ReplaceAll(cname, " ", "_")
-		buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short))
+		fmt.Fprintf(buf, "* %s \t - %s\n", linkHandler(cname, ref), child.Short)
 	}
 	buf.WriteString("\n")
 }
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
index 2b26d6ec0f..3719717037 100644
--- a/vendor/github.com/spf13/cobra/doc/yaml_docs.go
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
@@ -153,7 +153,7 @@ func genFlagResult(flags *pflag.FlagSet) []cmdOption {
 		// Todo, when we mark a shorthand is deprecated, but specify an empty message.
 		// The flag.ShorthandDeprecated is empty as the shorthand is deprecated.
 		// Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok.
-		if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
+		if len(flag.ShorthandDeprecated) == 0 && len(flag.Shorthand) > 0 {
 			opt := cmdOption{
 				flag.Name,
 				flag.Shorthand,
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
index 7eacc5bdbe..388c4e5ead 100644
--- a/vendor/github.com/spf13/pflag/README.md
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -284,6 +284,34 @@ func main() {
 }
 ```
 
+### Using pflag with go test
+`pflag` does not parse the shorthand versions of go test's built-in flags (i.e., those starting with `-test.`).
+For more context, see issues [#63](https://github.com/spf13/pflag/issues/63) and [#238](https://github.com/spf13/pflag/issues/238).
+
+For example, if you use pflag in your `TestMain` function and call `pflag.Parse()` after defining your custom flags, running a test like this:
+```bash
+go test /your/tests -run ^YourTest -v --your-test-pflags
+```
+will result in the `-v` flag being ignored. This happens because of the way pflag handles flag parsing, skipping over go test's built-in shorthand flags.
+To work around this, you can use the `ParseSkippedFlags` function, which ensures that go test's flags are parsed separately using the standard flag package.
+
+**Example**: You want to parse go test flags that are otherwise ignored by `pflag.Parse()`
+```go
+import (
+	goflag "flag"
+	"os"
+	flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	flag.ParseSkippedFlags(os.Args[1:], goflag.CommandLine)
+	flag.Parse()
+}
+```
+
 ## More info
 
 You can see the full reference documentation of the pflag package
diff --git a/vendor/github.com/spf13/pflag/bool_func.go b/vendor/github.com/spf13/pflag/bool_func.go
new file mode 100644
index 0000000000..83d77afa89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_func.go
@@ -0,0 +1,40 @@
+package pflag
+
+// -- func Value
+type boolfuncValue func(string) error
+
+func (f boolfuncValue) Set(s string) error { return f(s) }
+
+func (f boolfuncValue) Type() string { return "boolfunc" }
+
+func (f boolfuncValue) String() string { return "" } // same behavior as stdlib 'flag' package
+
+func (f boolfuncValue) IsBoolFlag() bool { return true }
+
+// BoolFunc defines a func flag with specified name, callback function and usage string.
+//
+// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed
+// on the command line.
+func (f *FlagSet) BoolFunc(name string, usage string, fn func(string) error) {
+	f.BoolFuncP(name, "", usage, fn)
+}
+
+// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolFuncP(name, shorthand string, usage string, fn func(string) error) {
+	var val Value = boolfuncValue(fn)
+	flag := f.VarPF(val, name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolFunc defines a func flag with specified name, callback function and usage string.
+//
+// The callback function will be called every time "--{name}" (or any form that matches the flag) is parsed
+// on the command line.
+func BoolFunc(name string, usage string, fn func(string) error) {
+	CommandLine.BoolFuncP(name, "", usage, fn)
+}
+
+// BoolFuncP is like BoolFunc, but accepts a shorthand letter that can be used after a single dash.
+func BoolFuncP(name, shorthand string, usage string, fn func(string) error) {
+	CommandLine.BoolFuncP(name, shorthand, usage, fn)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
index a0b2679f71..d49c0143c1 100644
--- a/vendor/github.com/spf13/pflag/count.go
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -85,7 +85,7 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
 
 // Count defines a count flag with specified name, default value, and usage string.
 // The return value is the address of an int variable that stores the value of the flag.
-// A count flag will add 1 to its value evey time it is found on the command line
+// A count flag will add 1 to its value every time it is found on the command line
 func Count(name string, usage string) *int {
 	return CommandLine.CountP(name, "", usage)
 }
diff --git a/vendor/github.com/spf13/pflag/errors.go b/vendor/github.com/spf13/pflag/errors.go
new file mode 100644
index 0000000000..ff11b66bef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/errors.go
@@ -0,0 +1,149 @@
+package pflag
+
+import "fmt"
+
+// notExistErrorMessageType specifies which flavor of "flag does not exist"
+// is printed by NotExistError. This allows the related errors to be grouped
+// under a single NotExistError struct without making a breaking change to
+// the error message text.
+type notExistErrorMessageType int + +const ( + flagNotExistMessage notExistErrorMessageType = iota + flagNotDefinedMessage + flagNoSuchFlagMessage + flagUnknownFlagMessage + flagUnknownShorthandFlagMessage +) + +// NotExistError is the error returned when trying to access a flag that +// does not exist in the FlagSet. +type NotExistError struct { + name string + specifiedShorthands string + messageType notExistErrorMessageType +} + +// Error implements error. +func (e *NotExistError) Error() string { + switch e.messageType { + case flagNotExistMessage: + return fmt.Sprintf("flag %q does not exist", e.name) + + case flagNotDefinedMessage: + return fmt.Sprintf("flag accessed but not defined: %s", e.name) + + case flagNoSuchFlagMessage: + return fmt.Sprintf("no such flag -%v", e.name) + + case flagUnknownFlagMessage: + return fmt.Sprintf("unknown flag: --%s", e.name) + + case flagUnknownShorthandFlagMessage: + c := rune(e.name[0]) + return fmt.Sprintf("unknown shorthand flag: %q in -%s", c, e.specifiedShorthands) + } + + panic(fmt.Errorf("unknown flagNotExistErrorMessageType: %v", e.messageType)) +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *NotExistError) GetSpecifiedName() string { + return e.name +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *NotExistError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// ValueRequiredError is the error returned when a flag needs an argument but +// no argument was provided. +type ValueRequiredError struct { + flag *Flag + specifiedName string + specifiedShorthands string +} + +// Error implements error. +func (e *ValueRequiredError) Error() string { + if len(e.specifiedShorthands) > 0 { + c := rune(e.specifiedName[0]) + return fmt.Sprintf("flag needs an argument: %q in -%s", c, e.specifiedShorthands) + } + + return fmt.Sprintf("flag needs an argument: --%s", e.specifiedName) +} + +// GetFlag returns the flag for which the error occurred. +func (e *ValueRequiredError) GetFlag() *Flag { + return e.flag +} + +// GetSpecifiedName returns the name of the flag (without dashes) as it +// appeared in the parsed arguments. +func (e *ValueRequiredError) GetSpecifiedName() string { + return e.specifiedName +} + +// GetSpecifiedShortnames returns the group of shorthand arguments +// (without dashes) that the flag appeared within. If the flag was not in a +// shorthand group, this will return an empty string. +func (e *ValueRequiredError) GetSpecifiedShortnames() string { + return e.specifiedShorthands +} + +// InvalidValueError is the error returned when an invalid value is used +// for a flag. +type InvalidValueError struct { + flag *Flag + value string + cause error +} + +// Error implements error. +func (e *InvalidValueError) Error() string { + flag := e.flag + var flagName string + if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { + flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) + } else { + flagName = fmt.Sprintf("--%s", flag.Name) + } + return fmt.Sprintf("invalid argument %q for %q flag: %v", e.value, flagName, e.cause) +} + +// Unwrap implements errors.Unwrap. +func (e *InvalidValueError) Unwrap() error { + return e.cause +} + +// GetFlag returns the flag for which the error occurred. 
+func (e *InvalidValueError) GetFlag() *Flag { + return e.flag +} + +// GetValue returns the invalid value that was provided. +func (e *InvalidValueError) GetValue() string { + return e.value +} + +// InvalidSyntaxError is the error returned when a bad flag name is passed on +// the command line. +type InvalidSyntaxError struct { + specifiedFlag string +} + +// Error implements error. +func (e *InvalidSyntaxError) Error() string { + return fmt.Sprintf("bad flag syntax: %s", e.specifiedFlag) +} + +// GetSpecifiedName returns the exact flag (with dashes) as it +// appeared in the parsed arguments. +func (e *InvalidSyntaxError) GetSpecifiedFlag() string { + return e.specifiedFlag +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 7c058de374..2fd3c57597 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -27,23 +27,32 @@ unaffected. Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int func init() { flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") } + Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) @@ -54,22 +63,26 @@ The arguments are indexed from 0 through flag.NArg()-1. The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } flag.VarP(&flagval, "varname", "v", "help message") + Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. Command line flag syntax: + --flag // boolean flags only --flag=x Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags -f -abc @@ -124,12 +137,17 @@ const ( PanicOnError ) -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { +// ParseErrorsAllowlist defines the parsing errors that can be ignored +type ParseErrorsAllowlist struct { // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags UnknownFlags bool } +// ParseErrorsWhitelist defines the parsing errors that can be ignored. +// +// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release. +type ParseErrorsWhitelist = ParseErrorsAllowlist + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. 
making '-' and '_' equivalent). type NormalizedName string @@ -145,8 +163,13 @@ type FlagSet struct { // help/usage messages. SortFlags bool - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist + // ParseErrorsAllowlist is used to configure an allowlist of errors + ParseErrorsAllowlist ParseErrorsAllowlist + + // ParseErrorsAllowlist is used to configure an allowlist of errors. + // + // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release. + ParseErrorsWhitelist ParseErrorsAllowlist name string parsed bool @@ -381,7 +404,7 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) + err := &NotExistError{name: name, messageType: flagNotDefinedMessage} return nil, err } @@ -411,7 +434,7 @@ func (f *FlagSet) ArgsLenAtDash() int { func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -427,7 +450,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } if usageMessage == "" { return fmt.Errorf("deprecated message for flag %q must be set", name) @@ -441,7 +464,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro func (f *FlagSet) MarkHidden(name string) error { flag := f.Lookup(name) if flag == nil { - return fmt.Errorf("flag %q does not exist", name) + return &NotExistError{name: name, messageType: flagNotExistMessage} } flag.Hidden = true return nil @@ -464,18 +487,16 @@ func (f *FlagSet) Set(name, value string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } err := flag.Value.Set(value) if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) + return &InvalidValueError{ + flag: flag, + value: value, + cause: err, } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } if !flag.Changed { @@ -501,7 +522,7 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] if !ok { - return fmt.Errorf("no such flag -%v", name) + return &NotExistError{name: name, messageType: flagNoSuchFlagMessage} } if flag.Annotations == nil { flag.Annotations = map[string][]string{} @@ -538,7 +559,7 @@ func (f *FlagSet) PrintDefaults() { func (f *Flag) defaultIsZeroValue() bool { switch f.Value.(type) { case boolFlag: - return f.DefValue == "false" + return f.DefValue == "false" || f.DefValue == "" case *durationValue: // Beginning in Go 1.7, duration zero values are 
"0s" return f.DefValue == "0" || f.DefValue == "0s" @@ -551,7 +572,7 @@ func (f *Flag) defaultIsZeroValue() bool { case *intSliceValue, *stringSliceValue, *stringArrayValue: return f.DefValue == "[]" default: - switch f.Value.String() { + switch f.DefValue { case "false": return true case "": @@ -588,8 +609,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = flag.Value.Type() switch name { - case "bool": + case "bool", "boolfunc": name = "" + case "func": + name = "value" case "float64": name = "float" case "int64": @@ -707,7 +730,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { switch flag.Value.Type() { case "string": line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": + case "bool", "boolfunc": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -911,12 +934,10 @@ func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } -// failf prints to standard error a formatted error and usage message and +// fail prints an error message and usage message to standard error and // returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) +func (f *FlagSet) fail(err error) error { if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -934,9 +955,9 @@ func (f *FlagSet) usage() { } } -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) +// --unknown (args will be empty) +// --unknown --next-flag ... (args will be --next-flag ...) +// --unknown arg ... (args will be arg ...) func stripUnknownFlagValue(args []string) []string { if len(args) == 0 { //--unknown @@ -960,7 +981,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = args name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) + err = f.fail(&InvalidSyntaxError{specifiedFlag: s}) return } @@ -974,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin f.usage() return a, ErrHelp case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // --unknown=unknownval arg ... 
// we do not want to lose arg in this case if len(split) >= 2 { @@ -982,7 +1005,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin return stripUnknownFlagValue(a), nil default: - err = f.failf("unknown flag: --%s", name) + err = f.fail(&NotExistError{name: name, messageType: flagUnknownFlagMessage}) return } } @@ -1000,13 +1023,16 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin a = a[1:] } else { // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: name, + }) return } err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1014,7 +1040,7 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { outArgs = args - if strings.HasPrefix(shorthands, "test.") { + if isGotestShorthandFlag(shorthands) { return } @@ -1029,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = ErrHelp return case f.ParseErrorsWhitelist.UnknownFlags: + fallthrough + case f.ParseErrorsAllowlist.UnknownFlags: // '-f=arg arg ...' // we do not want to lose arg in this case if len(shorthands) > 2 && shorthands[1] == '=' { @@ -1039,7 +1067,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = stripUnknownFlagValue(outArgs) return default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + err = f.fail(&NotExistError{ + name: string(c), + specifiedShorthands: shorthands, + messageType: flagUnknownShorthandFlagMessage, + }) return } } @@ -1062,7 +1094,11 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse outArgs = args[1:] } else { // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + err = f.fail(&ValueRequiredError{ + flag: flag, + specifiedName: string(c), + specifiedShorthands: shorthands, + }) return } @@ -1072,7 +1108,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse err = fn(flag, value) if err != nil { - f.failf(err.Error()) + f.fail(err) } return } @@ -1135,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error { } f.parsed = true - if len(arguments) < 0 { + f.args = make([]string, 0, len(arguments)) + + if len(arguments) == 0 { return nil } - f.args = make([]string, 0, len(arguments)) - set := func(flag *Flag, value string) error { return f.Set(flag.Name, value) } @@ -1151,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: - fmt.Println(err) + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) @@ -1177,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) case ContinueOnError: return err case ExitOnError: + if err == ErrHelp { + os.Exit(0) + } + fmt.Fprintln(f.Output(), err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/func.go b/vendor/github.com/spf13/pflag/func.go new file mode 100644 index 0000000000..9f4d88f271 --- /dev/null +++ b/vendor/github.com/spf13/pflag/func.go @@ -0,0 +1,37 @@ +package pflag + +// -- func Value +type funcValue func(string) error + +func (f funcValue) Set(s string) error { return f(s) } + +func (f funcValue) 
Type() string { return "func" } + +func (f funcValue) String() string { return "" } // same behavior as stdlib 'flag' package + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func (f *FlagSet) Func(name string, usage string, fn func(string) error) { + f.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) FuncP(name string, shorthand string, usage string, fn func(string) error) { + var val Value = funcValue(fn) + f.VarP(val, name, shorthand, usage) +} + +// Func defines a func flag with specified name, callback function and usage string. +// +// The callback function will be called every time "--{name}={value}" (or equivalent) is +// parsed on the command line, with "{value}" as an argument. +func Func(name string, usage string, fn func(string) error) { + CommandLine.FuncP(name, "", usage, fn) +} + +// FuncP is like Func, but accepts a shorthand letter that can be used after a single dash. +func FuncP(name, shorthand string, usage string, fn func(string) error) { + CommandLine.FuncP(name, shorthand, usage, fn) +} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index d3dd72b7fe..e62eab5381 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -8,8 +8,18 @@ import ( goflag "flag" "reflect" "strings" + "time" ) +// go test flags prefixes +func isGotestFlag(flag string) bool { + return strings.HasPrefix(flag, "-test.") +} + +func isGotestShorthandFlag(flag string) bool { + return strings.HasPrefix(flag, "test.") +} + // flagValueWrapper implements pflag.Value around a flag.Value. The main // difference here is the addition of the Type method that returns a string // name of the type. As this is generally unknown, we approximate that with @@ -103,3 +113,49 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { } f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } + +// CopyToGoFlagSet will add all current flags to the given Go flag set. +// Deprecation remarks get copied into the usage description. +// Whenever possible, a flag gets added for which Go flags shows +// a proper type in the help message. +func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) { + f.VisitAll(func(flag *Flag) { + usage := flag.Usage + if flag.Deprecated != "" { + usage += " (DEPRECATED: " + flag.Deprecated + ")" + } + + switch value := flag.Value.(type) { + case *stringValue: + newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage) + case *intValue: + newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage) + case *int64Value: + newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage) + case *uintValue: + newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage) + case *uint64Value: + newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage) + case *durationValue: + newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage) + case *float64Value: + newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage) + default: + newSet.Var(flag.Value, flag.Name, usage) + } + }) +} + +// ParseSkippedFlags explicitly Parses go test flags (i.e. 
the one starting with '-test.') with goflag.Parse(), +// since by default those are skipped by pflag.Parse(). +// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)` +func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error { + var skippedFlags []string + for _, f := range osArgs { + if isGotestFlag(f) { + skippedFlags = append(skippedFlags, f) + } + } + return goFlagSet.Parse(skippedFlags) +} + diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go index 6b541aa879..c6e89da18d 100644 --- a/vendor/github.com/spf13/pflag/ipnet_slice.go +++ b/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -73,7 +73,7 @@ func (s *ipNetSliceValue) String() string { func ipNetSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IPNet{}, nil } diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go index 890a01afc0..1d1e3bf91a 100644 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ b/vendor/github.com/spf13/pflag/string_to_string.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/csv" "fmt" + "sort" "strings" ) @@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string { } func (s *stringToStringValue) String() string { + keys := make([]string, 0, len(*s.value)) + for k := range *s.value { + keys = append(keys, k) + } + sort.Strings(keys) + records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { + for _, k := range keys { + v := (*s.value)[k] records = append(records, k+"="+v) } diff --git a/vendor/github.com/spf13/pflag/text.go b/vendor/github.com/spf13/pflag/text.go new file mode 100644 index 0000000000..886d5a3d80 --- /dev/null +++ b/vendor/github.com/spf13/pflag/text.go @@ -0,0 +1,81 @@ +package pflag + +import ( + "encoding" + "fmt" + "reflect" +) + +// following is copied from go 1.23.4 flag.go +type textValue struct{ p encoding.TextUnmarshaler } + +func newTextValue(val encoding.TextMarshaler, p encoding.TextUnmarshaler) textValue { + ptrVal := reflect.ValueOf(p) + if ptrVal.Kind() != reflect.Ptr { + panic("variable value type must be a pointer") + } + defVal := reflect.ValueOf(val) + if defVal.Kind() == reflect.Ptr { + defVal = defVal.Elem() + } + if defVal.Type() != ptrVal.Type().Elem() { + panic(fmt.Sprintf("default type does not match variable type: %v != %v", defVal.Type(), ptrVal.Type().Elem())) + } + ptrVal.Elem().Set(defVal) + return textValue{p} +} + +func (v textValue) Set(s string) error { + return v.p.UnmarshalText([]byte(s)) +} + +func (v textValue) Get() interface{} { + return v.p +} + +func (v textValue) String() string { + if m, ok := v.p.(encoding.TextMarshaler); ok { + if b, err := m.MarshalText(); err == nil { + return string(b) + } + } + return "" +} + +//end of copy + +func (v textValue) Type() string { + return reflect.ValueOf(v.p).Type().Name() +} + +// GetText set out, which implements encoding.UnmarshalText, to the value of a flag with given name +func (f *FlagSet) GetText(name string, out encoding.TextUnmarshaler) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag accessed but not defined: %s", name) + } + if flag.Value.Type() != reflect.TypeOf(out).Name() { + return fmt.Errorf("trying to get %s value of flag of type %s", reflect.TypeOf(out).Name(), flag.Value.Type()) + } + return 
out.UnmarshalText([]byte(flag.Value.String())) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func (f *FlagSet) TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + f.VarP(newTextValue(value, p), name, shorthand, usage) +} + +// TextVar defines a flag with a specified name, default value, and usage string. The argument p must be a pointer to a variable that will hold the value of the flag, and p must implement encoding.TextUnmarshaler. If the flag is used, the flag value will be passed to p's UnmarshalText method. The type of the default value must be the same as the type of p. +func TextVar(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, "", usage) +} + +// TextVarP is like TextVar, but accepts a shorthand letter that can be used after a single dash. +func TextVarP(p encoding.TextUnmarshaler, name, shorthand string, value encoding.TextMarshaler, usage string) { + CommandLine.VarP(newTextValue(value, p), name, shorthand, usage) +} diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go new file mode 100644 index 0000000000..3dee424791 --- /dev/null +++ b/vendor/github.com/spf13/pflag/time.go @@ -0,0 +1,124 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type timeValue struct { + *time.Time + formats []string +} + +func newTimeValue(val time.Time, p *time.Time, formats []string) *timeValue { + *p = val + return &timeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. +func (d *timeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. 
+func (d *timeValue) Type() string { + return "time" +} + +func (d *timeValue) String() string { + if d.Time.IsZero() { + return "" + } else { + return d.Time.Format(time.RFC3339Nano) + } +} + +// GetTime return the time value of a flag with the given name +func (f *FlagSet) GetTime(name string) (time.Time, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return time.Time{}, err + } + + if flag.Value.Type() != "time" { + err := fmt.Errorf("trying to get %s value of flag of type %s", "time", flag.Value.Type()) + return time.Time{}, err + } + + val, ok := flag.Value.(*timeValue) + if !ok { + return time.Time{}, fmt.Errorf("value %s is not a time", flag.Value) + } + + return *val.Time, nil +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func (f *FlagSet) TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + f.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + f.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// TimeVar defines a time.Time flag with specified name, default value, and usage string. +// The argument p points to a time.Time variable in which to store the value of the flag. +func TimeVar(p *time.Time, name string, value time.Time, formats []string, usage string) { + CommandLine.TimeVarP(p, name, "", value, formats, usage) +} + +// TimeVarP is like TimeVar, but accepts a shorthand letter that can be used after a single dash. +func TimeVarP(p *time.Time, name, shorthand string, value time.Time, formats []string, usage string) { + CommandLine.VarP(newTimeValue(value, p, formats), name, shorthand, usage) +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func (f *FlagSet) Time(name string, value time.Time, formats []string, usage string) *time.Time { + return f.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + p := new(time.Time) + f.TimeVarP(p, name, shorthand, value, formats, usage) + return p +} + +// Time defines a time.Time flag with specified name, default value, and usage string. +// The return value is the address of a time.Time variable that stores the value of the flag. +func Time(name string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, "", value, formats, usage) +} + +// TimeP is like Time, but accepts a shorthand letter that can be used after a single dash. 
+func TimeP(name, shorthand string, value time.Time, formats []string, usage string) *time.Time { + return CommandLine.TimeP(name, shorthand, value, formats, usage) +} diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig index 1f664d13a5..faef0c91e7 100644 --- a/vendor/github.com/spf13/viper/.editorconfig +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -16,3 +16,6 @@ indent_style = tab [*.nix] indent_size = 2 + +[.golangci.yaml] +indent_size = 2 diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml index 474f41633c..bed0b83ecc 100644 --- a/vendor/github.com/spf13/viper/.golangci.yaml +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -1,105 +1,118 @@ -run: - timeout: 5m +version: "2" -linters-settings: - gci: - sections: - - standard - - default - - prefix(github.com/spf13/viper) - gocritic: - # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. - enabled-tags: - - diagnostic - - experimental - - opinionated - - style - disabled-checks: - - importShadow - - unnamedResult - goimports: - local-prefixes: github.com/spf13/viper +run: + timeout: 5m linters: - disable-all: true - enable: - - bodyclose - - dogsled - - dupl - - durationcheck - - exhaustive - - gci - - gocritic - - godot - - gofmt - - gofumpt - - goimports - - gomoddirectives - - goprintffuncname - - govet - - importas - - ineffassign - - makezero - - misspell - - nakedret - - nilerr - - noctx - - nolintlint - - prealloc - - predeclared - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - stylecheck - - tparallel - - typecheck - - unconvert - - unparam - - unused - - wastedassign - - whitespace + enable: + - bodyclose + - dogsled + - dupl + - durationcheck + - exhaustive + - gocritic + - godot + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - tparallel + - unconvert + - unparam + - unused + - wastedassign + - whitespace - # fixme - # - cyclop - # - errcheck - # - errorlint - # - exhaustivestruct - # - forbidigo - # - forcetypeassert - # - gochecknoglobals - # - gochecknoinits - # - gocognit - # - goconst - # - gocyclo - # - gosec - # - gosimple - # - ifshort - # - lll - # - nlreturn - # - paralleltest - # - scopelint - # - thelper - # - wrapcheck + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocyclo + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - wrapcheck - # unused - # - depguard - # - goheader - # - gomodguard + # unused + # - depguard + # - goheader + # - gomodguard - # deprecated - # - deadcode - # - structcheck - # - varcheck + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl - # don't enable: - # - asciicheck - # - funlen - # - godox - # - goerr113 - # - gomnd - # - interfacer - # - maligned - # - nestif - # - testpackage - # - wsl + exclusions: + rules: + - linters: + - errcheck + - noctx + path: _test.go + presets: + - comments + - std-error-handling + + settings: + misspell: + locale: US + nolintlint: + allow-unused: false # report 
any unused nolint directives
+      require-specific: false # don't require nolint directives to be specific about which linter is being skipped
+    gocritic:
+      # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage.
+      enabled-tags:
+        - diagnostic
+        - experimental
+        - opinionated
+        - style
+      disabled-checks:
+        - importShadow
+        - unnamedResult
+
+formatters:
+  enable:
+    - gci
+    - gofmt
+    - gofumpt
+    - goimports
+    # - golines
+
+  settings:
+    gci:
+      sections:
+        - standard
+        - default
+        - localmodule
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
index 769a5d900d..7a4c0fc305 100644
--- a/vendor/github.com/spf13/viper/README.md
+++ b/vendor/github.com/spf13/viper/README.md
@@ -12,7 +12,7 @@
 [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI)
 [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper)
-![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square)
+![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square)
 [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper)
 
 **Go configuration with fangs!**
@@ -821,7 +821,7 @@ You can use your favorite format's marshaller with the config returned by `AllSe
 
 ```go
 import (
-	yaml "gopkg.in/yaml.v2"
+	yaml "go.yaml.in/yaml/v3"
 	// ...
 )
diff --git a/vendor/github.com/spf13/viper/UPDATES.md b/vendor/github.com/spf13/viper/UPGRADE.md
similarity index 79%
rename from vendor/github.com/spf13/viper/UPDATES.md
rename to vendor/github.com/spf13/viper/UPGRADE.md
index ccf413ed7e..a33c965a41 100644
--- a/vendor/github.com/spf13/viper/UPDATES.md
+++ b/vendor/github.com/spf13/viper/UPGRADE.md
@@ -83,6 +83,27 @@ v := viper.NewWithOptions(
 )
 ```
 
+### BREAKING: "github.com/mitchellh/mapstructure" dependency replaced
+
+The original [mapstructure](https://github.com/mitchellh/mapstructure) has been [archived](https://github.com/mitchellh/mapstructure/issues/349) and was replaced with a [fork](https://github.com/go-viper/mapstructure) maintained by Viper ([#1723](https://github.com/spf13/viper/pull/1723)).
+
+As a result, the package import path needs to be changed in cases where `mapstructure` is directly referenced in your code.
+ +For example, when providing a custom decoder config: + +```go +err := viper.Unmarshal(&appConfig, func(config *mapstructure.DecoderConfig) { + config.TagName = "yaml" +}) +``` + +The change is fairly straightforward, just replace all occurrences of the import path `github.com/mitchellh/mapstructure` with `github.com/go-viper/mapstructure/v2`: + +```diff +- import "github.com/mitchellh/mapstructure" ++ import "github.com/go-viper/mapstructure/v2" +``` + ### BREAKING: HCL, Java properties, INI removed from core In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index d76dfbddd1..0b8cfb5a83 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -2,30 +2,32 @@ "nodes": { "cachix": { "inputs": { - "devenv": "devenv_2", + "devenv": [ + "devenv" + ], "flake-compat": [ + "devenv" + ], + "git-hooks": [ "devenv", - "flake-compat" + "git-hooks" ], "nixpkgs": [ "devenv", "nixpkgs" - ], - "pre-commit-hooks": [ - "devenv", - "pre-commit-hooks" ] }, "locked": { - "lastModified": 1712055811, - "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "lastModified": 1748883665, + "narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=", "owner": "cachix", "repo": "cachix", - "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "rev": "f707778d902af4d62d8dd92c269f8e70de09acbe", "type": "github" }, "original": { "owner": "cachix", + "ref": "latest", "repo": "cachix", "type": "github" } @@ -33,52 +35,21 @@ "devenv": { "inputs": { "cachix": "cachix", - "flake-compat": "flake-compat_2", - "nix": "nix_2", - "nixpkgs": "nixpkgs_2", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1724763216, - "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", - "owner": "cachix", - "repo": "devenv", - "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "devenv_2": { - "inputs": { - "flake-compat": [ - "devenv", - "cachix", - "flake-compat" - ], + "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - "nixpkgs": "nixpkgs", - "poetry2nix": "poetry2nix", - "pre-commit-hooks": [ - "devenv", - "cachix", - "pre-commit-hooks" - ] + "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1708704632, - "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "lastModified": 1755257397, + "narHash": "sha256-VU+OHexL2y6y7yrpEc6bZvYYwoQg6aZK1b4YxT0yZCk=", "owner": "cachix", "repo": "devenv", - "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "rev": "6f9c3d4722aa253631644329f7bda60b1d3d1b97", "type": "github" }, "original": { "owner": "cachix", - "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -86,27 +57,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", - "owner": 
"edolstra", - "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -117,14 +72,18 @@ }, "flake-parts": { "inputs": { - "nixpkgs-lib": "nixpkgs-lib" + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] }, "locked": { - "lastModified": 1722555600, - "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", + "lastModified": 1733312601, + "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", + "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", "type": "github" }, "original": { @@ -133,39 +92,47 @@ "type": "github" } }, - "flake-utils": { + "flake-parts_2": { "inputs": { - "systems": "systems" + "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "lastModified": 1754487366, + "narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "hercules-ci", + "repo": "flake-parts", "type": "github" } }, - "flake-utils_2": { + "git-hooks": { "inputs": { - "systems": "systems_2" + "flake-compat": [ + "devenv", + "flake-compat" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "lastModified": 1750779888, + "narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, @@ -173,7 +140,7 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, @@ -192,165 +159,49 @@ } }, "nix": { - "inputs": { - "flake-compat": "flake-compat", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", - "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", - "repo": "nix", - "type": "github" - } - }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "poetry2nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688870561, - "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, - "nix_2": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], + "flake-parts": "flake-parts", + "git-hooks-nix": [ + "devenv", + "git-hooks" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": 
"nixpkgs-regression_2" + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" + ] }, "locked": { - "lastModified": 1712911606, - "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", - "owner": "domenkozar", + "lastModified": 1755029779, + "narHash": "sha256-3+GHIYGg4U9XKUN4rg473frIVNn8YD06bjwxKS1IPrU=", + "owner": "cachix", "repo": "nix", - "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "rev": "b0972b0eee6726081d10b1199f54de6d2917f861", "type": "github" }, "original": { - "owner": "domenkozar", - "ref": "devenv-2.21", + "owner": "cachix", + "ref": "devenv-2.30", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1692808169, - "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "lastModified": 1722555339, - "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - }, - "original": { - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-regression_2": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1710695816, - "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "614b4613980a522ba49f0d194531beddbb7220d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.11", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1713361204, - "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "lastModified": 1750441195, + "narHash": "sha256-yke+pm+MdgRb6c0dPt8MgDhv7fcBbdjmv1ZceNTyzKg=", "owner": "cachix", "repo": "devenv-nixpkgs", - "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "rev": "0ceffe312871b443929ff3006960d29b120dc627", "type": "github" }, "original": { @@ -360,110 +211,42 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1724748588, - "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "poetry2nix": { - "inputs": { - "flake-utils": "flake-utils", - "nix-github-actions": "nix-github-actions", - "nixpkgs": [ - "devenv", - "cachix", - "devenv", - "nixpkgs" - ] - }, + 
"nixpkgs-lib": { "locked": { - "lastModified": 1692876271, - "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "lastModified": 1753579242, + "narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=", "owner": "nix-community", - "repo": "poetry2nix", - "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "repo": "nixpkgs.lib", + "rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e", "type": "github" }, "original": { "owner": "nix-community", - "repo": "poetry2nix", + "repo": "nixpkgs.lib", "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils_2", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, + "nixpkgs_2": { "locked": { - "lastModified": 1713775815, - "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "lastModified": 1755268003, + "narHash": "sha256-nNaeJjo861wFR0tjHDyCnHs1rbRtrMgxAKMoig9Sj/w=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "32f313e49e42f715491e1ea7b306a87c16fe0388", "type": "github" }, "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", "type": "github" } }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_3" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_2" } } }, diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 52ad7d5814..a16b2e3a71 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -7,51 +7,55 @@ devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.go_1_23; - }; - - pre-commit.hooks = { - nixpkgs-fmt.enable = true; - yamllint.enable = true; - }; - - packages = with pkgs; [ - gnumake - - golangci-lint - yamllint - ]; + systems = [ + "x86_64-linux" + "x86_64-darwin" + "aarch64-darwin" + ]; - scripts = { - versions.exec = '' - go version - golangci-lint version + perSystem = + { pkgs, ... 
}: + { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + git-hooks.hooks = { + nixpkgs-fmt.enable = true; + yamllint.enable = true; + }; + + packages = with pkgs; [ + gnumake + + golangci-lint + yamllint + ]; + + scripts = { + versions.exec = '' + go version + golangci-lint version + ''; + }; + + enterShell = '' + versions ''; - }; - - enterShell = '' - versions - ''; - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; }; - - ci = devenv.shells.default; }; - }; }; } diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index 0368792499..a7a839fd93 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,6 +1,6 @@ package yaml -import "gopkg.in/yaml.v3" +import "go.yaml.in/yaml/v3" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. type Codec struct{} diff --git a/vendor/github.com/spf13/viper/remote.go b/vendor/github.com/spf13/viper/remote.go index bdde7de267..46f26721d4 100644 --- a/vendor/github.com/spf13/viper/remote.go +++ b/vendor/github.com/spf13/viper/remote.go @@ -219,7 +219,10 @@ func (v *Viper) watchKeyValueConfigOnChannel() error { for { b := <-rc reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) + err := v.unmarshalReader(reader, v.kvstore) + if err != nil { + v.logger.Error(fmt.Errorf("failed to unmarshal remote config: %w", err).Error()) + } } }(respc) return nil diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index 2a08074bc7..d08ed46211 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -174,10 +174,7 @@ func parseSizeInBytes(sizeStr string) uint { } } - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } + size := max(cast.ToInt(sizeStr), 0) return safeMul(uint(size), multiplier) } diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index a58d757bdb..34a94798b8 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -376,7 +376,12 @@ func (v *Viper) WatchConfig() { } } }() - watcher.Add(configDir) + err = watcher.Add(configDir) + if err != nil { + v.logger.Error(fmt.Sprintf("failed to add watcher: %s", err)) + initWG.Done() + return + } initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... eventsWG.Wait() // now, wait for event loop to end in this go-routine... 
}() @@ -1181,11 +1186,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return res + case "boolSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToBoolSlice(res) case "intSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "uintSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToUintSlice(res) + case "float64Slice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToFloat64Slice(res) case "durationSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") @@ -1268,11 +1288,26 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) any { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return res + case "boolSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToBoolSlice(res) case "intSlice": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "uintSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToUintSlice(res) + case "float64Slice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return cast.ToFloat64Slice(res) case "stringToString": return stringToStringConv(flag.ValueString()) case "stringToInt": @@ -1670,7 +1705,10 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { } buf := new(bytes.Buffer) - buf.ReadFrom(in) + _, err := buf.ReadFrom(in) + if err != nil { + return fmt.Errorf("failed to read configuration from input: %w", err) + } // TODO: remove this once SupportedExts is deprecated/removed if !slices.Contains(SupportedExts, format) { diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml index 7f98d55c42..0e75d86ae0 100644 --- a/vendor/github.com/stoewer/go-strcase/.golangci.yml +++ b/vendor/github.com/stoewer/go-strcase/.golangci.yml @@ -1,26 +1,19 @@ -run: - deadline: 10m +version: "2" linters: enable: - - dupl - - goconst - - gocyclo - - godox - - gosec - - interfacer - - lll - - maligned - - misspell - - prealloc - - stylecheck - - unconvert - - unparam - - errcheck - - golint - - gofmt - disable: [] - fast: false + - dupl + - goconst + - gocyclo + - godox + - gosec + - lll + - misspell + - prealloc + - staticcheck + - unconvert + - unparam -issues: - exclude-use-default: false +formatters: + enable: + - gofmt diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go index ff9e66e0ce..7a9bec7c10 100644 --- a/vendor/github.com/stoewer/go-strcase/camel.go +++ b/vendor/github.com/stoewer/go-strcase/camel.go @@ -30,6 +30,9 @@ func camelCase(s string, upper bool) string { } else if isUpper(prev) && isUpper(curr) && isLower(next) { // Assume a case like "R" for "XRequestId" buffer = append(buffer, curr) + } else if isUpper(curr) && isDigit(prev) { + // Preserve uppercase letters after numbers + buffer = append(buffer, curr) } else { buffer = append(buffer, toLower(curr)) } diff --git 
a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go index ecad589143..96e79d6e13 100644 --- a/vendor/github.com/stoewer/go-strcase/helper.go +++ b/vendor/github.com/stoewer/go-strcase/helper.go @@ -38,6 +38,12 @@ func isSpace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } +// isDigit checks if a character is a digit. More precisely it evaluates if it is +// in the range of ASCII characters '0' to '9'. +func isDigit(ch rune) bool { + return ch >= '0' && ch <= '9' +} + // isDelimiter checks if a character is some kind of whitespace or '_' or '-'. func isDelimiter(ch rune) bool { return ch == '-' || ch == '_' || isSpace(ch) diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba090..ffb24e8e31 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) } // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
+ failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 1906341657..c592f6ad5f 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. 
+// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087ba..58db928450 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f71824a..2fdf80fdd3 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332bb5..de8de0cb6c 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } + offset := 1 - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + for { + n := runtime.Callers(offset, pcs) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
- if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. +// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. 
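For context on the hunk above: this change adds `IsNotType` as the negated counterpart of `IsType`, and both now report mismatches via `%T` on the concrete values. A minimal usage sketch, assuming a testify version that includes this change (the `MyStruct`/`NotMyStruct` names are hypothetical placeholders borrowed from the doc comments):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type MyStruct struct{}
type NotMyStruct struct{}

func TestTypeAssertions(t *testing.T) {
	obj := &MyStruct{}

	// Passes: both arguments have the concrete type *example.MyStruct.
	assert.IsType(t, &MyStruct{}, obj)

	// Passes: *example.NotMyStruct differs from *example.MyStruct.
	assert.IsNotType(t, &NotMyStruct{}, obj)

	// A pointer and its element type do not match; this would fail with
	// "Object expected to be of type *example.MyStruct, but was example.MyStruct":
	// assert.IsType(t, &MyStruct{}, MyStruct{})
}
```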
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) } return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. 
nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. @@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) + } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d38..a0b953aa5c 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7ce..5a6bb75f2c 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d7f..5a74c4f4d5 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf64c..0bae80e34a 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
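
The chain builder above now follows both unwrap shapes, Unwrap() error and Unwrap() []error. A small standalone sketch (standard library only, not testify code) of the two shapes and how a chain walk visits them:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        base := errors.New("base")
        single := fmt.Errorf("wrapped: %w", base)         // Unwrap() error
        multi := errors.Join(single, errors.New("other")) // Unwrap() []error

        fmt.Println(errors.Is(multi, base)) // true: Is walks both shapes

        var walk func(err error) []error
        walk = func(err error) []error {
            errs := []error{err}
            switch x := err.(type) {
            case interface{ Unwrap() error }:
                if e := x.Unwrap(); e != nil {
                    errs = append(errs, walk(e)...)
                }
            case interface{ Unwrap() []error }:
                for _, e := range x.Unwrap() {
                    errs = append(errs, walk(e)...)
                }
            }
            return errs
        }
        for _, e := range walk(multi) {
            fmt.Printf("%q (%T)\n", e.Error(), e)
        }
    }
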
// diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe69..8041803fd2 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 9684347245..c8e3f94a80 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index d8921950d7..2d02f9bcef 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
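
The new doc.go caveat above matters in practice: t.FailNow() stops only the calling goroutine. A hedged sketch of the usual workaround, assuming the standard testify require API (doWork is a hypothetical stand-in):

    package demo

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    // doWork is a hypothetical stand-in for real background work.
    func doWork() error { return nil }

    func TestWorker(t *testing.T) {
        errCh := make(chan error, 1)
        go func() {
            // Do NOT call require.NoError(t, ...) here: FailNow would
            // stop this goroutine, not the test goroutine.
            errCh <- doWork()
        }()
        require.NoError(t, <-errCh) // safe: runs on the test goroutine
    }
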
// -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
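
NotEmpty above is now defined as the negation of [Empty], whose rules are spelled out earlier in this hunk. A few hedged examples, assuming the documented behavior:

    package demo

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestEmptySemantics(t *testing.T) {
        zero := 0
        require.Empty(t, "")                  // zero value
        require.Empty(t, [2]int{0, 0})        // array of zero values
        require.Empty(t, map[string]int(nil)) // zero length
        require.Empty(t, &zero)               // pointer to an "empty" value
        require.NotEmpty(t, []int{0})         // length 1, so not empty
    }
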
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
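
The mixed-shape rule spelled out above (map keys only, when a map is compared against an array or slice) is easy to misread; a hedged sketch of what now passes, following the documented examples:

    package demo

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestSubsetShapes(t *testing.T) {
        m := map[string]int{"x": 1, "y": 2}
        require.Subset(t, m, map[string]int{"x": 1}) // map vs map: key-value pairs
        require.Subset(t, m, []string{"x"})          // map vs slice: keys only
        require.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"})
        require.NotSubset(t, m, []string{"z"}) // "z" is not a key of m
    }
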
// // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304f4..e6f7e94468 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) 
} +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. 
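
IsNotType is the new negative counterpart forwarded above. A hedged usage sketch, following the doc examples:

    package demo

    import (
        "bytes"
        "strings"
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestReaderTypes(t *testing.T) {
        var r interface{} = strings.NewReader("data")
        require.IsType(t, &strings.Reader{}, r)  // same concrete type
        require.IsNotType(t, &bytes.Reader{}, r) // different concrete type
    }
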
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
 // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
 // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
 func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
 	Samef(a.t, expected, actual, msg, args...)
 }
 
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
 // a.Subset([1, 2, 3], [1, 2])
 // a.Subset({"x": 1, "y": 2}, {"x": 1})
+// a.Subset([1, 2, 3], {1: "one", 2: "two"})
+// a.Subset({"x": 1, "y": 2}, ["x"])
 func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
 	Subset(a.t, list, subset, msgAndArgs...)
 }
 
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
 // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
 // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
index c466ffeda5..8f9650c13d 100644
--- a/vendor/github.com/ulikunitz/xz/TODO.md
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -1,9 +1,5 @@
 # TODO list
 
-## Release v0.5.x
-
-1. Support check flag in gxz command.
-
 ## Release v0.6
 
 1. Review encoder and check for lzma improvements under xz.
@@ -86,6 +82,19 @@
 
 ## Log
 
+### 2025-08-28
+
+Release v0.5.14 addresses the security vulnerability CVE-2025-58058. If you put
+bytes in front of an LZMA stream, the header might not be read correctly and
+memory for the dictionary buffer may be allocated. I have implemented
+mitigations for the problem.
+
+### 2025-08-20
+
+Release v0.5.13 addressed issue #61 regarding handling of multiple WriteClosers
+together. So I added a new package xio with a WriteCloserStack to address the
+issue.
+ ### 2024-04-03 Release v0.5.12 updates README.md and SECURITY.md to address the supply chain diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go index 1ae7d80cab..34aa097e15 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -60,36 +60,36 @@ const noHeaderSize uint64 = 1<<64 - 1 // HeaderLen provides the length of the LZMA file header. const HeaderLen = 13 -// header represents the header of an LZMA file. -type header struct { - properties Properties - dictCap int - // uncompressed size; negative value if no size is given - size int64 +// Header represents the Header of an LZMA file. +type Header struct { + Properties Properties + DictSize uint32 + // uncompressed Size; negative value if no Size is given + Size int64 } // marshalBinary marshals the header. -func (h *header) marshalBinary() (data []byte, err error) { - if err = h.properties.verify(); err != nil { +func (h *Header) marshalBinary() (data []byte, err error) { + if err = h.Properties.verify(); err != nil { return nil, err } - if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + if !(h.DictSize <= MaxDictCap) { return nil, fmt.Errorf("lzma: DictCap %d out of range", - h.dictCap) + h.DictSize) } data = make([]byte, 13) // property byte - data[0] = h.properties.Code() + data[0] = h.Properties.Code() // dictionary capacity - putUint32LE(data[1:5], uint32(h.dictCap)) + putUint32LE(data[1:5], uint32(h.DictSize)) // uncompressed size var s uint64 - if h.size > 0 { - s = uint64(h.size) + if h.Size > 0 { + s = uint64(h.Size) } else { s = noHeaderSize } @@ -99,20 +99,20 @@ func (h *header) marshalBinary() (data []byte, err error) { } // unmarshalBinary unmarshals the header. -func (h *header) unmarshalBinary(data []byte) error { +func (h *Header) unmarshalBinary(data []byte) error { if len(data) != HeaderLen { return errors.New("lzma.unmarshalBinary: data has wrong length") } // properties var err error - if h.properties, err = PropertiesForCode(data[0]); err != nil { + if h.Properties, err = PropertiesForCode(data[0]); err != nil { return err } // dictionary capacity - h.dictCap = int(uint32LE(data[1:])) - if h.dictCap < 0 { + h.DictSize = uint32LE(data[1:]) + if int(h.DictSize) < 0 { return errors.New( "LZMA header: dictionary capacity exceeds maximum " + "integer") @@ -121,10 +121,10 @@ func (h *header) unmarshalBinary(data []byte) error { // uncompressed size s := uint64LE(data[5:]) if s == noHeaderSize { - h.size = -1 + h.Size = -1 } else { - h.size = int64(s) - if h.size < 0 { + h.Size = int64(s) + if h.Size < 0 { return errors.New( "LZMA header: uncompressed size " + "out of int64 range") @@ -134,9 +134,9 @@ func (h *header) unmarshalBinary(data []byte) error { return nil } -// validDictCap checks whether the dictionary capacity is correct. This +// validDictSize checks whether the dictionary capacity is correct. This // is used to weed out wrong file headers. -func validDictCap(dictcap int) bool { +func validDictSize(dictcap int) bool { if int64(dictcap) == MaxDictCap { return true } @@ -155,13 +155,16 @@ func validDictCap(dictcap int) bool { // dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If // there is an explicit size it must not exceed 256 GiB. The length of // the data argument must be HeaderLen. +// +// This function should be disregarded because there is no guarantee that LZMA +// files follow the constraints. 
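
The classic .lzma header that unmarshalBinary decodes above is a fixed 13 bytes: one properties byte, a 4-byte little-endian dictionary size, and an 8-byte little-endian uncompressed size where all-0xFF means "unknown" (noHeaderSize). A standalone sketch of the layout, standard library only:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        hdr := []byte{
            0x5d,                   // properties code: (pb*5+lp)*9+lc
            0x00, 0x00, 0x80, 0x00, // dictionary size: 8 MiB
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // size unknown
        }
        props := hdr[0]
        lc, lp, pb := props%9, (props/9)%5, props/45
        dictSize := binary.LittleEndian.Uint32(hdr[1:5])
        size := binary.LittleEndian.Uint64(hdr[5:13])
        fmt.Printf("lc=%d lp=%d pb=%d dict=%d sizeUnknown=%v\n",
            lc, lp, pb, dictSize, size == 1<<64-1)
        // prints: lc=3 lp=0 pb=2 dict=8388608 sizeUnknown=true
    }
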
func ValidHeader(data []byte) bool { - var h header + var h Header if err := h.unmarshalBinary(data); err != nil { return false } - if !validDictCap(h.dictCap) { + if !validDictSize(int(h.DictSize)) { return false } - return h.size < 0 || h.size <= 1<<38 + return h.Size < 0 || h.Size <= 1<<38 } diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go index ae911c3893..eef6bea76b 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -6,25 +6,32 @@ // Reader and Writer support the classic LZMA format. Reader2 and // Writer2 support the decoding and encoding of LZMA2 streams. // -// The package is written completely in Go and doesn't rely on any external +// The package is written completely in Go and does not rely on any external // library. package lzma import ( "errors" + "fmt" "io" ) // ReaderConfig stores the parameters for the reader of the classic LZMA // format. type ReaderConfig struct { + // Since v0.5.14 this parameter sets an upper limit for a .lzma file's + // dictionary size. This helps to mitigate problems with mangled + // headers. DictCap int } // fill converts the zero values of the configuration to the default values. func (c *ReaderConfig) fill() { if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 + // set an upper limit of 2 GiB-1 for dictionary capacity + // to address the zero prefix security issue. + c.DictCap = (1 << 31) - 1 + // original: c.DictCap = 8 * 1024 * 1024 } } @@ -39,10 +46,33 @@ func (c *ReaderConfig) Verify() error { } // Reader provides a reader for LZMA files or streams. +// +// # Security concerns +// +// Note that LZMA format doesn't support a magic marker in the header. So +// [NewReader] cannot determine whether it reads the actual header. For instance +// the LZMA stream might have a zero byte in front of the reader, leading to +// larger dictionary sizes and file sizes. The code will detect later that there +// are problems with the stream, but the dictionary has already been allocated +// and this might consume a lot of memory. +// +// Version 0.5.14 introduces built-in mitigations: +// +// - The [ReaderConfig] DictCap field is now interpreted as a limit for the +// dictionary size. +// - The default is 2 Gigabytes minus 1 byte (2^31-1 bytes). +// - Users can check with the [Reader.Header] method what the actual values are in +// their LZMA files and set a smaller limit using [ReaderConfig]. +// - The dictionary size doesn't exceed the larger of the file size and +// the minimum dictionary size. This is another measure to prevent huge +// memory allocations for the dictionary. +// - The code supports stream sizes only up to a pebibyte (1024^5). type Reader struct { - lzma io.Reader - h header - d *decoder + lzma io.Reader + header Header + // headerOrig stores the original header read from the stream. + headerOrig Header + d *decoder } // NewReader creates a new reader for an LZMA stream using the classic @@ -51,8 +81,37 @@ func NewReader(lzma io.Reader) (r *Reader, err error) { return ReaderConfig{}.NewReader(lzma) } +// ErrDictSize reports about an error of the dictionary size. +type ErrDictSize struct { + ConfigDictCap int + HeaderDictSize uint32 + Message string +} + +// Error returns the error message. 
+func (e *ErrDictSize) Error() string { + return e.Message +} + +func newErrDictSize(messageformat string, + configDictCap int, headerDictSize uint32, + args ...interface{}) *ErrDictSize { + newArgs := make([]interface{}, len(args)+2) + newArgs[0] = configDictCap + newArgs[1] = headerDictSize + copy(newArgs[2:], args) + return &ErrDictSize{ + ConfigDictCap: configDictCap, + HeaderDictSize: headerDictSize, + Message: fmt.Sprintf(messageformat, newArgs...), + } +} + +// We support only files not larger than 1 << 50 bytes (a pebibyte, 1024^5). +const maxStreamSize = 1 << 50 + // NewReader creates a new reader for an LZMA stream in the classic -// format. The function reads and verifies the the header of the LZMA +// format. The function reads and verifies the header of the LZMA // stream. func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { if err = c.Verify(); err != nil { @@ -66,29 +125,63 @@ func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { return nil, err } r = &Reader{lzma: lzma} - if err = r.h.unmarshalBinary(data); err != nil { + if err = r.header.unmarshalBinary(data); err != nil { return nil, err } - if r.h.dictCap < MinDictCap { - r.h.dictCap = MinDictCap + r.headerOrig = r.header + dictSize := int64(r.header.DictSize) + if int64(c.DictCap) < dictSize { + return nil, newErrDictSize( + "lzma: header dictionary size %[2]d exceeds configured dictionary capacity %[1]d", + c.DictCap, uint32(dictSize), + ) + } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + // original code: disabled this because there is no point in increasing + // the dictionary above what is stated in the file. + /* + if int64(c.DictCap) > int64(dictSize) { + dictSize = int64(c.DictCap) + } + */ + size := r.header.Size + if size >= 0 && size < dictSize { + dictSize = size } - dictCap := r.h.dictCap - if c.DictCap > dictCap { - dictCap = c.DictCap + // Protect against modified or malicious headers. + if size > maxStreamSize { + return nil, fmt.Errorf( + "lzma: stream size %d exceeds a pebibyte (1024^5)", + size) } + if dictSize < MinDictCap { + dictSize = MinDictCap + } + + r.header.DictSize = uint32(dictSize) - state := newState(r.h.properties) - dict, err := newDecoderDict(dictCap) + state := newState(r.header.Properties) + dict, err := newDecoderDict(int(dictSize)) if err != nil { return nil, err } - r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.header.Size) if err != nil { return nil, err } return r, nil } +// Header returns the header as read from the LZMA stream. It is intended to +// allow the user to understand what parameters are typically provided in the +// headers of the LZMA files and set the DictCap field in [ReaderConfig] +// accordingly. +func (r *Reader) Header() (h Header, ok bool) { + return r.headerOrig, r.d != nil +} + // EOSMarker indicates that an EOS marker has been encountered. func (r *Reader) EOSMarker() bool { return r.d.eosMarker diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go index e8f89811d3..f73bb73f28 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go @@ -96,21 +96,21 @@ func (c *WriterConfig) Verify() error { } // header returns the header structure for this configuration. 
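
With the mitigations above, ReaderConfig.DictCap is now an upper bound rather than a floor, and a header that asks for more surfaces as *ErrDictSize. A hedged usage sketch against the vendored API shown in this hunk:

    package demo

    import (
        "errors"
        "fmt"
        "io"
        "log"

        "github.com/ulikunitz/xz/lzma"
    )

    func decompress(f io.Reader) ([]byte, error) {
        cfg := lzma.ReaderConfig{DictCap: 64 << 20} // refuse dictionaries over 64 MiB
        r, err := cfg.NewReader(f)
        if err != nil {
            var eds *lzma.ErrDictSize
            if errors.As(err, &eds) {
                return nil, fmt.Errorf("header wants a %d-byte dictionary, limit is %d",
                    eds.HeaderDictSize, eds.ConfigDictCap)
            }
            return nil, err
        }
        if h, ok := r.Header(); ok {
            log.Printf("dictionary size from header: %d", h.DictSize)
        }
        return io.ReadAll(r)
    }
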
-func (c *WriterConfig) header() header { - h := header{ - properties: *c.Properties, - dictCap: c.DictCap, - size: -1, +func (c *WriterConfig) header() Header { + h := Header{ + Properties: *c.Properties, + DictSize: uint32(c.DictCap), + Size: -1, } if c.SizeInHeader { - h.size = c.Size + h.Size = c.Size } return h } // Writer writes an LZMA stream in the classic format. type Writer struct { - h header + h Header bw io.ByteWriter buf *bufio.Writer e *encoder @@ -130,12 +130,12 @@ func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { w.buf = bufio.NewWriter(lzma) w.bw = w.buf } - state := newState(w.h.properties) - m, err := c.Matcher.new(w.h.dictCap) + state := newState(w.h.Properties) + m, err := c.Matcher.new(int(w.h.DictSize)) if err != nil { return nil, err } - dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + dict, err := newEncoderDict(int(w.h.DictSize), c.BufSize, m) if err != nil { return nil, err } @@ -171,8 +171,8 @@ func (w *Writer) writeHeader() error { // Write puts data into the Writer. func (w *Writer) Write(p []byte) (n int, err error) { - if w.h.size >= 0 { - m := w.h.size + if w.h.Size >= 0 { + m := w.h.Size m -= w.e.Compressed() + int64(w.e.dict.Buffered()) if m < 0 { m = 0 @@ -192,9 +192,9 @@ func (w *Writer) Write(p []byte) (n int, err error) { // Close closes the writer stream. It ensures that all data from the // buffer will be compressed and the LZMA stream will be finished. func (w *Writer) Close() error { - if w.h.size >= 0 { + if w.h.Size >= 0 { n := w.e.Compressed() + int64(w.e.dict.Buffered()) - if n != w.h.size { + if n != w.h.Size { return errSize } } diff --git a/vendor/github.com/vbauerster/mpb/v8/README.md b/vendor/github.com/vbauerster/mpb/v8/README.md index af97c92a75..05088f246e 100644 --- a/vendor/github.com/vbauerster/mpb/v8/README.md +++ b/vendor/github.com/vbauerster/mpb/v8/README.md @@ -104,14 +104,14 @@ func main() { p.Wait() ``` -#### [Dynamic total](_examples/dynTotal/main.go) +#### [dynTotal example](_examples/dynTotal/main.go) -![dynamic total](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg) +![dynTotal](_svg/godEMrCZmJkHYH1X9dN4Nm0U7.svg) -#### [Complex example](_examples/complex/main.go) +#### [complex example](_examples/complex/main.go) ![complex](_svg/wHzf1M7sd7B3zVa2scBMnjqRf.svg) -#### [Bytes counters](_examples/io/main.go) +#### [io example](_examples/io/main.go) -![byte counters](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg) +![io](_svg/hIpTa3A5rQz65ssiVuRJu87X6.svg) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index 5eb1123d23..db4f99c292 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -145,13 +145,7 @@ func (b *Bar) Current() int64 { // operation for example. 
func (b *Bar) SetRefill(amount int64) { select { - case b.operateState <- func(s *bState) { - if amount < s.current { - s.refill = amount - } else { - s.refill = s.current - } - }: + case b.operateState <- func(s *bState) { s.refill = min(amount, s.current) }: case <-b.ctx.Done(): } } @@ -275,10 +269,10 @@ func (b *Bar) EwmaIncrInt64(n int64, iterDur time.Duration) { var wg sync.WaitGroup wg.Add(len(s.ewmaDecorators)) for _, d := range s.ewmaDecorators { - d := d + // d := d // NOTE: uncomment for Go < 1.22, see /doc/faq#closures_and_goroutines go func() { + defer wg.Done() d.EwmaUpdate(n, iterDur) - wg.Done() }() } s.current += n @@ -304,10 +298,10 @@ func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { var wg sync.WaitGroup wg.Add(len(s.ewmaDecorators)) for _, d := range s.ewmaDecorators { - d := d + // d := d // NOTE: uncomment for Go < 1.22, see /doc/faq#closures_and_goroutines go func() { + defer wg.Done() d.EwmaUpdate(n, iterDur) - wg.Done() }() } s.current = current @@ -394,13 +388,14 @@ func (b *Bar) Wait() { } func (b *Bar) serve(bs *bState) { + defer b.container.bwg.Done() decoratorsOnShutdown := func(group []decor.Decorator) { for _, d := range group { if d, ok := unwrap(d).(decor.ShutdownListener); ok { b.container.bwg.Add(1) go func() { + defer b.container.bwg.Done() d.OnShutdown() - b.container.bwg.Done() }() } } @@ -416,7 +411,6 @@ func (b *Bar) serve(bs *bState) { bs.aborted = !bs.completed() b.bs = bs close(b.bsOk) - b.container.bwg.Done() return } } diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go index 7a036d9870..4dca113d08 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -10,15 +10,15 @@ import ( const ( iLbound = iota - iRbound iRefiller iFiller iTip iPadding - components + iRbound + iLen ) -var defaultBarStyle = [components]string{"[", "]", "+", "=", ">", "-"} +var defaultBarStyle = [iLen]string{"[", "+", "=", ">", "-", "]"} // BarStyleComposer interface. 
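
The commented-out `d := d` above leans on the Go 1.22 loop-variable change: each iteration now gets a fresh variable, so the per-iteration copy is redundant. A standalone sketch of that, plus the `defer wg.Done()` style the hunk adopts:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for _, d := range []string{"a", "b", "c"} {
            wg.Add(1)
            go func() {
                defer wg.Done() // defer: Done runs even if the body panics
                fmt.Println(d)  // Go 1.22+: d is per-iteration, no copy needed
            }()
        }
        wg.Wait()
    }
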
type BarStyleComposer interface { @@ -44,15 +44,17 @@ type component struct { bytes []byte } -type flushSection struct { - meta func(io.Writer, []byte) error +type barSection struct { + meta func(string) string bytes []byte } -type bFiller struct { - components [components]component - meta [components]func(io.Writer, []byte) error - flush func(io.Writer, ...flushSection) error +type barSections [iLen]barSection + +type barFiller struct { + components [iLen]component + metas [iLen]func(string) string + flushOp func(barSections, io.Writer) error tip struct { onComplete bool count uint @@ -61,8 +63,8 @@ type bFiller struct { } type barStyle struct { - style [components]string - metaFuncs [components]func(io.Writer, []byte) error + style [iLen]string + metas [iLen]func(string) string tipFrames []string tipOnComplete bool rev bool @@ -75,9 +77,6 @@ func BarStyle() BarStyleComposer { style: defaultBarStyle, tipFrames: []string{defaultBarStyle[iTip]}, } - for i := range bs.metaFuncs { - bs.metaFuncs[i] = defaultMeta - } return bs } @@ -87,7 +86,7 @@ func (s barStyle) Lbound(bound string) BarStyleComposer { } func (s barStyle) LboundMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iLbound] = makeMetaFunc(fn) + s.metas[iLbound] = fn return s } @@ -97,7 +96,7 @@ func (s barStyle) Rbound(bound string) BarStyleComposer { } func (s barStyle) RboundMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iRbound] = makeMetaFunc(fn) + s.metas[iRbound] = fn return s } @@ -107,7 +106,7 @@ func (s barStyle) Filler(filler string) BarStyleComposer { } func (s barStyle) FillerMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iFiller] = makeMetaFunc(fn) + s.metas[iFiller] = fn return s } @@ -117,7 +116,7 @@ func (s barStyle) Refiller(refiller string) BarStyleComposer { } func (s barStyle) RefillerMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iRefiller] = makeMetaFunc(fn) + s.metas[iRefiller] = fn return s } @@ -127,7 +126,7 @@ func (s barStyle) Padding(padding string) BarStyleComposer { } func (s barStyle) PaddingMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iPadding] = makeMetaFunc(fn) + s.metas[iPadding] = fn return s } @@ -139,7 +138,7 @@ func (s barStyle) Tip(frames ...string) BarStyleComposer { } func (s barStyle) TipMeta(fn func(string) string) BarStyleComposer { - s.metaFuncs[iTip] = makeMetaFunc(fn) + s.metas[iTip] = fn return s } @@ -154,9 +153,7 @@ func (s barStyle) Reverse() BarStyleComposer { } func (s barStyle) Build() BarFiller { - bf := &bFiller{ - meta: s.metaFuncs, - } + bf := &barFiller{metas: s.metas} bf.components[iLbound] = component{ width: runewidth.StringWidth(s.style[iLbound]), bytes: []byte(s.style[iLbound]), @@ -178,42 +175,22 @@ func (s barStyle) Build() BarFiller { bytes: []byte(s.style[iPadding]), } bf.tip.onComplete = s.tipOnComplete - bf.tip.frames = make([]component, len(s.tipFrames)) - for i, t := range s.tipFrames { - bf.tip.frames[i] = component{ + bf.tip.frames = make([]component, 0, len(s.tipFrames)) + for _, t := range s.tipFrames { + bf.tip.frames = append(bf.tip.frames, component{ width: runewidth.StringWidth(t), bytes: []byte(t), - } + }) } if s.rev { - bf.flush = func(w io.Writer, sections ...flushSection) error { - for i := len(sections) - 1; i >= 0; i-- { - if s := sections[i]; len(s.bytes) != 0 { - err := s.meta(w, s.bytes) - if err != nil { - return err - } - } - } - return nil - } + bf.flushOp = barSections.flushRev } else { - bf.flush = func(w io.Writer, sections ...flushSection) error { - for _, s := 
range sections { - if len(s.bytes) != 0 { - err := s.meta(w, s.bytes) - if err != nil { - return err - } - } - } - return nil - } + bf.flushOp = barSections.flush } return bf } -func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { +func (s *barFiller) Fill(w io.Writer, stat decor.Statistics) error { width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) // don't count brackets as progress width -= (s.components[iLbound].width + s.components[iRbound].width) @@ -221,15 +198,6 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { return nil } - err := s.meta[iLbound](w, s.components[iLbound].bytes) - if err != nil { - return err - } - - if width == 0 { - return s.meta[iRbound](w, s.components[iRbound].bytes) - } - var tip component var refilling, filling, padding []byte var fillCount int @@ -265,26 +233,42 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { padding = append(padding, "…"...) } - err = s.flush(w, - flushSection{s.meta[iRefiller], refilling}, - flushSection{s.meta[iFiller], filling}, - flushSection{s.meta[iTip], tip.bytes}, - flushSection{s.meta[iPadding], padding}, - ) - if err != nil { - return err + return s.flushOp(barSections{ + {s.metas[iLbound], s.components[iLbound].bytes}, + {s.metas[iRefiller], refilling}, + {s.metas[iFiller], filling}, + {s.metas[iTip], tip.bytes}, + {s.metas[iPadding], padding}, + {s.metas[iRbound], s.components[iRbound].bytes}, + }, w) +} + +func (s barSection) flush(w io.Writer) (err error) { + if s.meta != nil { + _, err = io.WriteString(w, s.meta(string(s.bytes))) + } else { + _, err = w.Write(s.bytes) } - return s.meta[iRbound](w, s.components[iRbound].bytes) + return err } -func makeMetaFunc(fn func(string) string) func(io.Writer, []byte) error { - return func(w io.Writer, p []byte) (err error) { - _, err = io.WriteString(w, fn(string(p))) - return err +func (bb barSections) flush(w io.Writer) error { + for _, s := range bb { + err := s.flush(w) + if err != nil { + return err + } } + return nil } -func defaultMeta(w io.Writer, p []byte) (err error) { - _, err = w.Write(p) - return err +func (bb barSections) flushRev(w io.Writer) error { + bb[0], bb[len(bb)-1] = bb[len(bb)-1], bb[0] + for i := len(bb) - 1; i >= 0; i-- { + err := bb[i].flush(w) + if err != nil { + return err + } + } + return nil } diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go index c9fd463eb8..56075810c6 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go @@ -24,7 +24,7 @@ type SpinnerStyleComposer interface { Meta(func(string) string) SpinnerStyleComposer } -type sFiller struct { +type spinnerFiller struct { frames []string count uint meta func(string) string @@ -40,9 +40,7 @@ type spinnerStyle struct { // SpinnerStyle constructs default spinner style which can be altered via // SpinnerStyleComposer interface. 
func SpinnerStyle(frames ...string) SpinnerStyleComposer { - ss := spinnerStyle{ - meta: func(s string) string { return s }, - } + var ss spinnerStyle if len(frames) != 0 { ss.frames = frames } else { @@ -67,10 +65,7 @@ func (s spinnerStyle) Meta(fn func(string) string) SpinnerStyleComposer { } func (s spinnerStyle) Build() BarFiller { - sf := &sFiller{ - frames: s.frames, - meta: s.meta, - } + sf := &spinnerFiller{frames: s.frames} switch s.position { case positionLeft: sf.position = func(frame string, padWidth int) string { @@ -85,10 +80,15 @@ func (s spinnerStyle) Build() BarFiller { return strings.Repeat(" ", padWidth/2) + frame + strings.Repeat(" ", padWidth/2+padWidth%2) } } + if s.meta != nil { + sf.meta = s.meta + } else { + sf.meta = func(s string) string { return s } + } return sf } -func (s *sFiller) Fill(w io.Writer, stat decor.Statistics) error { +func (s *spinnerFiller) Fill(w io.Writer, stat decor.Statistics) error { width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) frame := s.frames[s.count%uint(len(s.frames))] frameWidth := runewidth.StringWidth(frame) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_option.go b/vendor/github.com/vbauerster/mpb/v8/bar_option.go index b586f69445..6c1b7e6f19 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -86,6 +86,25 @@ func BarFillerOnComplete(message string) BarOption { }) } +// BarFillerClearOnAbort clears bar's filler on abort event. +// It's shortcut for BarFillerOnAbort(""). +func BarFillerClearOnAbort() BarOption { + return BarFillerOnAbort("") +} + +// BarFillerOnAbort replaces bar's filler with message, on abort event. +func BarFillerOnAbort(message string) BarOption { + return BarFillerMiddleware(func(base BarFiller) BarFiller { + return BarFillerFunc(func(w io.Writer, st decor.Statistics) error { + if st.Aborted { + _, err := io.WriteString(w, message) + return err + } + return base.Fill(w, st) + }) + }) +} + // BarFillerMiddleware provides a way to augment the underlying BarFiller. 
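
A hedged usage sketch of the new abort options added above, assuming the usual mpb v8 entry points (mpb.New, AddBar, Abort, Wait), which this hunk does not show:

    package main

    import (
        "time"

        "github.com/vbauerster/mpb/v8"
    )

    func main() {
        p := mpb.New()
        // New option from the hunk above: render an empty filler on abort.
        bar := p.AddBar(100, mpb.BarFillerClearOnAbort())
        for i := 0; i < 40; i++ {
            bar.Increment()
            time.Sleep(2 * time.Millisecond)
        }
        bar.Abort(false) // false: keep the bar visible rather than dropping it
        p.Wait()
    }
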
func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { if middle == nil { diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go index 00d11c2187..90ecda6884 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/size_type.go @@ -1,3 +1,6 @@ +//go:generate go tool stringer -type=SizeB1024 -trimprefix=_i +//go:generate go tool stringer -type=SizeB1000 -trimprefix=_ + package decor import ( @@ -5,9 +8,6 @@ import ( "strconv" ) -//go:generate stringer -type=SizeB1024 -trimprefix=_i -//go:generate stringer -type=SizeB1000 -trimprefix=_ - var ( _ fmt.Formatter = SizeB1024(0) _ fmt.Stringer = SizeB1024(0) diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go index 23e24d8269..88efb4823c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go +++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -41,7 +41,7 @@ func (m heapManager) run() { var bHeap priorityQueue var pMatrix, aMatrix map[int][]chan int - var len int + var l int var sync bool for req := range m { @@ -51,7 +51,7 @@ func (m heapManager) run() { heap.Push(&bHeap, data.bar) sync = sync || data.sync case h_sync: - if sync || len != bHeap.Len() { + if sync || l != bHeap.Len() { pMatrix = make(map[int][]chan int) aMatrix = make(map[int][]chan int) for _, b := range bHeap { @@ -64,7 +64,7 @@ func (m heapManager) run() { } } sync = false - len = bHeap.Len() + l = bHeap.Len() } drop := req.data.(<-chan struct{}) syncWidth(pMatrix, drop) @@ -106,7 +106,7 @@ func (m heapManager) run() { } case h_state: ch := req.data.(chan<- bool) - ch <- sync || len != bHeap.Len() + ch <- sync || l != bHeap.Len() case h_end: ch := req.data.(chan<- interface{}) if ch != nil { diff --git a/vendor/github.com/vbauerster/mpb/v8/priority_queue.go b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go index 0863b5787b..c2f657db0a 100644 --- a/vendor/github.com/vbauerster/mpb/v8/priority_queue.go +++ b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go @@ -20,18 +20,18 @@ func (pq priorityQueue) Swap(i, j int) { } func (pq *priorityQueue) Push(x interface{}) { - n := len(*pq) - bar := x.(*Bar) - bar.index = n - *pq = append(*pq, bar) + s := *pq + b := x.(*Bar) + b.index = len(s) + *pq = append(s, b) } func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - bar := old[n-1] - old[n-1] = nil // avoid memory leak - bar.index = -1 // for safety - *pq = old[:n-1] - return bar + var b *Bar + s := *pq + i := len(s) - 1 + b, s[i] = s[i], nil // nil to avoid memory leak + b.index = -1 // for safety + *pq = s[:i] + return b } diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index ee4f722953..851083c407 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -355,16 +355,18 @@ func (s *pState) render(cw *cwriter.Writer) (err error) { height = width } + var barCount int for b := range iter { + barCount++ go b.render(width) } - return s.flush(cw, height, iterPop) + return s.flush(cw, height, barCount, iterPop) } -func (s *pState) flush(cw *cwriter.Writer, height int, iter <-chan *Bar) error { - var popCount int - var rows []io.Reader +func (s *pState) flush(cw *cwriter.Writer, height, barCount int, iter <-chan *Bar) error { + var total, popCount int + rows := make([][]io.Reader, 0, barCount) for b := range iter 
{ frame := <-b.frameCh @@ -373,15 +375,16 @@ func (s *pState) flush(cw *cwriter.Writer, height int, iter <-chan *Bar) error { b.cancel() return frame.err // b.frameCh is buffered it's ok to return here } - var usedRows int + var discarded int for i := len(frame.rows) - 1; i >= 0; i-- { - if row := frame.rows[i]; len(rows) < height { - rows = append(rows, row) - usedRows++ + if total < height { + total++ } else { - _, _ = io.Copy(io.Discard, row) + _, _ = io.Copy(io.Discard, frame.rows[i]) // Found IsInBounds + discarded++ } } + rows = append(rows, frame.rows) switch frame.shutdown { case 1: @@ -399,7 +402,7 @@ func (s *pState) flush(cw *cwriter.Writer, height int, iter <-chan *Bar) error { } case 2: if s.popCompleted && !frame.noPop { - popCount += usedRows + popCount += len(frame.rows) - discarded continue } fallthrough @@ -409,13 +412,15 @@ func (s *pState) flush(cw *cwriter.Writer, height int, iter <-chan *Bar) error { } for i := len(rows) - 1; i >= 0; i-- { - _, err := cw.ReadFrom(rows[i]) - if err != nil { - return err + for _, r := range rows[i] { + _, err := cw.ReadFrom(r) + if err != nil { + return err + } } } - return cw.Flush(len(rows) - popCount) + return cw.Flush(total - popCount) } func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState { diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md deleted file mode 100644 index a4f5f1458f..0000000000 --- a/vendor/github.com/xeipuuv/gojsonpointer/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# gojsonpointer -An implementation of JSON Pointer - Go language - -## Usage - jsonText := `{ - "name": "Bobby B", - "occupation": { - "title" : "King", - "years" : 15, - "heir" : "Joffrey B" - } - }` - - var jsonDocument map[string]interface{} - json.Unmarshal([]byte(jsonText), &jsonDocument) - - //create a JSON pointer - pointerString := "/occupation/title" - pointer, _ := NewJsonPointer(pointerString) - - //SET a new value for the "title" in the document - pointer.Set(jsonDocument, "Supreme Leader of Westeros") - - //GET the new "title" from the document - title, _, _ := pointer.Get(jsonDocument) - fmt.Println(title) //outputs "Supreme Leader of Westeros" - - //DELETE the "heir" from the document - deletePointer := NewJsonPointer("/occupation/heir") - deletePointer.Delete(jsonDocument) - - b, _ := json.Marshal(jsonDocument) - fmt.Println(string(b)) - //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}` - - -## References -https://tools.ietf.org/html/rfc6901 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go deleted file mode 100644 index 798c1f1c57..0000000000 --- a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package gojsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" -) - -const ( - const_empty_pointer = `` - const_pointer_separator = `/` - - const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` -) - -type implStruct struct { - mode string // "SET" or "GET" - - inDocument interface{} - - setInValue interface{} - - getOutNode interface{} - getOutKind reflect.Kind - outError error -} - -type JsonPointer struct { - referenceTokens []string -} - -// NewJsonPointer parses the given string JSON pointer and returns an object -func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { - - // Pointer to the root of the document - if len(jsonPointerString) == 0 { - // Keep referenceTokens nil - return - } - if jsonPointerString[0] != '/' { - return p, errors.New(const_invalid_start) - } - - p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) - return -} - -// Uses the pointer to retrieve a value from a JSON document -func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - - is := &implStruct{mode: "GET", inDocument: document} - p.implementation(is) - return is.getOutNode, is.getOutKind, is.outError - -} - -// Uses the pointer to update a value from a JSON document -func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { - - is := &implStruct{mode: "SET", inDocument: document, setInValue: value} - p.implementation(is) - return document, is.outError - -} - -// Uses the pointer to delete a value from a JSON document -func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { - is := &implStruct{mode: "DEL", inDocument: document} - p.implementation(is) - return document, is.outError -} - -// Both Get and Set functions use the same implementation to avoid code duplication -func (p *JsonPointer) implementation(i *implStruct) { - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - i.getOutNode = i.inDocument - i.outError = nil - i.getOutKind = kind - i.outError = nil - return - } - - node := i.inDocument - - previousNodes := make([]interface{}, len(p.referenceTokens)) - previousTokens := make([]string, len(p.referenceTokens)) - - for ti, token := range p.referenceTokens { - - isLastToken := ti == len(p.referenceTokens)-1 - previousNodes[ti] = node - previousTokens[ti] = token - - switch v := node.(type) { - - case map[string]interface{}: - decodedToken := decodeReferenceToken(token) - if _, ok := v[decodedToken]; ok { - node = v[decodedToken] - if isLastToken && i.mode == "SET" { - v[decodedToken] = i.setInValue - } else if isLastToken && i.mode == "DEL" { - delete(v, decodedToken) - } - } else if isLastToken && i.mode == "SET" { - v[decodedToken] = i.setInValue - } else { - i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) - i.getOutKind = reflect.Map - i.getOutNode = nil - return - } - - case []interface{}: - tokenIndex, err := strconv.Atoi(token) - if err != nil { - i.outError = fmt.Errorf("Invalid array index '%s'", token) - i.getOutKind = reflect.Slice - i.getOutNode = nil 
- return - } - if tokenIndex < 0 || tokenIndex >= len(v) { - i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) - i.getOutKind = reflect.Slice - i.getOutNode = nil - return - } - - node = v[tokenIndex] - if isLastToken && i.mode == "SET" { - v[tokenIndex] = i.setInValue - } else if isLastToken && i.mode == "DEL" { - v[tokenIndex] = v[len(v)-1] - v[len(v)-1] = nil - v = v[:len(v)-1] - previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v - } - - default: - i.outError = fmt.Errorf("Invalid token reference '%s'", token) - i.getOutKind = reflect.ValueOf(node).Kind() - i.getOutNode = nil - return - } - - } - - i.getOutNode = node - i.getOutKind = reflect.ValueOf(node).Kind() - i.outError = nil -} - -// Pointer to string representation function -func (p *JsonPointer) String() string { - - if len(p.referenceTokens) == 0 { - return const_empty_pointer - } - - pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... and vice versa - -func decodeReferenceToken(token string) string { - step1 := strings.Replace(token, `~1`, `/`, -1) - step2 := strings.Replace(step1, `~0`, `~`, -1) - return step2 -} - -func encodeReferenceToken(token string) string { - step1 := strings.Replace(token, `~`, `~0`, -1) - step2 := strings.Replace(step1, `/`, `~1`, -1) - return step2 -} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42c..0000000000 --- a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md deleted file mode 100644 index 9ab6e1eb13..0000000000 --- a/vendor/github.com/xeipuuv/gojsonreference/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# gojsonreference -An implementation of JSON Reference - Go language - -## Dependencies -https://github.com/xeipuuv/gojsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go deleted file mode 100644 index 6457291301..0000000000 --- a/vendor/github.com/xeipuuv/gojsonreference/reference.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package gojsonreference - -import ( - "errors" - "net/url" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonpointer" -) - -const ( - const_fragment_char = `#` -) - -func NewJsonReference(jsonReferenceString string) (JsonReference, error) { - - var r JsonReference - err := r.parse(jsonReferenceString) - return r, err - -} - -type JsonReference struct { - referenceUrl *url.URL - referencePointer gojsonpointer.JsonPointer - - HasFullUrl bool - HasUrlPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -func (r *JsonReference) GetUrl() *url.URL { - return r.referenceUrl -} - -func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { - return &r.referencePointer -} - -func (r *JsonReference) String() string { - - if r.referenceUrl != nil { - return r.referenceUrl.String() - } - - if r.HasFragmentOnly { - return const_fragment_char + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -func (r *JsonReference) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) -} - -// "Constructor", parses the given string JSON reference -func (r *JsonReference) parse(jsonReferenceString string) (err error) { - - r.referenceUrl, err = url.Parse(jsonReferenceString) - if err != nil { - return - } - refUrl := r.referenceUrl - - if refUrl.Scheme != "" && refUrl.Host != "" { - r.HasFullUrl = true - } else { - if refUrl.Path != "" { - r.HasUrlPathOnly = true - } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refUrl.Scheme == "file" - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, and if it - // doesn't then its first component will be treated as the host by the - // Go runtime - if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { - r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) - } else { - r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) - } - } else { - r.HasFullFilePath = filepath.IsAbs(refUrl.Path) - } - - // invalid json-pointer error means url has no json-pointer fragment. simply ignore error - r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) - - return -} - -// Creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { - if child.GetUrl() == nil { - return nil, errors.New("childUrl is nil!") - } - - if r.GetUrl() == nil { - return nil, errors.New("parentUrl is nil!") - } - - // Get a copy of the parent url to make sure we do not modify the original. - // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. 
- // The fragment of the child must be used, so the fragment of the parent is manually removed. - parentUrl := *r.GetUrl() - parentUrl.Fragment = "" - - ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) - if err != nil { - return nil, err - } - return &ref, err -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore deleted file mode 100644 index 68e993ce3e..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.sw[nop] -*.iml -.vscode/ diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml deleted file mode 100644 index 3289001cd1..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - "1.11" - - "1.12" - - "1.13" -before_install: - - go get github.com/xeipuuv/gojsonreference - - go get github.com/xeipuuv/gojsonpointer - - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42c..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md deleted file mode 100644 index 758f26df0f..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/README.md +++ /dev/null @@ -1,466 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) -[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) -[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) - -# gojsonschema - -## Description - -An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
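
For readers who want to try the library without any files on disk, here is a minimal, self-contained sketch using the in-memory `NewStringLoader` variant of the loaders described below; the schema and document shown are illustrative placeholders, and a file-based version of the same flow appears under Usage.

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Both the schema and the document live in memory as JSON strings.
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	documentLoader := gojsonschema.NewStringLoader(`{"name": "John"}`)

	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		panic(err)
	}

	if result.Valid() {
		fmt.Println("The document is valid")
	} else {
		for _, desc := range result.Errors() {
			fmt.Printf("- %s\n", desc)
		}
	}
}
```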
-
-References:
-
-* http://json-schema.org
-* http://json-schema.org/latest/json-schema-core.html
-* http://json-schema.org/latest/json-schema-validation.html
-
-## Installation
-
-```
-go get github.com/xeipuuv/gojsonschema
-```
-
-Dependencies:
-* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
-* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
-* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
-
-## Usage
-
-### Example
-
-```go
-
-package main
-
-import (
- "fmt"
- "github.com/xeipuuv/gojsonschema"
-)
-
-func main() {
-
- schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
- documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
-
- result, err := gojsonschema.Validate(schemaLoader, documentLoader)
- if err != nil {
- panic(err.Error())
- }
-
- if result.Valid() {
- fmt.Printf("The document is valid\n")
- } else {
- fmt.Printf("The document is not valid. See errors:\n")
- for _, desc := range result.Errors() {
- fmt.Printf("- %s\n", desc)
- }
- }
-}
-
-
-```
-
-#### Loaders
-
-There are various ways to load your JSON data.
-To load your schemas and documents,
-first declare an appropriate loader:
-
-* Web / HTTP, using a reference:
-
-```go
-loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
-```
-
-* Local file, using a reference:
-
-```go
-loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
-```
-
-References use the URI scheme; the prefix (file://) and a full path to the file are required.
-
-* JSON strings:
-
-```go
-loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
-```
-
-* Custom Go types:
-
-```go
-m := map[string]interface{}{"type": "string"}
-loader := gojsonschema.NewGoLoader(m)
-```
-
-And
-
-```go
-type Root struct {
- Users []User `json:"users"`
-}
-
-type User struct {
- Name string `json:"name"`
-}
-
-...
-
-data := Root{}
-data.Users = append(data.Users, User{"John"})
-data.Users = append(data.Users, User{"Sophia"})
-data.Users = append(data.Users, User{"Bill"})
-
-loader := gojsonschema.NewGoLoader(data)
-```
-
-#### Validation
-
-Once the loaders are set, validation is easy:
-
-```go
-result, err := gojsonschema.Validate(schemaLoader, documentLoader)
-```
-
-Alternatively, you might want to load a schema only once and run multiple validations:
-
-```go
-schema, err := gojsonschema.NewSchema(schemaLoader)
-...
-result1, err := schema.Validate(documentLoader1)
-...
-result2, err := schema.Validate(documentLoader2)
-...
-// etc ...
-```
-
-To check the result:
-
-```go
- if result.Valid() {
- fmt.Printf("The document is valid\n")
- } else {
- fmt.Printf("The document is not valid. See errors:\n")
- for _, err := range result.Errors() {
- // Err implements the ResultError interface
- fmt.Printf("- %s\n", err)
- }
- }
-```
-
-
-## Loading local schemas
-
-By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
-
-```go
- sl := gojsonschema.NewSchemaLoader()
- loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
- err := sl.AddSchema("http://some_host.com/string.json", loader1)
-```
-
-Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function:
-```go
- loader2 := gojsonschema.NewStringLoader(`{
- "$id" : "http://some_host.com/maxlength.json",
- "maxLength" : 5
- }`)
- err = sl.AddSchemas(loader2)
-```
-
-The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
-```go
- loader3 := gojsonschema.NewStringLoader(`{
- "$id" : "http://some_host.com/main.json",
- "allOf" : [
- { "$ref" : "http://some_host.com/string.json" },
- { "$ref" : "http://some_host.com/maxlength.json" }
- ]
- }`)
-
- schema, err := sl.Compile(loader3)
-
- documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
-
- result, err := schema.Validate(documentLoader)
-```
-
-It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
-
-```go
-err = sl.AddSchemas(loader3)
-schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
-```
-
-Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
-
-## Using a specific draft
-By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
-
-Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
-
-```go
-sl := gojsonschema.NewSchemaLoader()
-sl.Draft = gojsonschema.Draft7
-sl.AutoDetect = false
-```
-
-If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas.
-
-## Meta-schema validation
-Schemas that are added using `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property.
-
-The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
-
-```go
-sl := gojsonschema.NewSchemaLoader()
-sl.Validate = true
-err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
- "$id" : "http://some_host.com/invalid.json",
- "$schema": "http://json-schema.org/draft-07/schema#",
- "multipleOf" : true
-}`))
-```
-
-Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
-
-Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
-
-
-## Working with Errors
-
-The library uses string error codes, which you can customize by creating your own gojsonschema.locale and setting it:
-```go
-gojsonschema.Locale = YourCustomLocale{}
-```
-
-However, each error contains additional contextual information.
-
-Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
-
-**err.Type()**: *string* Returns the "type" of error that occurred.
Note that you can also type-check; see below.
-
-Note: An error of RequiredType has an err.Type() return value of "required"
-
- "required": RequiredError
- "invalid_type": InvalidTypeError
- "number_any_of": NumberAnyOfError
- "number_one_of": NumberOneOfError
- "number_all_of": NumberAllOfError
- "number_not": NumberNotError
- "missing_dependency": MissingDependencyError
- "internal": InternalError
- "const": ConstError
- "enum": EnumError
- "array_no_additional_items": ArrayNoAdditionalItemsError
- "array_min_items": ArrayMinItemsError
- "array_max_items": ArrayMaxItemsError
- "unique": ItemsMustBeUniqueError
- "contains" : ArrayContainsError
- "array_min_properties": ArrayMinPropertiesError
- "array_max_properties": ArrayMaxPropertiesError
- "additional_property_not_allowed": AdditionalPropertyNotAllowedError
- "invalid_property_pattern": InvalidPropertyPatternError
- "invalid_property_name": InvalidPropertyNameError
- "string_gte": StringLengthGTEError
- "string_lte": StringLengthLTEError
- "pattern": DoesNotMatchPatternError
- "format": DoesNotMatchFormatError
- "multiple_of": MultipleOfError
- "number_gte": NumberGTEError
- "number_gt": NumberGTError
- "number_lte": NumberLTEError
- "number_lt": NumberLTError
- "condition_then" : ConditionThenError
- "condition_else" : ConditionElseError
-
-**err.Value()**: *interface{}* Returns the value given
-
-**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
-
-**err.Field()**: *string* Returns the field name in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
-
-**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
-
-**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
-
-**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
-
-Note that in most cases, err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format, i.e.
-```
-{{.field}} must be greater than or equal to {{.min}}
-```
-
-The library allows you to specify custom template functions, should you require more complex error message handling.
-```go
-gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
- "allcaps": func(s string) string {
- return strings.ToUpper(s)
- },
-}
-```
-
-Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
-```
-{{allcaps .field}} must be greater than or equal to {{.min}}
-```
-
-The above error message would then be rendered with the `field` value in capital letters. For example:
-```
-"PASSWORD must be greater than or equal to 8"
-```
-
-Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
-
-## Formats
-JSON Schema allows for an optional "format" property to validate instances against well-known formats.
gojsonschema ships with all of the formats defined in the spec that you can use like this:
-
-```json
-{"type": "string", "format": "email"}
-```
-
-Not all formats defined in draft-07 are available. Implemented formats are:
-
-* `date`
-* `time`
-* `date-time`
-* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5), with the implication that IPv4 addresses are also recognized as valid hostnames.
-* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
-* `idn-email`. Same caveat as `email`.
-* `ipv4`
-* `ipv6`
-* `uri`. Includes unicode support.
-* `uri-reference`. Includes unicode support.
-* `iri`
-* `iri-reference`
-* `uri-template`
-* `uuid`
-* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
-* `json-pointer`
-* `relative-json-pointer`
-
-`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support, you should use the specific
-unicode-enabled formats for the sake of interoperability, as other implementations might not support unicode in the regular formats.
-
-The validation code for `uri`, `idn-email` and their relatives uses mostly standard library code.
-
-For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
-
-```go
-// Define the format checker
-type RoleFormatChecker struct {}
-
-// Ensure it meets the gojsonschema.FormatChecker interface
-func (f RoleFormatChecker) IsFormat(input interface{}) bool {
-
- asString, ok := input.(string)
- if !ok {
- return false
- }
-
- return strings.HasPrefix(asString, "ROLE_")
-}
-
-// Add it to the library
-gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
-```
-
-Now use it in your JSON schema:
-```json
-{"type": "string", "format": "role"}
-```
-
-Another example would be to check if the provided integer matches an ID in the database:
-
-JSON schema:
-```json
-{"type": "integer", "format": "ValidUserId"}
-```
-
-```go
-// Define the format checker
-type ValidUserIdFormatChecker struct {}
-
-// Ensure it meets the gojsonschema.FormatChecker interface
-func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
-
- asFloat64, ok := input.(float64) // Numbers are always float64 here
- if !ok {
- return false
- }
-
- // XXX: look up int(asFloat64) in the database here
-
- return true
-}
-
-// Add it to the library
-gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
-```
-
-Formats can also be removed, for example if you want to override one of the formats that is defined by default.
-
-```go
-gojsonschema.FormatCheckers.Remove("hostname")
-```
-
-
-## Additional custom validation
-After the validation has run and you have the results, you may add additional
-errors using `Result.AddError`. This is useful for maintaining the same format within the result set instead
-of having to add special exceptions for your own errors. Below is an example.
-
-```go
-type AnswerInvalidError struct {
- gojsonschema.ResultErrorFields
-}
-
-func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
- err := AnswerInvalidError{}
- err.SetContext(context)
- err.SetType("custom_invalid_error")
- // it is important to use SetDescriptionFormat(), as it is used to call SetDescription() after parsing;
- // any description set directly on err will be overridden by this.
- err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
- err.SetValue(value)
- err.SetDetails(details)
-
- return &err
-}
-
-func validate(schemaLoader, documentLoader gojsonschema.JSONLoader) (*gojsonschema.Result, error) {
- // ...
- result, err := gojsonschema.Validate(schemaLoader, documentLoader)
-
- if true { // some validation
- jsonContext := gojsonschema.NewJsonContext("question", nil)
- errDetail := gojsonschema.ErrorDetails{
- "answer": 42,
- }
- result.AddError(
- newAnswerInvalidError(
- gojsonschema.NewJsonContext("answer", jsonContext),
- 52,
- errDetail,
- ),
- errDetail,
- )
- }
-
- return result, err
-
-}
-```
-
-This is especially useful if you want to add validation beyond what the
-JSON Schema drafts can provide, such as business-specific logic.
-
-## Uses
-
-gojsonschema uses the following test suite:
-
-https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/xeipuuv/gojsonschema/draft.go b/vendor/github.com/xeipuuv/gojsonschema/draft.go
deleted file mode 100644
index 61298e7aa0..0000000000
--- a/vendor/github.com/xeipuuv/gojsonschema/draft.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2018 johandorland ( https://github.com/johandorland )
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package gojsonschema - -import ( - "errors" - "math" - "reflect" - - "github.com/xeipuuv/gojsonreference" -) - -// Draft is a JSON-schema draft version -type Draft int - -// Supported Draft versions -const ( - Draft4 Draft = 4 - Draft6 Draft = 6 - Draft7 Draft = 7 - Hybrid Draft = math.MaxInt32 -) - -type draftConfig struct { - Version Draft - MetaSchemaURL string - MetaSchema string -} -type draftConfigs []draftConfig - -var drafts draftConfigs - -func init() { - drafts = []draftConfig{ - { - Version: Draft4, - MetaSchemaURL: "http://json-schema.org/draft-04/schema", - MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, - }, - { - Version: Draft6, - MetaSchemaURL: "http://json-schema.org/draft-06/schema", - MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, - }, - { - Version: Draft7, - MetaSchemaURL: "http://json-schema.org/draft-07/schema", - MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, - }, - } -} - -func (dc draftConfigs) GetMetaSchema(url string) string { - for _, config := range dc { - if config.MetaSchemaURL == url { - return config.MetaSchema - } - } - return "" -} -func (dc draftConfigs) GetDraftVersion(url string) *Draft { - for _, config := range dc { - if config.MetaSchemaURL == url { - return &config.Version - } - } - return nil -} -func (dc draftConfigs) GetSchemaURL(draft Draft) string { - for _, config := range dc { - if config.Version == draft { - return config.MetaSchemaURL - } - } - return "" -} - -func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { - - if isKind(documentNode, reflect.Bool) { - return "", nil, nil - } - - if !isKind(documentNode, reflect.Map) { - return "", nil, errors.New("schema is invalid") - } - - m := documentNode.(map[string]interface{}) - - if existsMapKey(m, KEY_SCHEMA) { - if !isKind(m[KEY_SCHEMA], 
reflect.String) { - return "", nil, errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": KEY_SCHEMA, - "type": TYPE_STRING, - }, - )) - } - - schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string)) - - if err != nil { - return "", nil, err - } - - schema := schemaReference.String() - - return schema, drafts.GetDraftVersion(schema), nil - } - - return "", nil, nil -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go deleted file mode 100644 index e4e9814f31..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/errors.go +++ /dev/null @@ -1,364 +0,0 @@ -package gojsonschema - -import ( - "bytes" - "sync" - "text/template" -) - -var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}} - -// template.Template is not thread-safe for writing, so some locking is done -// sync.RWMutex is used for efficiently locking when new templates are created -type errorTemplate struct { - *template.Template - sync.RWMutex -} - -type ( - - // FalseError. ErrorDetails: - - FalseError struct { - ResultErrorFields - } - - // RequiredError indicates that a required field is missing - // ErrorDetails: property string - RequiredError struct { - ResultErrorFields - } - - // InvalidTypeError indicates that a field has the incorrect type - // ErrorDetails: expected, given - InvalidTypeError struct { - ResultErrorFields - } - - // NumberAnyOfError is produced in case of a failing "anyOf" validation - // ErrorDetails: - - NumberAnyOfError struct { - ResultErrorFields - } - - // NumberOneOfError is produced in case of a failing "oneOf" validation - // ErrorDetails: - - NumberOneOfError struct { - ResultErrorFields - } - - // NumberAllOfError is produced in case of a failing "allOf" validation - // ErrorDetails: - - NumberAllOfError struct { - ResultErrorFields - } - - // NumberNotError is produced if a "not" validation failed - // ErrorDetails: - - NumberNotError struct { - ResultErrorFields - } - - // MissingDependencyError is produced in case of a "missing dependency" problem - // ErrorDetails: dependency - MissingDependencyError struct { - ResultErrorFields - } - - // InternalError indicates an internal error - // ErrorDetails: error - InternalError struct { - ResultErrorFields - } - - // ConstError indicates a const error - // ErrorDetails: allowed - ConstError struct { - ResultErrorFields - } - - // EnumError indicates an enum error - // ErrorDetails: allowed - EnumError struct { - ResultErrorFields - } - - // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed - // ErrorDetails: - - ArrayNoAdditionalItemsError struct { - ResultErrorFields - } - - // ArrayMinItemsError is produced if an array contains less items than the allowed minimum - // ErrorDetails: min - ArrayMinItemsError struct { - ResultErrorFields - } - - // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum - // ErrorDetails: max - ArrayMaxItemsError struct { - ResultErrorFields - } - - // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items - // ErrorDetails: type, i, j - ItemsMustBeUniqueError struct { - ResultErrorFields - } - - // ArrayContainsError is produced if an array contains invalid items - // ErrorDetails: - ArrayContainsError struct { - ResultErrorFields - } - - // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum - // ErrorDetails: 
min - ArrayMinPropertiesError struct { - ResultErrorFields - } - - // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum - // ErrorDetails: max - ArrayMaxPropertiesError struct { - ResultErrorFields - } - - // AdditionalPropertyNotAllowedError is produced if an object has additional properties that are not allowed - // ErrorDetails: property - AdditionalPropertyNotAllowedError struct { - ResultErrorFields - } - - // InvalidPropertyPatternError is produced if an invalid property pattern was found - // ErrorDetails: property, pattern - InvalidPropertyPatternError struct { - ResultErrorFields - } - - // InvalidPropertyNameError is produced if an invalidly named property was found - // ErrorDetails: property - InvalidPropertyNameError struct { - ResultErrorFields - } - - // StringLengthGTEError is produced if a string is shorter than the minimum required length - // ErrorDetails: min - StringLengthGTEError struct { - ResultErrorFields - } - - // StringLengthLTEError is produced if a string is longer than the maximum allowed length - // ErrorDetails: max - StringLengthLTEError struct { - ResultErrorFields - } - - // DoesNotMatchPatternError is produced if a string does not match the defined pattern - // ErrorDetails: pattern - DoesNotMatchPatternError struct { - ResultErrorFields - } - - // DoesNotMatchFormatError is produced if a string does not match the defined format - // ErrorDetails: format - DoesNotMatchFormatError struct { - ResultErrorFields - } - - // MultipleOfError is produced if a number is not a multiple of the defined multipleOf - // ErrorDetails: multiple - MultipleOfError struct { - ResultErrorFields - } - - // NumberGTEError is produced if a number is lower than the allowed minimum - // ErrorDetails: min - NumberGTEError struct { - ResultErrorFields - } - - // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set - // ErrorDetails: min - NumberGTError struct { - ResultErrorFields - } - - // NumberLTEError is produced if a number is higher than the allowed maximum - // ErrorDetails: max - NumberLTEError struct { - ResultErrorFields - } - - // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set - // ErrorDetails: max - NumberLTError struct { - ResultErrorFields - } - - // ConditionThenError is produced if a condition's "then" validation is invalid - // ErrorDetails: - - ConditionThenError struct { - ResultErrorFields - } - - // ConditionElseError is produced if a condition's "else" condition is invalid - // ErrorDetails: - - ConditionElseError struct { - ResultErrorFields - } -) - -// newError takes a ResultError type and sets the type, context, description, details, value, and field -func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) { - var t string - var d string - switch err.(type) { - case *FalseError: - t = "false" - d = locale.False() - case *RequiredError: - t = "required" - d = locale.Required() - case *InvalidTypeError: - t = "invalid_type" - d = locale.InvalidType() - case *NumberAnyOfError: - t = "number_any_of" - d = locale.NumberAnyOf() - case *NumberOneOfError: - t = "number_one_of" - d = locale.NumberOneOf() - case *NumberAllOfError: - t = "number_all_of" - d = locale.NumberAllOf() - case *NumberNotError: - t = "number_not" - d = locale.NumberNot() - case *MissingDependencyError: - t = "missing_dependency" - d = locale.MissingDependency() - case
*InternalError: - t = "internal" - d = locale.Internal() - case *ConstError: - t = "const" - d = locale.Const() - case *EnumError: - t = "enum" - d = locale.Enum() - case *ArrayNoAdditionalItemsError: - t = "array_no_additional_items" - d = locale.ArrayNoAdditionalItems() - case *ArrayMinItemsError: - t = "array_min_items" - d = locale.ArrayMinItems() - case *ArrayMaxItemsError: - t = "array_max_items" - d = locale.ArrayMaxItems() - case *ItemsMustBeUniqueError: - t = "unique" - d = locale.Unique() - case *ArrayContainsError: - t = "contains" - d = locale.ArrayContains() - case *ArrayMinPropertiesError: - t = "array_min_properties" - d = locale.ArrayMinProperties() - case *ArrayMaxPropertiesError: - t = "array_max_properties" - d = locale.ArrayMaxProperties() - case *AdditionalPropertyNotAllowedError: - t = "additional_property_not_allowed" - d = locale.AdditionalPropertyNotAllowed() - case *InvalidPropertyPatternError: - t = "invalid_property_pattern" - d = locale.InvalidPropertyPattern() - case *InvalidPropertyNameError: - t = "invalid_property_name" - d = locale.InvalidPropertyName() - case *StringLengthGTEError: - t = "string_gte" - d = locale.StringGTE() - case *StringLengthLTEError: - t = "string_lte" - d = locale.StringLTE() - case *DoesNotMatchPatternError: - t = "pattern" - d = locale.DoesNotMatchPattern() - case *DoesNotMatchFormatError: - t = "format" - d = locale.DoesNotMatchFormat() - case *MultipleOfError: - t = "multiple_of" - d = locale.MultipleOf() - case *NumberGTEError: - t = "number_gte" - d = locale.NumberGTE() - case *NumberGTError: - t = "number_gt" - d = locale.NumberGT() - case *NumberLTEError: - t = "number_lte" - d = locale.NumberLTE() - case *NumberLTError: - t = "number_lt" - d = locale.NumberLT() - case *ConditionThenError: - t = "condition_then" - d = locale.ConditionThen() - case *ConditionElseError: - t = "condition_else" - d = locale.ConditionElse() - } - - err.SetType(t) - err.SetContext(context) - err.SetValue(value) - err.SetDetails(details) - err.SetDescriptionFormat(d) - details["field"] = err.Field() - - if _, exists := details["context"]; !exists && context != nil { - details["context"] = context.String() - } - - err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) -} - -// formatErrorDescription takes a string in the default text/template -// format and converts it to a string with replacements. The fields come -// from the ErrorDetails struct and vary for each type of error. 
-func formatErrorDescription(s string, details ErrorDetails) string { - - var tpl *template.Template - var descrAsBuffer bytes.Buffer - var err error - - errorTemplates.RLock() - tpl = errorTemplates.Lookup(s) - errorTemplates.RUnlock() - - if tpl == nil { - errorTemplates.Lock() - tpl = errorTemplates.New(s) - - if ErrorTemplateFuncs != nil { - tpl.Funcs(ErrorTemplateFuncs) - } - - tpl, err = tpl.Parse(s) - errorTemplates.Unlock() - - if err != nil { - return err.Error() - } - } - - err = tpl.Execute(&descrAsBuffer, details) - if err != nil { - return err.Error() - } - - return descrAsBuffer.String() -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go deleted file mode 100644 index 873ffc7d79..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go +++ /dev/null @@ -1,368 +0,0 @@ -package gojsonschema - -import ( - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "sync" - "time" -) - -type ( - // FormatChecker is the interface all formatters added to FormatCheckerChain must implement - FormatChecker interface { - // IsFormat checks if input has the correct format and type - IsFormat(input interface{}) bool - } - - // FormatCheckerChain holds the formatters - FormatCheckerChain struct { - formatters map[string]FormatChecker - } - - // EmailFormatChecker verifies email address formats - EmailFormatChecker struct{} - - // IPV4FormatChecker verifies IP addresses in the IPv4 format - IPV4FormatChecker struct{} - - // IPV6FormatChecker verifies IP addresses in the IPv6 format - IPV6FormatChecker struct{} - - // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Date: YYYY-MM-DD - // Full Time: HH:MM:SSZ-07:00 - // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - // - // Note: Nanoseconds are also supported in all formats - // - // http://tools.ietf.org/html/rfc3339#section-5.6 - DateTimeFormatChecker struct{} - - // DateFormatChecker verifies date formats - // - // Valid format: - // Full Date: YYYY-MM-DD - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - DateFormatChecker struct{} - - // TimeFormatChecker verifies time formats - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Time: HH:MM:SSZ-07:00 - // - // Where - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - TimeFormatChecker struct{} - - // URIFormatChecker validates a URI with a valid Scheme per RFC3986 - URIFormatChecker struct{} - - // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986 - URIReferenceFormatChecker struct{} - - // URITemplateFormatChecker validates a URI template per RFC6570 - URITemplateFormatChecker struct{} - - // HostnameFormatChecker validates a hostname is in the correct format - HostnameFormatChecker struct{} - - // UUIDFormatChecker validates a UUID is in the correct format - UUIDFormatChecker struct{} - - // RegexFormatChecker validates a regex is in the correct format - RegexFormatChecker struct{} - - //
JSONPointerFormatChecker validates a JSON Pointer per RFC6901 - JSONPointerFormatChecker struct{} - - // RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format - RelativeJSONPointerFormatChecker struct{} -) - -var ( - // FormatCheckers holds the valid formatters, and is a public variable - // so library users can add custom formatters - FormatCheckers = FormatCheckerChain{ - formatters: map[string]FormatChecker{ - "date": DateFormatChecker{}, - "time": TimeFormatChecker{}, - "date-time": DateTimeFormatChecker{}, - "hostname": HostnameFormatChecker{}, - "email": EmailFormatChecker{}, - "idn-email": EmailFormatChecker{}, - "ipv4": IPV4FormatChecker{}, - "ipv6": IPV6FormatChecker{}, - "uri": URIFormatChecker{}, - "uri-reference": URIReferenceFormatChecker{}, - "iri": URIFormatChecker{}, - "iri-reference": URIReferenceFormatChecker{}, - "uri-template": URITemplateFormatChecker{}, - "uuid": UUIDFormatChecker{}, - "regex": RegexFormatChecker{}, - "json-pointer": JSONPointerFormatChecker{}, - "relative-json-pointer": RelativeJSONPointerFormatChecker{}, - }, - } - - // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname - rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) - - // Use a regex to make sure curly brackets are balanced properly after validating it as a URI - rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$") - - rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") - - rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$") - - rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$") - - lock = new(sync.RWMutex) -) - -// Add adds a FormatChecker to the FormatCheckerChain -// The name used will be the value used for the format key in your json schema -func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { - lock.Lock() - c.formatters[name] = f - lock.Unlock() - - return c -} - -// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) -func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { - lock.Lock() - delete(c.formatters, name) - lock.Unlock() - - return c -} - -// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name -func (c *FormatCheckerChain) Has(name string) bool { - lock.RLock() - _, ok := c.formatters[name] - lock.RUnlock() - - return ok -} - -// IsFormat will check an input against a FormatChecker with the given name -// to see if it is the correct format -func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { - lock.RLock() - f, ok := c.formatters[name] - lock.RUnlock() - - // If a format is unrecognized it should always pass validation - if !ok { - return true - } - - return f.IsFormat(input) -} - -// IsFormat checks if input is a correctly formatted e-mail address -func (f EmailFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - _, err := mail.ParseAddress(asString) - return err == nil -} - -// IsFormat checks if input is a correctly formatted IPv4-address -func (f IPV4FormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - // Credit: https://github.com/asaskevich/govalidator - ip := net.ParseIP(asString) - return ip != nil && strings.Contains(asString, ".") -} - -// IsFormat checks
if input is a correctly formatted IPv6-address -func (f IPV6FormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - // Credit: https://github.com/asaskevich/govalidator - ip := net.ParseIP(asString) - return ip != nil && strings.Contains(asString, ":") -} - -// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6 -func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - formats := []string{ - "15:04:05", - "15:04:05Z07:00", - "2006-01-02", - time.RFC3339, - time.RFC3339Nano, - } - - for _, format := range formats { - if _, err := time.Parse(format, asString); err == nil { - return true - } - } - - return false -} - -// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD) -func (f DateFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - _, err := time.Parse("2006-01-02", asString) - return err == nil -} - -// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00) -func (f TimeFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - if _, err := time.Parse("15:04:05Z07:00", asString); err == nil { - return true - } - - _, err := time.Parse("15:04:05", asString) - return err == nil -} - -// IsFormat checks if input is a correctly formatted URI with a valid Scheme per RFC3986 -func (f URIFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - u, err := url.Parse(asString) - - if err != nil || u.Scheme == "" { - return false - } - - return !strings.Contains(asString, `\`) -} - -// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986 -func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - _, err := url.Parse(asString) - return err == nil && !strings.Contains(asString, `\`) -} - -// IsFormat checks if input is a correctly formatted URI template per RFC6570 -func (f URITemplateFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - u, err := url.Parse(asString) - if err != nil || strings.Contains(asString, `\`) { - return false - } - - return rxURITemplate.MatchString(u.Path) -} - -// IsFormat checks if input is a correctly formatted hostname -func (f HostnameFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxHostname.MatchString(asString) && len(asString) < 256 -} - -// IsFormat checks if input is a correctly formatted UUID -func (f UUIDFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxUUID.MatchString(asString) -} - -// IsFormat checks if input is a correctly formatted regular expression -func (f RegexFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - if asString == "" { - return true - } - _, err := regexp.Compile(asString) - return err == nil -} - -// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901 -func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxJSONPointer.MatchString(asString) -} - -// IsFormat checks if
input is a correctly formatted relative JSON Pointer -func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxRelJSONPointer.MatchString(asString) -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml deleted file mode 100644 index ab6fb867c5..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml +++ /dev/null @@ -1,13 +0,0 @@ -package: github.com/xeipuuv/gojsonschema -license: Apache 2.0 -import: -- package: github.com/xeipuuv/gojsonschema - -- package: github.com/xeipuuv/gojsonpointer - -- package: github.com/xeipuuv/gojsonreference - -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go deleted file mode 100644 index 4ef7a8d03e..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Very simple log wrapper. -// Used for debugging/testing purposes. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "log" -) - -const internalLogEnabled = false - -func internalLog(format string, v ...interface{}) { - log.Printf(format, v...) -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go deleted file mode 100644 index 0e979707b4..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 MongoDB, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author tolsen -// author-github https://github.com/tolsen -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
-// -// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context -// -// created 04-09-2013 - -package gojsonschema - -import "bytes" - -// JsonContext implements a persistent linked-list of strings -type JsonContext struct { - head string - tail *JsonContext -} - -// NewJsonContext creates a new JsonContext -func NewJsonContext(head string, tail *JsonContext) *JsonContext { - return &JsonContext{head, tail} -} - -// String displays the context in reverse. -// This plays well with the data structure's persistent nature with -// Cons and a json document's tree structure. -func (c *JsonContext) String(del ...string) string { - byteArr := make([]byte, 0, c.stringLen()) - buf := bytes.NewBuffer(byteArr) - c.writeStringToBuffer(buf, del) - - return buf.String() -} - -func (c *JsonContext) stringLen() int { - length := 0 - if c.tail != nil { - length = c.tail.stringLen() + 1 // add 1 for "." - } - - length += len(c.head) - return length -} - -func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { - if c.tail != nil { - c.tail.writeStringToBuffer(buf, del) - - if len(del) > 0 { - buf.WriteString(del[0]) - } else { - buf.WriteString(".") - } - } - - buf.WriteString(c.head) -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go deleted file mode 100644 index 5d88af263e..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Different strategies to load JSON files. -// Includes References (file and HTTP), JSON strings and Go types. 
-// -// created 01-02-2015 - -package gojsonschema - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonreference" -) - -var osFS = osFileSystem(os.Open) - -// JSONLoader defines the JSON loader interface -type JSONLoader interface { - JsonSource() interface{} - LoadJSON() (interface{}, error) - JsonReference() (gojsonreference.JsonReference, error) - LoaderFactory() JSONLoaderFactory -} - -// JSONLoaderFactory defines the JSON loader factory interface -type JSONLoaderFactory interface { - // New creates a new JSON loader for the given source - New(source string) JSONLoader -} - -// DefaultJSONLoaderFactory is the default JSON loader factory -type DefaultJSONLoaderFactory struct { -} - -// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem -type FileSystemJSONLoaderFactory struct { - fs http.FileSystem -} - -// New creates a new JSON loader for the given source -func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// New creates a new JSON loader for the given source -func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: f.fs, - source: source, - } -} - -// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. -type osFileSystem func(string) (*os.File, error) - -// Opens a file with the given name -func (o osFileSystem) Open(name string) (http.File, error) { - return o(name) -} - -// JSON Reference loader -// references are used to load JSONs from files and HTTP - -type jsonReferenceLoader struct { - fs http.FileSystem - source string -} - -func (l *jsonReferenceLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference(l.JsonSource().(string)) -} - -func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { - return &FileSystemJSONLoaderFactory{ - fs: l.fs, - } -} - -// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. -func NewReferenceLoader(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
-func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { - return &jsonReferenceLoader{ - fs: fs, - source: source, - } -} - -func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { - - var err error - - reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) - if err != nil { - return nil, err - } - - refToURL := reference - refToURL.GetUrl().Fragment = "" - - var document interface{} - - if reference.HasFileScheme { - - filename := strings.TrimPrefix(refToURL.String(), "file://") - filename, err = url.QueryUnescape(filename) - - if err != nil { - return nil, err - } - - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, use slashes - // instead of backslashes, and have spaces escaped - filename = strings.TrimPrefix(filename, "/") - filename = filepath.FromSlash(filename) - } - - document, err = l.loadFromFile(filename) - if err != nil { - return nil, err - } - - } else { - - document, err = l.loadFromHTTP(refToURL.String()) - if err != nil { - return nil, err - } - - } - - return document, nil - -} - -func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { - - // return cached versions of the metaschemas for drafts 4, 6 and 7 - // for performance and to allow easier offline use - if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { - return decodeJSONUsingNumber(strings.NewReader(metaSchema)) - } - - resp, err := http.Get(address) - if err != nil { - return nil, err - } - - // must return HTTP Status 200 OK - if resp.StatusCode != http.StatusOK { - return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) - } - - bodyBuff, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) -} - -func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { - f, err := l.fs.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - bodyBuff, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) - -} - -// JSON string loader - -type jsonStringLoader struct { - source string -} - -func (l *jsonStringLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewStringLoader creates a new JSONLoader, taking a string as source -func NewStringLoader(source string) JSONLoader { - return &jsonStringLoader{source: source} -} - -func (l *jsonStringLoader) LoadJSON() (interface{}, error) { - - return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) - -} - -// JSON bytes loader - -type jsonBytesLoader struct { - source []byte -} - -func (l *jsonBytesLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source -func NewBytesLoader(source []byte) JSONLoader { - return &jsonBytesLoader{source: source} -} - -func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { - return
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) -} - -// JSON Go (types) loader -// used to load JSONs from the code as maps, interface{}, structs ... - -type jsonGoLoader struct { - source interface{} -} - -func (l *jsonGoLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewGoLoader creates a new JSONLoader from a given Go struct -func NewGoLoader(source interface{}) JSONLoader { - return &jsonGoLoader{source: source} -} - -func (l *jsonGoLoader) LoadJSON() (interface{}, error) { - - // convert it to a compliant JSON first to avoid types "mismatches" - - jsonBytes, err := json.Marshal(l.JsonSource()) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) - -} - -type jsonIOLoader struct { - buf *bytes.Buffer -} - -// NewReaderLoader creates a new JSON loader using the provided io.Reader -func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) -} - -// NewWriterLoader creates a new JSON loader using the provided io.Writer -func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) -} - -func (l *jsonIOLoader) JsonSource() interface{} { - return l.buf.String() -} - -func (l *jsonIOLoader) LoadJSON() (interface{}, error) { - return decodeJSONUsingNumber(l.buf) -} - -func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// JSON raw loader -// In case the JSON is already marshalled to interface{} use this loader -// This is used for testing as otherwise there is no guarantee the JSON is marshalled -// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber -type jsonRawLoader struct { - source interface{} -} - -// NewRawLoader creates a new JSON raw loader for the given source -func NewRawLoader(source interface{}) JSONLoader { - return &jsonRawLoader{source: source} -} -func (l *jsonRawLoader) JsonSource() interface{} { - return l.source -} -func (l *jsonRawLoader) LoadJSON() (interface{}, error) { - return l.source, nil -} -func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} -func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { - - var document interface{} - - decoder := json.NewDecoder(r) - decoder.UseNumber() - - err := decoder.Decode(&document) - if err != nil { - return nil, err - } - - return document, nil - -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go deleted file mode 100644 index a416225cdb..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/locales.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const string and messages. -// -// created 01-01-2015 - -package gojsonschema - -type ( - // locale is an interface for defining custom error strings - locale interface { - - // False returns a format-string for "false" schema validation errors - False() string - - // Required returns a format-string for "required" schema validation errors - Required() string - - // InvalidType returns a format-string for "invalid type" schema validation errors - InvalidType() string - - // NumberAnyOf returns a format-string for "anyOf" schema validation errors - NumberAnyOf() string - - // NumberOneOf returns a format-string for "oneOf" schema validation errors - NumberOneOf() string - - // NumberAllOf returns a format-string for "allOf" schema validation errors - NumberAllOf() string - - // NumberNot returns a format-string to format a NumberNotError - NumberNot() string - - // MissingDependency returns a format-string for "missing dependency" schema validation errors - MissingDependency() string - - // Internal returns a format-string for internal errors - Internal() string - - // Const returns a format-string to format a ConstError - Const() string - - // Enum returns a format-string to format an EnumError - Enum() string - - // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema - ArrayNotEnoughItems() string - - // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError - ArrayNoAdditionalItems() string - - // ArrayMinItems returns a format-string to format an ArrayMinItemsError - ArrayMinItems() string - - // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError - ArrayMaxItems() string - - // Unique returns a format-string to format an ItemsMustBeUniqueError - Unique() string - - // ArrayContains returns a format-string to format an ArrayContainsError - ArrayContains() string - - // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError - ArrayMinProperties() string - - // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError - ArrayMaxProperties() string - - // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError - AdditionalPropertyNotAllowed() string - - // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError - InvalidPropertyPattern() string - - // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError - InvalidPropertyName() string - - // StringGTE returns a format-string to format an StringLengthGTEError - StringGTE() string - - // StringLTE returns a format-string to format an StringLengthLTEError - StringLTE() string - - // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError - DoesNotMatchPattern() string 
- - // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError - DoesNotMatchFormat() string - - // MultipleOf returns a format-string to format an MultipleOfError - MultipleOf() string - - // NumberGTE returns a format-string to format an NumberGTEError - NumberGTE() string - - // NumberGT returns a format-string to format an NumberGTError - NumberGT() string - - // NumberLTE returns a format-string to format an NumberLTEError - NumberLTE() string - - // NumberLT returns a format-string to format an NumberLTError - NumberLT() string - - // Schema validations - - // RegexPattern returns a format-string to format a regex-pattern error - RegexPattern() string - - // GreaterThanZero returns a format-string to format an error where a number must be greater than zero - GreaterThanZero() string - - // MustBeOfA returns a format-string to format an error where a value is of the wrong type - MustBeOfA() string - - // MustBeOfAn returns a format-string to format an error where a value is of the wrong type - MustBeOfAn() string - - // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error - CannotBeUsedWithout() string - - // CannotBeGT returns a format-string to format an error where a value is greater than allowed - CannotBeGT() string - - // MustBeOfType returns a format-string to format an error where a value does not match the required type - MustBeOfType() string - - // MustBeValidRegex returns a format-string to format an error where a regex is invalid - MustBeValidRegex() string - - // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format - MustBeValidFormat() string - - // MustBeGTEZero returns a format-string to format an error where a value must be greater than or equal to 0 - MustBeGTEZero() string - - // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed - KeyCannotBeGreaterThan() string - - // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type - KeyItemsMustBeOfType() string - - // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique - KeyItemsMustBeUnique() string - - // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error - ReferenceMustBeCanonical() string - - // NotAValidType returns a format-string to format an invalid type error - NotAValidType() string - - // Duplicated returns a format-string to format an error where types are duplicated - Duplicated() string - - // HttpBadStatus returns a format-string for errors when loading a schema using HTTP - HttpBadStatus() string - - // ParseError returns a format-string for JSON parsing errors - ParseError() string - - // ConditionThen returns a format-string for ConditionThenError errors - ConditionThen() string - - // ConditionElse returns a format-string for ConditionElseError errors - ConditionElse() string - - // ErrorFormat returns a format string for errors - ErrorFormat() string - } - - // DefaultLocale is the default locale for this package - DefaultLocale struct{} -) - -// False returns a format-string for "false" schema validation errors -func (l DefaultLocale) False() string { - return "False always fails validation" -} - -// Required returns a format-string for "required" schema validation errors -func (l DefaultLocale) Required() string { - return `{{.property}} is required` -} - -// InvalidType returns a format-string for
"invalid type" schema validation errors -func (l DefaultLocale) InvalidType() string { - return `Invalid type. Expected: {{.expected}}, given: {{.given}}` -} - -// NumberAnyOf returns a format-string for "anyOf" schema validation errors -func (l DefaultLocale) NumberAnyOf() string { - return `Must validate at least one schema (anyOf)` -} - -// NumberOneOf returns a format-string for "oneOf" schema validation errors -func (l DefaultLocale) NumberOneOf() string { - return `Must validate one and only one schema (oneOf)` -} - -// NumberAllOf returns a format-string for "allOf" schema validation errors -func (l DefaultLocale) NumberAllOf() string { - return `Must validate all the schemas (allOf)` -} - -// NumberNot returns a format-string to format a NumberNotError -func (l DefaultLocale) NumberNot() string { - return `Must not validate the schema (not)` -} - -// MissingDependency returns a format-string for "missing dependency" schema validation errors -func (l DefaultLocale) MissingDependency() string { - return `Has a dependency on {{.dependency}}` -} - -// Internal returns a format-string for internal errors -func (l DefaultLocale) Internal() string { - return `Internal Error {{.error}}` -} - -// Const returns a format-string to format a ConstError -func (l DefaultLocale) Const() string { - return `{{.field}} does not match: {{.allowed}}` -} - -// Enum returns a format-string to format an EnumError -func (l DefaultLocale) Enum() string { - return `{{.field}} must be one of the following: {{.allowed}}` -} - -// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError -func (l DefaultLocale) ArrayNoAdditionalItems() string { - return `No additional items allowed on array` -} - -// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema -func (l DefaultLocale) ArrayNotEnoughItems() string { - return `Not enough items on array to match positional list of schema` -} - -// ArrayMinItems returns a format-string to format an ArrayMinItemsError -func (l DefaultLocale) ArrayMinItems() string { - return `Array must have at least {{.min}} items` -} - -// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError -func (l DefaultLocale) ArrayMaxItems() string { - return `Array must have at most {{.max}} items` -} - -// Unique returns a format-string to format an ItemsMustBeUniqueError -func (l DefaultLocale) Unique() string { - return `{{.type}} items[{{.i}},{{.j}}] must be unique` -} - -// ArrayContains returns a format-string to format an ArrayContainsError -func (l DefaultLocale) ArrayContains() string { - return `At least one of the items must match` -} - -// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError -func (l DefaultLocale) ArrayMinProperties() string { - return `Must have at least {{.min}} properties` -} - -// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError -func (l DefaultLocale) ArrayMaxProperties() string { - return `Must have at most {{.max}} properties` -} - -// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError -func (l DefaultLocale) AdditionalPropertyNotAllowed() string { - return `Additional property {{.property}} is not allowed` -} - -// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError -func (l DefaultLocale) InvalidPropertyPattern() string { - return `Property "{{.property}}" does not match pattern {{.pattern}}` -} 
- -// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError -func (l DefaultLocale) InvalidPropertyName() string { - return `Property name of "{{.property}}" does not match` -} - -// StringGTE returns a format-string to format an StringLengthGTEError -func (l DefaultLocale) StringGTE() string { - return `String length must be greater than or equal to {{.min}}` -} - -// StringLTE returns a format-string to format an StringLengthLTEError -func (l DefaultLocale) StringLTE() string { - return `String length must be less than or equal to {{.max}}` -} - -// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError -func (l DefaultLocale) DoesNotMatchPattern() string { - return `Does not match pattern '{{.pattern}}'` -} - -// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError -func (l DefaultLocale) DoesNotMatchFormat() string { - return `Does not match format '{{.format}}'` -} - -// MultipleOf returns a format-string to format an MultipleOfError -func (l DefaultLocale) MultipleOf() string { - return `Must be a multiple of {{.multiple}}` -} - -// NumberGTE returns the format string to format a NumberGTEError -func (l DefaultLocale) NumberGTE() string { - return `Must be greater than or equal to {{.min}}` -} - -// NumberGT returns the format string to format a NumberGTError -func (l DefaultLocale) NumberGT() string { - return `Must be greater than {{.min}}` -} - -// NumberLTE returns the format string to format a NumberLTEError -func (l DefaultLocale) NumberLTE() string { - return `Must be less than or equal to {{.max}}` -} - -// NumberLT returns the format string to format a NumberLTError -func (l DefaultLocale) NumberLT() string { - return `Must be less than {{.max}}` -} - -// Schema validators - -// RegexPattern returns a format-string to format a regex-pattern error -func (l DefaultLocale) RegexPattern() string { - return `Invalid regex pattern '{{.pattern}}'` -} - -// GreaterThanZero returns a format-string to format an error where a number must be greater than zero -func (l DefaultLocale) GreaterThanZero() string { - return `{{.number}} must be strictly greater than 0` -} - -// MustBeOfA returns a format-string to format an error where a value is of the wrong type -func (l DefaultLocale) MustBeOfA() string { - return `{{.x}} must be of a {{.y}}` -} - -// MustBeOfAn returns a format-string to format an error where a value is of the wrong type -func (l DefaultLocale) MustBeOfAn() string { - return `{{.x}} must be of an {{.y}}` -} - -// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error -func (l DefaultLocale) CannotBeUsedWithout() string { - return `{{.x}} cannot be used without {{.y}}` -} - -// CannotBeGT returns a format-string to format an error where a value is greater than allowed -func (l DefaultLocale) CannotBeGT() string { - return `{{.x}} cannot be greater than {{.y}}` -} - -// MustBeOfType returns a format-string to format an error where a value does not match the required type -func (l DefaultLocale) MustBeOfType() string { - return `{{.key}} must be of type {{.type}}` -} - -// MustBeValidRegex returns a format-string to format an error where a regex is invalid -func (l DefaultLocale) MustBeValidRegex() string { - return `{{.key}} must be a valid regex` -} - -// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format -func (l DefaultLocale) MustBeValidFormat() string { - return `{{.key}} must be a valid format
{{.given}}` -} - -// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 -func (l DefaultLocale) MustBeGTEZero() string { - return `{{.key}} must be greater than or equal to 0` -} - -// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed -func (l DefaultLocale) KeyCannotBeGreaterThan() string { - return `{{.key}} cannot be greater than {{.y}}` -} - -// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type -func (l DefaultLocale) KeyItemsMustBeOfType() string { - return `{{.key}} items must be {{.type}}` -} - -// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique -func (l DefaultLocale) KeyItemsMustBeUnique() string { - return `{{.key}} items must be unique` -} - -// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error -func (l DefaultLocale) ReferenceMustBeCanonical() string { - return `Reference {{.reference}} must be canonical` -} - -// NotAValidType returns a format-string to format an invalid type error -func (l DefaultLocale) NotAValidType() string { - return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` -} - -// Duplicated returns a format-string to format an error where types are duplicated -func (l DefaultLocale) Duplicated() string { - return `{{.type}} type is duplicated` -} - -// HttpBadStatus returns a format-string for errors when loading a schema using HTTP -func (l DefaultLocale) HttpBadStatus() string { - return `Could not read schema from HTTP, response status is {{.status}}` -} - -// ErrorFormat returns a format string for errors -// Replacement options: field, description, context, value -func (l DefaultLocale) ErrorFormat() string { - return `{{.field}}: {{.description}}` -} - -// ParseError returns a format-string for JSON parsing errors -func (l DefaultLocale) ParseError() string { - return `Expected: {{.expected}}, given: Invalid JSON` -} - -// ConditionThen returns a format-string for ConditionThenError errors -// If/Else -func (l DefaultLocale) ConditionThen() string { - return `Must validate "then" as "if" was valid` -} - -// ConditionElse returns a format-string for ConditionElseError errors -func (l DefaultLocale) ConditionElse() string { - return `Must validate "else" as "if" was not valid` -} - -// constants -const ( - STRING_NUMBER = "number" - STRING_ARRAY_OF_STRINGS = "array of strings" - STRING_ARRAY_OF_SCHEMAS = "array of schemas" - STRING_SCHEMA = "valid schema" - STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" - STRING_PROPERTIES = "properties" - STRING_DEPENDENCY = "dependency" - STRING_PROPERTY = "property" - STRING_UNDEFINED = "undefined" - STRING_CONTEXT_ROOT = "(root)" - STRING_ROOT_SCHEMA_PROPERTY = "(root)" -) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go deleted file mode 100644 index 0a0179148b..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/result.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Result and ResultError implementations. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "fmt" - "strings" -) - -type ( - // ErrorDetails is a map of details specific to each error. - // While the values will vary, every error will contain a "field" value - ErrorDetails map[string]interface{} - - // ResultError is the interface that library errors must implement - ResultError interface { - // Field returns the field name without the root context - // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName - Field() string - // SetType sets the error-type - SetType(string) - // Type returns the error-type - Type() string - // SetContext sets the JSON-context for the error - SetContext(*JsonContext) - // Context returns the JSON-context of the error - Context() *JsonContext - // SetDescription sets a description for the error - SetDescription(string) - // Description returns the description of the error - Description() string - // SetDescriptionFormat sets the format for the description in the default text/template format - SetDescriptionFormat(string) - // DescriptionFormat returns the format for the description in the default text/template format - DescriptionFormat() string - // SetValue sets the value related to the error - SetValue(interface{}) - // Value returns the value related to the error - Value() interface{} - // SetDetails sets the details specific to the error - SetDetails(ErrorDetails) - // Details returns details about the error - Details() ErrorDetails - // String returns a string representation of the error - String() string - } - - // ResultErrorFields holds the fields for each ResultError implementation. - // ResultErrorFields implements the ResultError interface, so custom errors - // can be defined by just embedding this type - ResultErrorFields struct { - errorType string // A string with the type of error (i.e. invalid_type) - context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... - description string // A human readable error message - descriptionFormat string // A format for human readable error message - value interface{} // Value given by the JSON file that is the source of the error - details ErrorDetails - } - - // Result holds the result of a validation - Result struct { - errors []ResultError - // Scores how well the validation matched. Useful in generating - // better error messages for anyOf and oneOf. - score int - } -) - -// Field returns the field name without the root context -// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName -func (v *ResultErrorFields) Field() string { - return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") -} - -// SetType sets the error-type -func (v *ResultErrorFields) SetType(errorType string) { - v.errorType = errorType -} - -// Type returns the error-type -func (v *ResultErrorFields) Type() string { - return v.errorType -} - -// SetContext sets the JSON-context for the error -func (v *ResultErrorFields) SetContext(context *JsonContext) { - v.context = context -} - -// Context returns the JSON-context of the error -func (v *ResultErrorFields) Context() *JsonContext { - return v.context -} - -// SetDescription sets a description for the error -func (v *ResultErrorFields) SetDescription(description string) { - v.description = description -} - -// Description returns the description of the error -func (v *ResultErrorFields) Description() string { - return v.description -} - -// SetDescriptionFormat sets the format for the description in the default text/template format -func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) { - v.descriptionFormat = descriptionFormat -} - -// DescriptionFormat returns the format for the description in the default text/template format -func (v *ResultErrorFields) DescriptionFormat() string { - return v.descriptionFormat -} - -// SetValue sets the value related to the error -func (v *ResultErrorFields) SetValue(value interface{}) { - v.value = value -} - -// Value returns the value related to the error -func (v *ResultErrorFields) Value() interface{} { - return v.value -} - -// SetDetails sets the details specific to the error -func (v *ResultErrorFields) SetDetails(details ErrorDetails) { - v.details = details -} - -// Details returns details about the error -func (v *ResultErrorFields) Details() ErrorDetails { - return v.details -} - -// String returns a string representation of the error -func (v ResultErrorFields) String() string { - // as a fallback, the value is displayed go style - valueString := fmt.Sprintf("%v", v.value) - - // marshal the go value to json - if v.value == nil { - valueString = TYPE_NULL - } else { - if vs, err := marshalToJSONString(v.value); err == nil { - if vs == nil { - valueString = TYPE_NULL - } else { - valueString = *vs - } - } - } - - return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ - "context": v.context.String(), - "description": v.description, - "value": valueString, - "field": v.Field(), - }) -} - -// Valid indicates if no errors were found -func (v *Result) Valid() bool { - return len(v.errors) == 0 -} - -// Errors returns the errors that were found -func (v *Result) Errors() []ResultError { - return v.errors -} - -// AddError appends a fully filled error to the error set -// SetDescription() will be called with the result of the parsed err.DescriptionFormat() -func (v *Result) AddError(err ResultError, details ErrorDetails) { - if _, exists := details["context"]; !exists && err.Context() != nil { - details["context"] = err.Context().String() - } - - err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) - - v.errors = append(v.errors, err) -} - -func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) { - newError(err, context, value, Locale, details) - v.errors = append(v.errors, err) - v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function -}
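// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the vendored file: typical consumption of
// a Result, using the NewStringLoader and NewSchema constructors defined in
// the hunks above. The Schema.Validate method is assumed from this package's
// validation code, which lies outside the deleted hunks shown here.
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	documentLoader := gojsonschema.NewStringLoader(`{"age": 42}`)

	schema, err := gojsonschema.NewSchema(schemaLoader)
	if err != nil {
		panic(err) // the schema itself failed to compile
	}

	result, err := schema.Validate(documentLoader)
	if err != nil {
		panic(err) // the document could not be loaded
	}

	if !result.Valid() {
		// Each ResultError renders through Locale.ErrorFormat(), yielding
		// "field: description" strings such as "(root): name is required".
		for _, resultErr := range result.Errors() {
			fmt.Println(resultErr.String())
		}
	}
}
// ---------------------------------------------------------------------------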
- -// Used to copy errors from a sub-schema to the main one -func (v *Result) mergeErrors(otherResult *Result) { - v.errors = append(v.errors, otherResult.Errors()...) - v.score += otherResult.score -} - -func (v *Result) incrementScore() { - v.score++ -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go deleted file mode 100644 index 9e93cd7955..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schema.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines Schema, the main entry to every subSchema. -// Contains the parsing logic and error checking. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - "math/big" - "reflect" - "regexp" - "text/template" - - "github.com/xeipuuv/gojsonreference" -) - -var ( - // Locale is the default locale to use - // Library users can overwrite with their own implementation - Locale locale = DefaultLocale{} - - // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. - ErrorTemplateFuncs template.FuncMap -) - -// NewSchema instances a schema using the given JSONLoader -func NewSchema(l JSONLoader) (*Schema, error) { - return NewSchemaLoader().Compile(l) -} - -// Schema holds a schema -type Schema struct { - documentReference gojsonreference.JsonReference - rootSchema *subSchema - pool *schemaPool - referencePool *schemaReferencePool -} - -func (d *Schema) parse(document interface{}, draft Draft) error { - d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} - return d.parseSchema(document, d.rootSchema) -} - -// SetRootSchemaName sets the root-schema name -func (d *Schema) SetRootSchemaName(name string) { - d.rootSchema.property = name -} - -// Parses a subSchema -// -// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring -// Not much magic involved here, most of the job is to validate the key names and their values, -// then the values are copied into subSchema struct -// -func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { - - if currentSchema.draft == nil { - if currentSchema.parent == nil { - return errors.New("Draft not set") - } - currentSchema.draft = currentSchema.parent.draft - } - - // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" - if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { - b := documentNode.(bool) - currentSchema.pass = &b - return nil - } - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.ParseError(), - ErrorDetails{ - "expected": STRING_SCHEMA, - }, - )) - } - - m := documentNode.(map[string]interface{}) - - if currentSchema.parent == nil { - currentSchema.ref = &d.documentReference - currentSchema.id = &d.documentReference - } - - if currentSchema.id == nil && currentSchema.parent != nil { - currentSchema.id = currentSchema.parent.id - } - - // In draft 6 the id keyword was renamed to $id - // Hybrid mode uses the old id by default - var keyID string - - switch *currentSchema.draft { - case Draft4: - keyID = KEY_ID - case Hybrid: - keyID = KEY_ID_NEW - if existsMapKey(m, KEY_ID) { - keyID = KEY_ID - } - default: - keyID = KEY_ID_NEW - } - if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": keyID, - }, - )) - } - if k, ok := m[keyID].(string); ok { - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - if currentSchema == d.rootSchema { - currentSchema.id = &jsonReference - } else { - ref, err := currentSchema.parent.id.Inherits(jsonReference) - if err != nil { - return err - } - currentSchema.id = ref - } - } - - // definitions - if existsMapKey(m, KEY_DEFINITIONS) { - if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { - for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { - if isKind(dv, reflect.Map, reflect.Bool) { - - newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} - - err := d.parseSchema(dv, newSchema) - - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - - } - - // title - if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_TITLE, - }, - )) - } - if k, ok := m[KEY_TITLE].(string); ok { - currentSchema.title = &k - } - - // description - if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_DESCRIPTION, - }, - )) - } - if k, ok := m[KEY_DESCRIPTION].(string); ok { - currentSchema.description = &k - } - - // $ref - if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { - return errors.New(formatErrorDescription( - 
Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_REF, - }, - )) - } - - if k, ok := m[KEY_REF].(string); ok { - - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - - currentSchema.ref = &jsonReference - - if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { - currentSchema.refSchema = sch - } else { - err := d.parseReference(documentNode, currentSchema) - - if err != nil { - return err - } - - return nil - } - } - - // type - if existsMapKey(m, KEY_TYPE) { - if isKind(m[KEY_TYPE], reflect.String) { - if k, ok := m[KEY_TYPE].(string); ok { - err := currentSchema.types.Add(k) - if err != nil { - return err - } - } - } else { - if isKind(m[KEY_TYPE], reflect.Slice) { - arrayOfTypes := m[KEY_TYPE].([]interface{}) - for _, typeInArray := range arrayOfTypes { - if reflect.ValueOf(typeInArray).Kind() != reflect.String { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - if err := currentSchema.types.Add(typeInArray.(string)); err != nil { - return err - } - } - - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - } - } - - // properties - if existsMapKey(m, KEY_PROPERTIES) { - err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) - if err != nil { - return err - } - } - - // additionalProperties - if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { - if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { - currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) - } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalProperties = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_PROPERTIES, - }, - )) - } - } - - // patternProperties - if existsMapKey(m, KEY_PATTERN_PROPERTIES) { - if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { - patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) - if len(patternPropertiesMap) > 0 { - currentSchema.patternProperties = make(map[string]*subSchema) - for k, v := range patternPropertiesMap { - _, err := regexp.MatchString(k, "") - if err != nil { - return errors.New(formatErrorDescription( - Locale.RegexPattern(), - ErrorDetails{"pattern": k}, - )) - } - newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err = d.parseSchema(v, newSchema) - if err != nil { - return errors.New(err.Error()) - } - currentSchema.patternProperties[k] = newSchema - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // propertyNames - if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { - if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.propertyNames = newSchema - err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // dependencies - if existsMapKey(m, KEY_DEPENDENCIES) { - err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) - if err != nil { - return err - } - } - - // items - if existsMapKey(m, KEY_ITEMS) { - if isKind(m[KEY_ITEMS], reflect.Slice) { - for _, itemElement := range m[KEY_ITEMS].([]interface{}) { - if isKind(itemElement, reflect.Map, reflect.Bool) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) - err := d.parseSchema(itemElement, newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - currentSchema.itemsChildrenIsSingleSchema = false - } - } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) - err := d.parseSchema(m[KEY_ITEMS], newSchema) - if err != nil { - return err - } - currentSchema.itemsChildrenIsSingleSchema = true - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - } - - // additionalItems - if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { - if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { - currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) - } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalItems = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_ITEMS, - }, - )) - } - } - - // validation : number / integer - - if existsMapKey(m, KEY_MULTIPLE_OF) { - multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) - if multipleOfValue == nil { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_NUMBER, - "given": KEY_MULTIPLE_OF, - }, - )) - } - if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { - return errors.New(formatErrorDescription( - Locale.GreaterThanZero(), - ErrorDetails{"number": KEY_MULTIPLE_OF}, - )) - } - currentSchema.multipleOf = multipleOfValue - } - - if existsMapKey(m, KEY_MINIMUM) { - minimumValue := mustBeNumber(m[KEY_MINIMUM]) - if minimumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.minimum = minimumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { - switch *currentSchema.draft { - case Draft4: - if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - 
)) - } - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MINIMUM].(bool) { - currentSchema.exclusiveMinimum = currentSchema.minimum - currentSchema.minimum = nil - } - case Hybrid: - if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MINIMUM].(bool) { - currentSchema.exclusiveMinimum = currentSchema.minimum - currentSchema.minimum = nil - } - } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { - currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - )) - } - default: - if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { - currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - )) - } - } - } - - if existsMapKey(m, KEY_MAXIMUM) { - maximumValue := mustBeNumber(m[KEY_MAXIMUM]) - if maximumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.maximum = maximumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { - switch *currentSchema.draft { - case Draft4: - if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { - currentSchema.exclusiveMaximum = currentSchema.maximum - currentSchema.maximum = nil - } - case Hybrid: - if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { - currentSchema.exclusiveMaximum = currentSchema.maximum - currentSchema.maximum = nil - } - } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { - currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - default: - if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { - currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - } - } - - // validation : string - - if existsMapKey(m, KEY_MIN_LENGTH) { - minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) - if minLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, - )) - } - if 
*minLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_LENGTH}, - )) - } - currentSchema.minLength = minLengthIntegerValue - } - - if existsMapKey(m, KEY_MAX_LENGTH) { - maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) - if maxLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *maxLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_LENGTH}, - )) - } - currentSchema.maxLength = maxLengthIntegerValue - } - - if currentSchema.minLength != nil && currentSchema.maxLength != nil { - if *currentSchema.minLength > *currentSchema.maxLength { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, - )) - } - } - - if existsMapKey(m, KEY_PATTERN) { - if isKind(m[KEY_PATTERN], reflect.String) { - regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) - if err != nil { - return errors.New(formatErrorDescription( - Locale.MustBeValidRegex(), - ErrorDetails{"key": KEY_PATTERN}, - )) - } - currentSchema.pattern = regexpObject - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, - )) - } - } - - if existsMapKey(m, KEY_FORMAT) { - formatString, ok := m[KEY_FORMAT].(string) - if !ok { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, - )) - } - currentSchema.format = formatString - } - - // validation : object - - if existsMapKey(m, KEY_MIN_PROPERTIES) { - minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) - if minPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *minPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_PROPERTIES}, - )) - } - currentSchema.minProperties = minPropertiesIntegerValue - } - - if existsMapKey(m, KEY_MAX_PROPERTIES) { - maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) - if maxPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *maxPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_PROPERTIES}, - )) - } - currentSchema.maxProperties = maxPropertiesIntegerValue - } - - if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { - if *currentSchema.minProperties > *currentSchema.maxProperties { - return errors.New(formatErrorDescription( - Locale.KeyCannotBeGreaterThan(), - ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, - )) - } - } - - if existsMapKey(m, KEY_REQUIRED) { - if isKind(m[KEY_REQUIRED], reflect.Slice) { - requiredValues := m[KEY_REQUIRED].([]interface{}) - for _, requiredValue := range requiredValues { - if isKind(requiredValue, reflect.String) { - if isStringInSlice(currentSchema.required, requiredValue.(string)) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_REQUIRED}, - )) - } - currentSchema.required = append(currentSchema.required, requiredValue.(string)) - } else 
{ - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeOfType(), - ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, - )) - } - } - - // validation : array - - if existsMapKey(m, KEY_MIN_ITEMS) { - minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) - if minItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *minItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_ITEMS}, - )) - } - currentSchema.minItems = minItemsIntegerValue - } - - if existsMapKey(m, KEY_MAX_ITEMS) { - maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) - if maxItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *maxItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_ITEMS}, - )) - } - currentSchema.maxItems = maxItemsIntegerValue - } - - if existsMapKey(m, KEY_UNIQUE_ITEMS) { - if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { - currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { - newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.contains = newSchema - err := d.parseSchema(m[KEY_CONTAINS], newSchema) - if err != nil { - return err - } - } - - // validation : all - - if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { - is, err := marshalWithoutNumber(m[KEY_CONST]) - if err != nil { - return err - } - currentSchema._const = is - } - - if existsMapKey(m, KEY_ENUM) { - if isKind(m[KEY_ENUM], reflect.Slice) { - for _, v := range m[KEY_ENUM].([]interface{}) { - is, err := marshalWithoutNumber(v) - if err != nil { - return err - } - if isStringInSlice(currentSchema.enum, *is) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_ENUM}, - )) - } - currentSchema.enum = append(currentSchema.enum, *is) - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, - )) - } - } - - // validation : subSchema - - if existsMapKey(m, KEY_ONE_OF) { - if isKind(m[KEY_ONE_OF], reflect.Slice) { - for _, v := range m[KEY_ONE_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.oneOf = append(currentSchema.oneOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ANY_OF) { - if isKind(m[KEY_ANY_OF], reflect.Slice) { - for _, v := range m[KEY_ANY_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.anyOf = append(currentSchema.anyOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return 
errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ALL_OF) { - if isKind(m[KEY_ALL_OF], reflect.Slice) { - for _, v := range m[KEY_ALL_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.allOf = append(currentSchema.allOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_NOT) { - if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} - currentSchema.not = newSchema - err := d.parseSchema(m[KEY_NOT], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, - )) - } - } - - if *currentSchema.draft >= Draft7 { - if existsMapKey(m, KEY_IF) { - if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} - currentSchema._if = newSchema - err := d.parseSchema(m[KEY_IF], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, - )) - } - } - - if existsMapKey(m, KEY_THEN) { - if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} - currentSchema._then = newSchema - err := d.parseSchema(m[KEY_THEN], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, - )) - } - } - - if existsMapKey(m, KEY_ELSE) { - if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} - currentSchema._else = newSchema - err := d.parseSchema(m[KEY_ELSE], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, - )) - } - } - } - - return nil -} - -func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { - var ( - refdDocumentNode interface{} - dsp *schemaPoolDocument - err error - ) - - newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} - - d.referencePool.Add(currentSchema.ref.String(), newSchema) - - dsp, err = d.pool.GetDocument(*currentSchema.ref) - if err != nil { - return err - } - newSchema.id = currentSchema.ref - - refdDocumentNode = dsp.Document - newSchema.draft = dsp.Draft - - if err != nil { - return err - } - - if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, - )) - } - - err = d.parseSchema(refdDocumentNode, newSchema) - if err != nil { - return err - } - - currentSchema.refSchema = newSchema - - return nil - -} - -func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - 
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - for k := range m { - schemaProperty := k - newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} - currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) - err := d.parseSchema(m[k], newSchema) - if err != nil { - return err - } - } - - return nil -} - -func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - currentSchema.dependencies = make(map[string]interface{}) - - for k := range m { - switch reflect.ValueOf(m[k]).Kind() { - - case reflect.Slice: - values := m[k].([]interface{}) - var valuesToRegister []string - - for _, value := range values { - if !isKind(value, reflect.String) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - valuesToRegister = append(valuesToRegister, value.(string)) - currentSchema.dependencies[k] = valuesToRegister - } - - case reflect.Map, reflect.Bool: - depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err := d.parseSchema(m[k], depSchema) - if err != nil { - return err - } - currentSchema.dependencies[k] = depSchema - - default: - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - - } - - return nil -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go deleted file mode 100644 index 20db0c1f99..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2018 johandorland ( https://github.com/johandorland ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
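The `parseSchema`/`parseReference` logic above is driven entirely through the public constructors. A hedged sketch of how a `definitions` plus `$ref` schema exercises those paths (the schema content and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	loader := gojsonschema.NewStringLoader(`{
		"definitions": {
			"positiveInt": {"type": "integer", "minimum": 1}
		},
		"type": "object",
		"properties": {
			"count": {"$ref": "#/definitions/positiveInt"}
		}
	}`)

	// Keyword and $ref errors from parseSchema surface here, at compile time.
	schema, err := gojsonschema.NewSchema(loader)
	if err != nil {
		panic(err)
	}

	result, err := schema.Validate(gojsonschema.NewGoLoader(map[string]interface{}{"count": 0}))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", result.Valid()) // false: 0 is below the minimum of 1
}
```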
-
-package gojsonschema
-
-import (
-	"bytes"
-	"errors"
-
-	"github.com/xeipuuv/gojsonreference"
-)
-
-// SchemaLoader is used to load schemas
-type SchemaLoader struct {
-	pool       *schemaPool
-	AutoDetect bool
-	Validate   bool
-	Draft      Draft
-}
-
-// NewSchemaLoader creates a new SchemaLoader
-func NewSchemaLoader() *SchemaLoader {
-
-	ps := &SchemaLoader{
-		pool: &schemaPool{
-			schemaPoolDocuments: make(map[string]*schemaPoolDocument),
-		},
-		AutoDetect: true,
-		Validate:   false,
-		Draft:      Hybrid,
-	}
-	ps.pool.autoDetect = &ps.AutoDetect
-
-	return ps
-}
-
-func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
-
-	var (
-		schema string
-		err    error
-	)
-	if sl.AutoDetect {
-		schema, _, err = parseSchemaURL(documentNode)
-		if err != nil {
-			return err
-		}
-	}
-
-	// If no explicit "$schema" is used, use the default metaschema associated with the draft used
-	if schema == "" {
-		if sl.Draft == Hybrid {
-			return nil
-		}
-		schema = drafts.GetSchemaURL(sl.Draft)
-	}
-
-	// Disable validation when loading the metaschema to prevent an infinite recursive loop
-	sl.Validate = false
-
-	metaSchema, err := sl.Compile(NewReferenceLoader(schema))
-
-	if err != nil {
-		return err
-	}
-
-	sl.Validate = true
-
-	result := metaSchema.validateDocument(documentNode)
-
-	if !result.Valid() {
-		var res bytes.Buffer
-		for _, err := range result.Errors() {
-			res.WriteString(err.String())
-			res.WriteString("\n")
-		}
-		return errors.New(res.String())
-	}
-
-	return nil
-}
-
-// AddSchemas adds an arbitrary amount of schemas to the schema cache. As this function does not require
-// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema
-func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error {
-	emptyRef, _ := gojsonreference.NewJsonReference("")
-
-	for _, loader := range loaders {
-		doc, err := loader.LoadJSON()
-
-		if err != nil {
-			return err
-		}
-
-		if sl.Validate {
-			if err := sl.validateMetaschema(doc); err != nil {
-				return err
-			}
-		}
-
-		// Directly use the Recursive function, so that it only gets added to the schema pool by $id
-		// and not by the ref of the document as it's empty
-		if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// AddSchema adds a schema under the provided URL to the schema cache
-func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error {
-
-	ref, err := gojsonreference.NewJsonReference(url)
-
-	if err != nil {
-		return err
-	}
-
-	doc, err := loader.LoadJSON()
-
-	if err != nil {
-		return err
-	}
-
-	if sl.Validate {
-		if err := sl.validateMetaschema(doc); err != nil {
-			return err
-		}
-	}
-
-	return sl.pool.parseReferences(doc, ref, true)
-}
-
-// Compile loads and compiles a schema
-func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
-
-	ref, err := rootSchema.JsonReference()
-
-	if err != nil {
-		return nil, err
-	}
-
-	d := Schema{}
-	d.pool = sl.pool
-	d.pool.jsonLoaderFactory = rootSchema.LoaderFactory()
-	d.documentReference = ref
-	d.referencePool = newSchemaReferencePool()
-
-	var doc interface{}
-	if ref.String() != "" {
-		// Get document from schema pool
-		spd, err := d.pool.GetDocument(d.documentReference)
-		if err != nil {
-			return nil, err
-		}
-		doc = spd.Document
-	} else {
-		// Load JSON directly
-		doc, err = rootSchema.LoadJSON()
-		if err != nil {
-			return nil, err
-		}
-		// References need only be parsed if loading JSON directly
-		// as pool.GetDocument already does this for us if loading by
reference - err = sl.pool.parseReferences(doc, ref, true) - if err != nil { - return nil, err - } - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return nil, err - } - } - - draft := sl.Draft - if sl.AutoDetect { - _, detectedDraft, err := parseSchemaURL(doc) - if err != nil { - return nil, err - } - if detectedDraft != nil { - draft = *detectedDraft - } - } - - err = d.parse(doc, draft) - if err != nil { - return nil, err - } - - return &d, nil -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go deleted file mode 100644 index 35b1cc6306..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines resources pooling. -// Eases referencing and avoids downloading the same resource twice. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "reflect" - - "github.com/xeipuuv/gojsonreference" -) - -type schemaPoolDocument struct { - Document interface{} - Draft *Draft -} - -type schemaPool struct { - schemaPoolDocuments map[string]*schemaPoolDocument - jsonLoaderFactory JSONLoaderFactory - autoDetect *bool -} - -func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { - - var ( - draft *Draft - err error - reference = ref.String() - ) - // Only the root document should be added to the schema pool if pooled is true - if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { - return fmt.Errorf("Reference already exists: \"%s\"", reference) - } - - if *p.autoDetect { - _, draft, err = parseSchemaURL(document) - if err != nil { - return err - } - } - - err = p.parseReferencesRecursive(document, ref, draft) - - if pooled { - p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} - } - - return err -} - -func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { - // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. - // For $ref references it takes into account the $id scope it is in and replaces - // the reference by the absolute resolved reference - - // When encountering errors it fails silently. Error handling is done when the schema - // is syntactically parsed and any error encountered here should also come up there. 
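The comment above describes how `$ref` is resolved against the enclosing `$id` scope and rewritten to an absolute reference. A small sketch of the `SchemaLoader` pattern that relies on this, mirroring the upstream README usage (the URIs are illustrative):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	sl := gojsonschema.NewSchemaLoader()

	// Pooled under its $id, so other schemas can $ref it by that URI.
	shared := gojsonschema.NewStringLoader(`{
		"$id": "http://example.com/name.json",
		"type": "string",
		"minLength": 1
	}`)
	if err := sl.AddSchemas(shared); err != nil {
		panic(err)
	}

	root := gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {"name": {"$ref": "http://example.com/name.json"}}
	}`)
	schema, err := sl.Compile(root)
	if err != nil {
		panic(err)
	}

	result, err := schema.Validate(gojsonschema.NewGoLoader(map[string]interface{}{"name": ""}))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", result.Valid()) // false: minLength is 1
}
```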
- switch m := document.(type) { - case []interface{}: - for _, v := range m { - p.parseReferencesRecursive(v, ref, draft) - } - case map[string]interface{}: - localRef := &ref - - keyID := KEY_ID_NEW - if existsMapKey(m, KEY_ID) { - keyID = KEY_ID - } - if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { - jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) - if err == nil { - localRef, err = ref.Inherits(jsonReference) - if err == nil { - if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { - return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) - } - p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} - } - } - } - - if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { - jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) - if err == nil { - absoluteRef, err := localRef.Inherits(jsonReference) - if err == nil { - m[KEY_REF] = absoluteRef.String() - } - } - } - - for k, v := range m { - // const and enums should be interpreted literally, so ignore them - if k == KEY_CONST || k == KEY_ENUM { - continue - } - // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc - // Therefore don't treat it like a schema. - if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { - if child, ok := v.(map[string]interface{}); ok { - for _, v := range child { - p.parseReferencesRecursive(v, *localRef, draft) - } - } - } else { - p.parseReferencesRecursive(v, *localRef, draft) - } - } - } - return nil -} - -func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { - - var ( - spd *schemaPoolDocument - draft *Draft - ok bool - err error - ) - - if internalLogEnabled { - internalLog("Get Document ( %s )", reference.String()) - } - - // Create a deep copy, so we can remove the fragment part later on without altering the original - refToURL, _ := gojsonreference.NewJsonReference(reference.String()) - - // First check if the given fragment is a location independent identifier - // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 - - if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { - if internalLogEnabled { - internalLog(" From pool") - } - return spd, nil - } - - // If the given reference is not a location independent identifier, - // strip the fragment and look for a document with it's base URI - - refToURL.GetUrl().Fragment = "" - - if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { - document, _, err := reference.GetPointer().Get(cachedSpd.Document) - - if err != nil { - return nil, err - } - - if internalLogEnabled { - internalLog(" From pool") - } - - spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} - p.schemaPoolDocuments[reference.String()] = spd - - return spd, nil - } - - // It is not possible to load anything remotely that is not canonical... 
- if !reference.IsCanonical() { - return nil, errors.New(formatErrorDescription( - Locale.ReferenceMustBeCanonical(), - ErrorDetails{"reference": reference.String()}, - )) - } - - jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) - document, err := jsonReferenceLoader.LoadJSON() - - if err != nil { - return nil, err - } - - // add the whole document to the pool for potential re-use - p.parseReferences(document, refToURL, true) - - _, draft, _ = parseSchemaURL(document) - - // resolve the potential fragment and also cache it - document, _, err = reference.GetPointer().Get(document) - - if err != nil { - return nil, err - } - - return &schemaPoolDocument{Document: document, Draft: draft}, nil -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go deleted file mode 100644 index 6e5e1b5cdb..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Pool of referenced schemas. -// -// created 25-06-2013 - -package gojsonschema - -import ( - "fmt" -) - -type schemaReferencePool struct { - documents map[string]*subSchema -} - -func newSchemaReferencePool() *schemaReferencePool { - - p := &schemaReferencePool{} - p.documents = make(map[string]*subSchema) - - return p -} - -func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) - } - - if sch, ok := p.documents[ref]; ok { - if internalLogEnabled { - internalLog(fmt.Sprintf(" From pool")) - } - return sch, true - } - - return nil, false -} - -func (p *schemaReferencePool) Add(ref string, sch *subSchema) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) - } - if _, ok := p.documents[ref]; !ok { - p.documents[ref] = sch - } -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go deleted file mode 100644 index 36b447a291..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Helper structure to handle schema types, and the combination of them. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "strings" -) - -type jsonSchemaType struct { - types []string -} - -// Is the schema typed ? that is containing at least one type -// When not typed, the schema does not need any type validation -func (t *jsonSchemaType) IsTyped() bool { - return len(t.types) > 0 -} - -func (t *jsonSchemaType) Add(etype string) error { - - if !isStringInSlice(JSON_TYPES, etype) { - return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) - } - - if t.Contains(etype) { - return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) - } - - t.types = append(t.types, etype) - - return nil -} - -func (t *jsonSchemaType) Contains(etype string) bool { - - for _, v := range t.types { - if v == etype { - return true - } - } - - return false -} - -func (t *jsonSchemaType) String() string { - - if len(t.types) == 0 { - return STRING_UNDEFINED // should never happen - } - - // Displayed as a list [type1,type2,...] - if len(t.types) > 1 { - return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) - } - - // Only one type: name only - return t.types[0] -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go deleted file mode 100644 index ec779812c3..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines the structure of a sub-subSchema. -// A sub-subSchema can contain other sub-schemas. 
-// -// created 27-02-2013 - -package gojsonschema - -import ( - "github.com/xeipuuv/gojsonreference" - "math/big" - "regexp" -) - -// Constants -const ( - KEY_SCHEMA = "$schema" - KEY_ID = "id" - KEY_ID_NEW = "$id" - KEY_REF = "$ref" - KEY_TITLE = "title" - KEY_DESCRIPTION = "description" - KEY_TYPE = "type" - KEY_ITEMS = "items" - KEY_ADDITIONAL_ITEMS = "additionalItems" - KEY_PROPERTIES = "properties" - KEY_PATTERN_PROPERTIES = "patternProperties" - KEY_ADDITIONAL_PROPERTIES = "additionalProperties" - KEY_PROPERTY_NAMES = "propertyNames" - KEY_DEFINITIONS = "definitions" - KEY_MULTIPLE_OF = "multipleOf" - KEY_MINIMUM = "minimum" - KEY_MAXIMUM = "maximum" - KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" - KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" - KEY_MIN_LENGTH = "minLength" - KEY_MAX_LENGTH = "maxLength" - KEY_PATTERN = "pattern" - KEY_FORMAT = "format" - KEY_MIN_PROPERTIES = "minProperties" - KEY_MAX_PROPERTIES = "maxProperties" - KEY_DEPENDENCIES = "dependencies" - KEY_REQUIRED = "required" - KEY_MIN_ITEMS = "minItems" - KEY_MAX_ITEMS = "maxItems" - KEY_UNIQUE_ITEMS = "uniqueItems" - KEY_CONTAINS = "contains" - KEY_CONST = "const" - KEY_ENUM = "enum" - KEY_ONE_OF = "oneOf" - KEY_ANY_OF = "anyOf" - KEY_ALL_OF = "allOf" - KEY_NOT = "not" - KEY_IF = "if" - KEY_THEN = "then" - KEY_ELSE = "else" -) - -type subSchema struct { - draft *Draft - - // basic subSchema meta properties - id *gojsonreference.JsonReference - title *string - description *string - - property string - - // Quick pass/fail for boolean schemas - pass *bool - - // Types associated with the subSchema - types jsonSchemaType - - // Reference url - ref *gojsonreference.JsonReference - // Schema referenced - refSchema *subSchema - - // hierarchy - parent *subSchema - itemsChildren []*subSchema - itemsChildrenIsSingleSchema bool - propertiesChildren []*subSchema - - // validation : number / integer - multipleOf *big.Rat - maximum *big.Rat - exclusiveMaximum *big.Rat - minimum *big.Rat - exclusiveMinimum *big.Rat - - // validation : string - minLength *int - maxLength *int - pattern *regexp.Regexp - format string - - // validation : object - minProperties *int - maxProperties *int - required []string - - dependencies map[string]interface{} - additionalProperties interface{} - patternProperties map[string]*subSchema - propertyNames *subSchema - - // validation : array - minItems *int - maxItems *int - uniqueItems bool - contains *subSchema - - additionalItems interface{} - - // validation : all - _const *string //const is a golang keyword - enum []string - - // validation : subSchema - oneOf []*subSchema - anyOf []*subSchema - allOf []*subSchema - not *subSchema - _if *subSchema // if/else are golang keywords - _then *subSchema - _else *subSchema -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go deleted file mode 100644 index 0e6fd51735..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/types.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const types for schema and JSON. -// -// created 28-02-2013 - -package gojsonschema - -// Type constants -const ( - TYPE_ARRAY = `array` - TYPE_BOOLEAN = `boolean` - TYPE_INTEGER = `integer` - TYPE_NUMBER = `number` - TYPE_NULL = `null` - TYPE_OBJECT = `object` - TYPE_STRING = `string` -) - -// JSON_TYPES hosts the list of type that are supported in JSON -var JSON_TYPES []string - -// SCHEMA_TYPES hosts the list of type that are supported in schemas -var SCHEMA_TYPES []string - -func init() { - JSON_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_NULL, - TYPE_OBJECT, - TYPE_STRING} - - SCHEMA_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_OBJECT, - TYPE_STRING} -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go deleted file mode 100644 index a17d22e3bd..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/utils.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Various utility functions. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "math/big" - "reflect" -) - -func isKind(what interface{}, kinds ...reflect.Kind) bool { - target := what - if isJSONNumber(what) { - // JSON Numbers are strings! - target = *mustBeNumber(what) - } - targetKind := reflect.ValueOf(target).Kind() - for _, kind := range kinds { - if targetKind == kind { - return true - } - } - return false -} - -func existsMapKey(m map[string]interface{}, k string) bool { - _, ok := m[k] - return ok -} - -func isStringInSlice(s []string, what string) bool { - for i := range s { - if s[i] == what { - return true - } - } - return false -} - -// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
-func indexStringInSlice(s []string, what string) int { - for i := range s { - if s[i] == what { - return i - } - } - return -1 -} - -func marshalToJSONString(value interface{}) (*string, error) { - - mBytes, err := json.Marshal(value) - if err != nil { - return nil, err - } - - sBytes := string(mBytes) - return &sBytes, nil -} - -func marshalWithoutNumber(value interface{}) (*string, error) { - - // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber - // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 - // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber - // so that these differences in representation are removed - - jsonString, err := marshalToJSONString(value) - if err != nil { - return nil, err - } - - var document interface{} - - err = json.Unmarshal([]byte(*jsonString), &document) - if err != nil { - return nil, err - } - - return marshalToJSONString(document) -} - -func isJSONNumber(what interface{}) bool { - - switch what.(type) { - - case json.Number: - return true - } - - return false -} - -func checkJSONInteger(what interface{}) (isInt bool) { - - jsonNumber := what.(json.Number) - - bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) - - return isValidNumber && bigFloat.IsInt() - -} - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 -) - -func mustBeInteger(what interface{}) *int { - - if isJSONNumber(what) { - - number := what.(json.Number) - - isInt := checkJSONInteger(number) - - if isInt { - - int64Value, err := number.Int64() - if err != nil { - return nil - } - - int32Value := int(int64Value) - return &int32Value - } - - } - - return nil -} - -func mustBeNumber(what interface{}) *big.Rat { - - if isJSONNumber(what) { - number := what.(json.Number) - float64Value, success := new(big.Rat).SetString(string(number)) - if success { - return float64Value - } - } - - return nil - -} - -func convertDocumentNode(val interface{}) interface{} { - - if lval, ok := val.([]interface{}); ok { - - res := []interface{}{} - for _, v := range lval { - res = append(res, convertDocumentNode(v)) - } - - return res - - } - - if mval, ok := val.(map[interface{}]interface{}); ok { - - res := map[string]interface{}{} - - for k, v := range mval { - res[k.(string)] = convertDocumentNode(v) - } - - return res - - } - - return val -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go deleted file mode 100644 index 74091bca19..0000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/validation.go +++ /dev/null @@ -1,858 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
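`marshalWithoutNumber` above is why numerically equal literals such as `1` and `1.00` compare equal under `const` and `enum`: both sides are re-encoded without `Decoder.UseNumber` before comparison. A quick sketch of that behavior (draft-07 requested explicitly so that `const` applies):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"$schema": "http://json-schema.org/draft-07/schema#",
		"const": 1
	}`)
	// "1.00" decodes to the json.Number "1.00"; normalization via
	// marshalWithoutNumber makes it equal to the const value "1".
	document := gojsonschema.NewStringLoader(`1.00`)

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", result.Valid()) // true
}
```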
- -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Extends Schema and subSchema, implements the validation phase. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "math/big" - "reflect" - "regexp" - "strconv" - "strings" - "unicode/utf8" -) - -// Validate loads and validates a JSON schema -func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { - // load schema - schema, err := NewSchema(ls) - if err != nil { - return nil, err - } - return schema.Validate(ld) -} - -// Validate loads and validates a JSON document -func (v *Schema) Validate(l JSONLoader) (*Result, error) { - root, err := l.LoadJSON() - if err != nil { - return nil, err - } - return v.validateDocument(root), nil -} - -func (v *Schema) validateDocument(root interface{}) *Result { - result := &Result{} - context := NewJsonContext(STRING_CONTEXT_ROOT, nil) - v.rootSchema.validateRecursive(v.rootSchema, root, result, context) - return result -} - -func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { - result := &Result{} - v.validateRecursive(v, document, result, context) - return result -} - -// Walker function to validate the json recursively against the subSchema -func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateRecursive %s", context.String()) - internalLog(" %v", currentNode) - } - - // Handle true/false schema as early as possible as all other fields will be nil - if currentSubSchema.pass != nil { - if !*currentSubSchema.pass { - result.addInternalError( - new(FalseError), - context, - currentNode, - ErrorDetails{}, - ) - } - return - } - - // Handle referenced schemas, returns directly when a $ref is found - if currentSubSchema.refSchema != nil { - v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) - return - } - - // Check for null value - if currentNode == nil { - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_NULL, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) - v.validateCommon(currentSubSchema, currentNode, result, context) - - } else { // Not a null value - - if isJSONNumber(currentNode) { - - value := currentNode.(json.Number) - - isInt := checkJSONInteger(value) - - validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) - - if currentSubSchema.types.IsTyped() && !validType { - - givenType := TYPE_INTEGER - if !isInt { - givenType = TYPE_NUMBER - } - - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": givenType, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } else { - - rValue := reflect.ValueOf(currentNode) - rKind := rValue.Kind() 
- - switch rKind { - - // Slice => JSON array - - case reflect.Slice: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_ARRAY, - }, - ) - return - } - - castCurrentNode := currentNode.([]interface{}) - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateArray(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - // Map => JSON object - - case reflect.Map: - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_OBJECT, - }, - ) - return - } - - castCurrentNode, ok := currentNode.(map[string]interface{}) - if !ok { - castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) - } - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateObject(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - for _, pSchema := range currentSubSchema.propertiesChildren { - nextNode, ok := castCurrentNode[pSchema.property] - if ok { - subContext := NewJsonContext(pSchema.property, context) - v.validateRecursive(pSchema, nextNode, result, subContext) - } - } - - // Simple JSON values : string, number, boolean - - case reflect.Bool: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_BOOLEAN, - }, - ) - return - } - - value := currentNode.(bool) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - case reflect.String: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_STRING, - }, - ) - return - } - - value := currentNode.(string) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } - - } - - } - - result.incrementScore() -} - -// Different kinds of validation there, subSchema / common / array / object / string... 
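For orientation, the file being deleted here implements the vendored gojsonschema walker: `Validate` compiles the schema via `NewSchema`, then `validateRecursive` dispatches on the JSON kind of each node and accumulates violations in a `Result` rather than stopping at the first failure. A minimal sketch of how a caller exercises these entry points; the schema and document literals are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// An illustrative draft-v4 schema and document; any JSONLoader
	// (string, file, Go value) works the same way.
	schemaLoader := gojsonschema.NewStringLoader(
		`{"type": "object", "required": ["size"], "properties": {"size": {"type": "integer", "minimum": 1}}}`)
	documentLoader := gojsonschema.NewStringLoader(`{"size": 0}`)

	// Validate compiles the schema and then walks the document with the
	// recursive validators shown in this file.
	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		log.Fatal(err) // schema or document failed to load/compile
	}
	if result.Valid() {
		fmt.Println("document is valid")
		return
	}
	// All violations are reported, not just the first one.
	for _, e := range result.Errors() {
		fmt.Printf("- %s\n", e)
	}
}
```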
-func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateSchema %s", context.String()) - internalLog(" %v", currentNode) - } - - if len(currentSubSchema.anyOf) > 0 { - - validatedAnyOf := false - var bestValidationResult *Result - - for _, anyOfSchema := range currentSubSchema.anyOf { - if !validatedAnyOf { - validationResult := anyOfSchema.subValidateWithContext(currentNode, context) - validatedAnyOf = validationResult.Valid() - - if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - } - if !validatedAnyOf { - - result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) - - if bestValidationResult != nil { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - } - - if len(currentSubSchema.oneOf) > 0 { - - nbValidated := 0 - var bestValidationResult *Result - - for _, oneOfSchema := range currentSubSchema.oneOf { - validationResult := oneOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - - if nbValidated != 1 { - - result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) - - if nbValidated == 0 { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - - } - - if len(currentSubSchema.allOf) > 0 { - nbValidated := 0 - - for _, allOfSchema := range currentSubSchema.allOf { - validationResult := allOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } - result.mergeErrors(validationResult) - } - - if nbValidated != len(currentSubSchema.allOf) { - result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.not != nil { - validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { - if isKind(currentNode, reflect.Map) { - for elementKey := range currentNode.(map[string]interface{}) { - if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { - switch dependency := dependency.(type) { - - case []string: - for _, dependOnKey := range dependency { - if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { - result.addInternalError( - new(MissingDependencyError), - context, - currentNode, - ErrorDetails{"dependency": dependOnKey}, - ) - } - } - - case *subSchema: - dependency.validateRecursive(dependency, currentNode, result, context) - } - } - } - } - } - - if currentSubSchema._if != nil { - validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) - if currentSubSchema._then != nil && validationResultIf.Valid() { - validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) - if !validationResultThen.Valid() { - 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) - result.mergeErrors(validationResultThen) - } - } - if currentSubSchema._else != nil && !validationResultIf.Valid() { - validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) - if !validationResultElse.Valid() { - result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) - result.mergeErrors(validationResultElse) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateCommon %s", context.String()) - internalLog(" %v", value) - } - - // const: - if currentSubSchema._const != nil { - vString, err := marshalWithoutNumber(value) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if *vString != *currentSubSchema._const { - result.addInternalError(new(ConstError), - context, - value, - ErrorDetails{ - "allowed": *currentSubSchema._const, - }, - ) - } - } - - // enum: - if len(currentSubSchema.enum) > 0 { - vString, err := marshalWithoutNumber(value) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if !isStringInSlice(currentSubSchema.enum, *vString) { - result.addInternalError( - new(EnumError), - context, - value, - ErrorDetails{ - "allowed": strings.Join(currentSubSchema.enum, ", "), - }, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateArray %s", context.String()) - internalLog(" %v", value) - } - - nbValues := len(value) - - // TODO explain - if currentSubSchema.itemsChildrenIsSingleSchema { - for i := range value { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } else { - if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { - - nbItems := len(currentSubSchema.itemsChildren) - - // while we have both schemas and values, check them against each other - for i := 0; i != nbItems && i != nbValues; i++ { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - - if nbItems < nbValues { - // we have less schemas than elements in the instance array, - // but that might be ok if "additionalItems" is specified. 
- - switch currentSubSchema.additionalItems.(type) { - case bool: - if !currentSubSchema.additionalItems.(bool) { - result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) - } - case *subSchema: - additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) - for i := nbItems; i != nbValues; i++ { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } - } - } - } - - // minItems & maxItems - if currentSubSchema.minItems != nil { - if nbValues < int(*currentSubSchema.minItems) { - result.addInternalError( - new(ArrayMinItemsError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minItems}, - ) - } - } - if currentSubSchema.maxItems != nil { - if nbValues > int(*currentSubSchema.maxItems) { - result.addInternalError( - new(ArrayMaxItemsError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxItems}, - ) - } - } - - // uniqueItems: - if currentSubSchema.uniqueItems { - var stringifiedItems = make(map[string]int) - for j, v := range value { - vString, err := marshalWithoutNumber(v) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) - } - if i, ok := stringifiedItems[*vString]; ok { - result.addInternalError( - new(ItemsMustBeUniqueError), - context, - value, - ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, - ) - } - stringifiedItems[*vString] = j - } - } - - // contains: - - if currentSubSchema.contains != nil { - validatedOne := false - var bestValidationResult *Result - - for i, v := range value { - subContext := NewJsonContext(strconv.Itoa(i), context) - - validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) - if validationResult.Valid() { - validatedOne = true - break - } else { - if bestValidationResult == nil || validationResult.score > bestValidationResult.score { - bestValidationResult = validationResult - } - } - } - if !validatedOne { - result.addInternalError( - new(ArrayContainsError), - context, - value, - ErrorDetails{}, - ) - if bestValidationResult != nil { - result.mergeErrors(bestValidationResult) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateObject %s", context.String()) - internalLog(" %v", value) - } - - // minProperties & maxProperties: - if currentSubSchema.minProperties != nil { - if len(value) < int(*currentSubSchema.minProperties) { - result.addInternalError( - new(ArrayMinPropertiesError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minProperties}, - ) - } - } - if currentSubSchema.maxProperties != nil { - if len(value) > int(*currentSubSchema.maxProperties) { - result.addInternalError( - new(ArrayMaxPropertiesError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxProperties}, - ) - } - } - - // required: - for _, requiredProperty := range currentSubSchema.required { - _, ok := value[requiredProperty] - if ok { - result.incrementScore() - } else { - result.addInternalError( - new(RequiredError), - context, - value, - ErrorDetails{"property": requiredProperty}, - ) - } - } - - // additionalProperty & patternProperty: - for pk := range value { - - // Check whether this property is described by "properties" - found := false - for _, spValue := range 
currentSubSchema.propertiesChildren {
-			if pk == spValue.property {
-				found = true
-			}
-		}
-
-		// Check whether this property is described by "patternProperties"
-		ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
-
-		// If it is described by neither "properties" nor "patternProperties", it must pass "additionalProperties"
-		if !found && !ppMatch {
-			switch ap := currentSubSchema.additionalProperties.(type) {
-			case bool:
-				// Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema
-				if !ap {
-					result.addInternalError(
-						new(AdditionalPropertyNotAllowedError),
-						context,
-						value[pk],
-						ErrorDetails{"property": pk},
-					)
-
-				}
-			case *subSchema:
-				validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context))
-				result.mergeErrors(validationResult)
-			}
-		}
-	}
-
-	// propertyNames:
-	if currentSubSchema.propertyNames != nil {
-		for pk := range value {
-			validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context)
-			if !validationResult.Valid() {
-				result.addInternalError(new(InvalidPropertyNameError),
-					context,
-					value, ErrorDetails{
-						"property": pk,
-					})
-				result.mergeErrors(validationResult)
-			}
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool {
-
-	if internalLogEnabled {
-		internalLog("validatePatternProperty %s", context.String())
-		internalLog(" %s %v", key, value)
-	}
-
-	validated := false
-
-	for pk, pv := range currentSubSchema.patternProperties {
-		if matches, _ := regexp.MatchString(pk, key); matches {
-			validated = true
-			subContext := NewJsonContext(key, context)
-			validationResult := pv.subValidateWithContext(value, subContext)
-			result.mergeErrors(validationResult)
-		}
-	}
-
-	if !validated {
-		return false
-	}
-
-	result.incrementScore()
-	return true
-}
-
-func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
-
-	// Ignore JSON numbers
-	if isJSONNumber(value) {
-		return
-	}
-
-	// Ignore non strings
-	if !isKind(value, reflect.String) {
-		return
-	}
-
-	if internalLogEnabled {
-		internalLog("validateString %s", context.String())
-		internalLog(" %v", value)
-	}
-
-	stringValue := value.(string)
-
-	// minLength & maxLength:
-	if currentSubSchema.minLength != nil {
-		if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
-			result.addInternalError(
-				new(StringLengthGTEError),
-				context,
-				value,
-				ErrorDetails{"min": *currentSubSchema.minLength},
-			)
-		}
-	}
-	if currentSubSchema.maxLength != nil {
-		if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
-			result.addInternalError(
-				new(StringLengthLTEError),
-				context,
-				value,
-				ErrorDetails{"max": *currentSubSchema.maxLength},
-			)
-		}
-	}
-
-	// pattern:
-	if currentSubSchema.pattern != nil {
-		if !currentSubSchema.pattern.MatchString(stringValue) {
-			result.addInternalError(
-				new(DoesNotMatchPatternError),
-				context,
-				value,
-				ErrorDetails{"pattern": currentSubSchema.pattern},
-			)
-
-		}
-	}
-
-	// format
-	if currentSubSchema.format != "" {
-		if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
-			result.addInternalError(
-				new(DoesNotMatchFormatError),
-				context,
-				value,
-				ErrorDetails{"format": currentSubSchema.format},
-			)
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema)
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - // Ignore non numbers - if !isJSONNumber(value) { - return - } - - if internalLogEnabled { - internalLog("validateNumber %s", context.String()) - internalLog(" %v", value) - } - - number := value.(json.Number) - float64Value, _ := new(big.Rat).SetString(string(number)) - - // multipleOf: - if currentSubSchema.multipleOf != nil { - if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { - result.addInternalError( - new(MultipleOfError), - context, - number, - ErrorDetails{ - "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), - }, - ) - } - } - - //maximum & exclusiveMaximum: - if currentSubSchema.maximum != nil { - if float64Value.Cmp(currentSubSchema.maximum) == 1 { - result.addInternalError( - new(NumberLTEError), - context, - number, - ErrorDetails{ - "max": new(big.Float).SetRat(currentSubSchema.maximum), - }, - ) - } - } - if currentSubSchema.exclusiveMaximum != nil { - if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { - result.addInternalError( - new(NumberLTError), - context, - number, - ErrorDetails{ - "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), - }, - ) - } - } - - //minimum & exclusiveMinimum: - if currentSubSchema.minimum != nil { - if float64Value.Cmp(currentSubSchema.minimum) == -1 { - result.addInternalError( - new(NumberGTEError), - context, - number, - ErrorDetails{ - "min": new(big.Float).SetRat(currentSubSchema.minimum), - }, - ) - } - } - if currentSubSchema.exclusiveMinimum != nil { - if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { - result.addInternalError( - new(NumberGTError), - context, - number, - ErrorDetails{ - "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), - }, - ) - } - } - - // format - if currentSubSchema.format != "" { - if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { - result.addInternalError( - new(DoesNotMatchFormatError), - context, - value, - ErrorDetails{"format": currentSubSchema.format}, - ) - } - } - - result.incrementScore() -} diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index d8c40e539c..7bdcec52d0 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.23.6 +1.23.12 diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go deleted file mode 100644 index aee25960ff..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_aix.go b/vendor/go.etcd.io/bbolt/bolt_aix.go index 4b424ed4c4..596e540602 100644 --- a/vendor/go.etcd.io/bbolt/bolt_aix.go +++ b/vendor/go.etcd.io/bbolt/bolt_aix.go @@ -9,6 +9,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -69,7 +71,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. 
db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go deleted file mode 100644 index 5dd8f3f2ae..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_android.go b/vendor/go.etcd.io/bbolt/bolt_android.go index 11890f0d70..ac64fcf5b2 100644 --- a/vendor/go.etcd.io/bbolt/bolt_android.go +++ b/vendor/go.etcd.io/bbolt/bolt_android.go @@ -7,6 +7,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -69,7 +71,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go deleted file mode 100644 index aee25960ff..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go deleted file mode 100644 index 2c67ab10cd..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build arm64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_loong64.go b/vendor/go.etcd.io/bbolt/bolt_loong64.go deleted file mode 100644 index 1ef2145c67..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_loong64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build loong64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go deleted file mode 100644 index f28a0512a1..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build mips64 || mips64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go deleted file mode 100644 index 708fccdc01..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build mips || mipsle - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. 
-const maxMapSize = 0x40000000 // 1GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go deleted file mode 100644 index 6a21cf33c7..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build ppc - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go deleted file mode 100644 index a32f246228..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build ppc64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go deleted file mode 100644 index 8fb60dddcb..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build ppc64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go deleted file mode 100644 index a63d26ab21..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build riscv64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go deleted file mode 100644 index 749ea97e3a..0000000000 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build s390x - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_solaris.go b/vendor/go.etcd.io/bbolt/bolt_solaris.go index babad65786..56b2ccab47 100644 --- a/vendor/go.etcd.io/bbolt/bolt_solaris.go +++ b/vendor/go.etcd.io/bbolt/bolt_solaris.go @@ -7,6 +7,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -67,7 +69,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index d1922c2d99..f68e721f55 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -11,6 +11,7 @@ import ( "golang.org/x/sys/unix" "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. 
@@ -67,7 +68,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index ec21ecb85c..e99a0d6215 100644 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -10,6 +10,7 @@ import ( "golang.org/x/sys/windows" "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // fdatasync flushes written data to a file descriptor. @@ -95,7 +96,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 5c1947e998..622947d9cb 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -125,7 +125,7 @@ type DB struct { // always fails on Windows platform. //nolint dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte + data *[common.MaxMapSize]byte datasz int meta0 *common.Meta meta1 *common.Meta @@ -563,7 +563,7 @@ func (db *DB) mmapSize(size int) (int, error) { } // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { + if size > common.MaxMapSize { return 0, errors.New("mmap too large") } @@ -581,8 +581,8 @@ func (db *DB) mmapSize(size int) (int, error) { } // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize + if sz > common.MaxMapSize { + sz = common.MaxMapSize } return int(sz), nil @@ -1080,7 +1080,7 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // then it allows you to force the database file to sync against the disk. func (db *DB) Sync() (err error) { if lg := db.Logger(); lg != discardLogger { - lg.Debug("Syncing bbolt db (%s)", db.path) + lg.Debugf("Syncing bbolt db (%s)", db.path) defer func() { if err != nil { lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) @@ -1309,6 +1309,12 @@ type Options struct { // If <=0, the initial map size is 0. // If initialMmapSize is smaller than the previous database size, // it takes no effect. + // + // Note: On Windows, due to platform limitations, the database file size + // will be immediately resized to match `InitialMmapSize` (aligned to page size) + // when the DB is opened. On non-Windows platforms, the file size will grow + // dynamically based on the actual amount of written data, regardless of `InitialMmapSize`. + // Refer to https://github.com/etcd-io/bbolt/issues/378#issuecomment-1378121966. InitialMmapSize int // PageSize overrides the default OS page size. diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go new file mode 100644 index 0000000000..773175de3a --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go new file mode 100644 index 0000000000..9f27d91991 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go new file mode 100644 index 0000000000..773175de3a --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go new file mode 100644 index 0000000000..9022f6bca0 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go @@ -0,0 +1,9 @@ +//go:build arm64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go new file mode 100644 index 0000000000..31277523c9 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go @@ -0,0 +1,9 @@ +//go:build loong64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go new file mode 100644 index 0000000000..d930f4eddb --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go @@ -0,0 +1,9 @@ +//go:build mips64 || mips64le + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x8000000000 // 512GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go new file mode 100644 index 0000000000..8b1934368b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go @@ -0,0 +1,9 @@ +//go:build mips || mipsle + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x40000000 // 1GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go new file mode 100644 index 0000000000..a374e1406e --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go @@ -0,0 +1,9 @@ +//go:build ppc + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go new file mode 100644 index 0000000000..80288a83a2 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go @@ -0,0 +1,9 @@ +//go:build ppc64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go new file mode 100644 index 0000000000..77561d6872 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go @@ -0,0 +1,9 @@ +//go:build ppc64le + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go new file mode 100644 index 0000000000..2a876e5f77 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go @@ -0,0 +1,9 @@ +//go:build riscv64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go new file mode 100644 index 0000000000..982cb7558b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go @@ -0,0 +1,9 @@ +//go:build s390x + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go index 8ad8279a09..18d6d69c2e 100644 --- a/vendor/go.etcd.io/bbolt/internal/common/types.go +++ b/vendor/go.etcd.io/bbolt/internal/common/types.go @@ -17,9 +17,6 @@ const Magic uint32 = 0xED0CDAED const PgidNoFreelist Pgid = 0xffffffffffffffff -// DO NOT EDIT. Copied from the "bolt" package. -const pageMaxAllocSize = 0xFFFFFFF - // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes diff --git a/vendor/go.etcd.io/bbolt/internal/common/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go index 9b77dd7b2b..740ffc7076 100644 --- a/vendor/go.etcd.io/bbolt/internal/common/unsafe.go +++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go @@ -23,5 +23,5 @@ func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // index 0. However, the wiki never says that the address must be to // the beginning of a C allocation (or even that malloc was used at // all), so this is believed to be correct. 
-	return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
+	return (*[MaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
 }
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 7b5db77278..1669fb16a2 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -387,16 +387,43 @@ func (tx *Tx) Copy(w io.Writer) error {
 // WriteTo writes the entire database to a writer.
 // If err == nil then exactly tx.Size() bytes will be written into the writer.
 func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
-	// Attempt to open reader with WriteFlag
-	f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
-	if err != nil {
-		return 0, err
-	}
-	defer func() {
-		if cerr := f.Close(); err == nil {
-			err = cerr
+	var f *os.File
+	// There is a risk that between the time a read-only transaction
+	// is created and the time the file is actually opened, the
+	// underlying db file at tx.db.path may have been replaced
+	// (e.g. via rename). In that case, opening the file again would
+	// unexpectedly point to a different file, rather than the one
+	// the transaction was based on.
+	//
+	// To overcome this, we reuse the already opened file handle when
+	// the WriteFlag is not set. When the WriteFlag is set, we reopen
+	// the file but verify that it still refers to the same underlying
+	// file (by device and inode). If it does not, we fall back to
+	// reusing the already opened file handle.
+	if tx.WriteFlag != 0 {
+		// Attempt to open reader with WriteFlag
+		f, err = tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+		if err != nil {
+			return 0, err
 		}
-	}()
+
+		if ok, err := sameFile(tx.db.file, f); !ok {
+			lg := tx.db.Logger()
+			if cerr := f.Close(); cerr != nil {
+				lg.Errorf("failed to close the file (%s): %v", tx.db.path, cerr)
+			}
+			lg.Warningf("The underlying file has changed, so reuse the already opened file (%s): %v", tx.db.path, err)
+			f = tx.db.file
+		} else {
+			defer func() {
+				if cerr := f.Close(); err == nil {
+					err = cerr
+				}
+			}()
+		}
+	} else {
+		f = tx.db.file
+	}
 
 	// Generate a meta page. We use the same page data for both meta pages.
 	buf := make([]byte, tx.db.pageSize)
@@ -423,13 +450,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
 		return n, fmt.Errorf("meta 1 copy: %s", err)
 	}
 
-	// Move past the meta pages in the file.
-	if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
-		return n, fmt.Errorf("seek: %s", err)
-	}
+	// Copy data pages using a SectionReader to avoid affecting f's offset.
+	dataOffset := int64(tx.db.pageSize * 2)
+	dataSize := tx.Size() - dataOffset
+	sr := io.NewSectionReader(f, dataOffset, dataSize)
 
 	// Copy data pages.
-	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
+	wn, err := io.CopyN(w, sr, dataSize)
 	n += wn
 	if err != nil {
 		return n, err
@@ -438,6 +465,19 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
 	return n, nil
 }
 
+func sameFile(f1, f2 *os.File) (bool, error) {
+	fi1, err := f1.Stat()
+	if err != nil {
+		return false, fmt.Errorf("failed to get fileInfo of the first file (%s): %w", f1.Name(), err)
+	}
+	fi2, err := f2.Stat()
+	if err != nil {
+		return false, fmt.Errorf("failed to get fileInfo of the second file (%s): %w", f2.Name(), err)
+	}
+
+	return os.SameFile(fi1, fi2), nil
+}
+
 // CopyFile copies the entire database to file at the given path.
 // A reader transaction is maintained during the copy so it is safe to continue
 // using the database while a copy is in progress.
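Two standard-library primitives do the heavy lifting in the rewritten `WriteTo` above: `os.SameFile`, which compares two `FileInfo` values using the operating system's file-identity notion (device and inode on Unix) and underpins the new `sameFile` helper, and `io.NewSectionReader`, which reads at a fixed offset without moving the shared handle's file offset, replacing the old `Seek`+`CopyN` sequence. A minimal standalone sketch of both; the `bolt.db` path is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// Open the same illustrative path twice.
	f1, err := os.Open("bolt.db")
	if err != nil {
		log.Fatal(err)
	}
	defer f1.Close()

	f2, err := os.Open("bolt.db")
	if err != nil {
		log.Fatal(err)
	}
	defer f2.Close()

	// If the file at this path had been replaced (e.g. via rename) between
	// the two opens, the device/inode pair would differ and os.SameFile
	// would report false -- the check the new sameFile helper performs.
	fi1, err := f1.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fi2, err := f2.Stat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("same underlying file:", os.SameFile(fi1, fi2))

	// Read a 4 KiB window starting at offset 8192 without calling f1.Seek,
	// so concurrent users of the shared handle see an unchanged offset --
	// the reason WriteTo switched from Seek+CopyN to a SectionReader.
	sr := io.NewSectionReader(f1, 8192, 4096)
	n, err := io.Copy(io.Discard, sr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("copied %d bytes from the section\n", n)
}
```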
@@ -495,8 +535,8 @@ func (tx *Tx) write() error { // Write out page in "max allocation" sized chunks. for { sz := rem - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 + if sz > common.MaxAllocSize-1 { + sz = common.MaxAllocSize - 1 } buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) @@ -561,10 +601,13 @@ func (tx *Tx) writeMeta() error { tx.meta.Write(p) // Write the meta page to file. + tx.db.metalock.Lock() if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + tx.db.metalock.Unlock() lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } + tx.db.metalock.Unlock() if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/LICENSE b/vendor/go.mongodb.org/mongo-driver/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go deleted file mode 100644 index a0d8185826..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer -// See THIRD-PARTY-NOTICES for original license terms. - -package bson // import "go.mongodb.org/mongo-driver/bson" - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// Zeroer allows custom struct types to implement a report of zero -// state. All struct types that don't implement Zeroer or where IsZero -// returns false are considered to be not zero. -type Zeroer interface { - IsZero() bool -} - -// D is an ordered representation of a BSON document. This type should be used when the order of the elements matters, -// such as MongoDB command documents. If the order of the elements does not matter, an M should be used instead. -// -// A D should not be constructed with duplicate key names, as that can cause undefined server behavior. -// -// Example usage: -// -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -type D = primitive.D - -// E represents a BSON element for a D. It is usually used inside a D. -type E = primitive.E - -// M is an unordered representation of a BSON document. This type should be used when the order of the elements does not -// matter. This type is handled as a regular map[string]interface{} when encoding and decoding. Elements will be -// serialized in an undefined, random order. If the order of the elements matters, a D should be used instead. -// -// Example usage: -// -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} -type M = primitive.M - -// An A is an ordered representation of a BSON array. 
-// -// Example usage: -// -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} -type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go deleted file mode 100644 index 652aa48b85..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// ArrayCodec is the Codec used for bsoncore.Array values. -// -// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. -type ArrayCodec struct{} - -var defaultArrayCodec = NewArrayCodec() - -// NewArrayCodec returns an ArrayCodec. -// -// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See -// [ArrayCodec] for more details. -func NewArrayCodec() *ArrayCodec { - return &ArrayCodec{} -} - -// EncodeValue is the ValueEncoder for bsoncore.Array values. -func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreArray { - return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - arr := val.Interface().(bsoncore.Array) - return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr) -} - -// DecodeValue is the ValueDecoder for bsoncore.Array values. -func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreArray { - return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr) - val.Set(reflect.ValueOf(arr)) - return err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go deleted file mode 100644 index 0693bd432f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec // import "go.mongodb.org/mongo-driver/bson/bsoncodec" - -import ( - "fmt" - "reflect" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ( - emptyValue = reflect.Value{} -) - -// Marshaler is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead. 
-type Marshaler interface { - MarshalBSON() ([]byte, error) -} - -// ValueMarshaler is an interface implemented by types that can marshal -// themselves into a BSON value as bytes. The type must be the valid type for -// the bytes returned. The bytes and byte type together must be valid if the -// error is nil. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead. -type ValueMarshaler interface { - MarshalBSONValue() (bsontype.Type, []byte, error) -} - -// Unmarshaler is an interface implemented by types that can unmarshal a BSON -// document representation of themselves. The BSON bytes can be assumed to be -// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data -// after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead. -type Unmarshaler interface { - UnmarshalBSON([]byte) error -} - -// ValueUnmarshaler is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead. -type ValueUnmarshaler interface { - UnmarshalBSONValue(bsontype.Type, []byte) error -} - -// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be -// encoded by the ValueEncoder. -type ValueEncoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vee ValueEncoderError) Error() string { - typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds)) - for _, t := range vee.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vee.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vee.Received.Kind().String() - if vee.Received.IsValid() { - received = vee.Received.Type().String() - } - return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received) -} - -// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be -// decoded by the ValueDecoder. -type ValueDecoderError struct { - Name string - Types []reflect.Type - Kinds []reflect.Kind - Received reflect.Value -} - -func (vde ValueDecoderError) Error() string { - typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds)) - for _, t := range vde.Types { - typeKinds = append(typeKinds, t.String()) - } - for _, k := range vde.Kinds { - if k == reflect.Map { - typeKinds = append(typeKinds, "map[string]*") - continue - } - typeKinds = append(typeKinds, k.String()) - } - received := vde.Received.Kind().String() - if vde.Received.IsValid() { - received = vde.Received.Type().String() - } - return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received) -} - -// EncodeContext is the contextual information required for a Codec to encode a -// value. -type EncodeContext struct { - *Registry - - // MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) - // that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize instead. 
- MinSize bool - - errorOnInlineDuplicates bool - stringifyMapKeysWithFmt bool - nilMapAsEmpty bool - nilSliceAsEmpty bool - nilByteSliceAsEmpty bool - omitZeroStruct bool - useJSONStructTags bool -} - -// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in -// the marshaled BSON when the "inline" struct tag option is set. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead. -func (ec *EncodeContext) ErrorOnInlineDuplicates() { - ec.errorOnInlineDuplicates = true -} - -// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name -// strings using fmt.Sprintf() instead of the default string conversion logic. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. -func (ec *EncodeContext) StringifyMapKeysWithFmt() { - ec.stringifyMapKeysWithFmt = true -} - -// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (ec *EncodeContext) NilMapAsEmpty() { - ec.nilMapAsEmpty = true -} - -// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON -// null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. -func (ec *EncodeContext) NilSliceAsEmpty() { - ec.nilSliceAsEmpty = true -} - -// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values -// instead of BSON null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (ec *EncodeContext) NilByteSliceAsEmpty() { - ec.nilByteSliceAsEmpty = true -} - -// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{}) -// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set. -// -// Note that the Encoder only examines exported struct fields when determining if a struct is the -// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead. -func (ec *EncodeContext) OmitZeroStruct() { - ec.omitZeroStruct = true -} - -// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead. -func (ec *EncodeContext) UseJSONStructTags() { - ec.useJSONStructTags = true -} - -// DecodeContext is the contextual information required for a Codec to decode a -// value. -type DecodeContext struct { - *Registry - - // Truncate, if true, instructs decoders to to truncate the fractional part of BSON "double" - // values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64, - // uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to - // BSON "decimal128" values. - // - // Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead. - Truncate bool - - // Ancestor is the type of a containing document. This is mainly used to determine what type - // should be used when decoding an embedded document into an empty interface. For example, if - // Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface - // will be decoded into a bson.M. 
- // - // Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead. - Ancestor reflect.Type - - // defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the - // usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is - // set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an - // error. DocumentType overrides the Ancestor field. - defaultDocumentType reflect.Type - - binaryAsSlice bool - useJSONStructTags bool - useLocalTimeZone bool - zeroMaps bool - zeroStructs bool -} - -// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or -// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead. -func (dc *DecodeContext) BinaryAsSlice() { - dc.binaryAsSlice = true -} - -// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson" -// struct tag is not specified. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. -func (dc *DecodeContext) UseJSONStructTags() { - dc.useJSONStructTags = true -} - -// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead -// of the UTC timezone. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (dc *DecodeContext) UseLocalTimeZone() { - dc.useLocalTimeZone = true -} - -// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value -// passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (dc *DecodeContext) ZeroMaps() { - dc.zeroMaps = true -} - -// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination -// value passed to Decode before unmarshaling BSON documents into them. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead. -func (dc *DecodeContext) ZeroStructs() { - dc.zeroStructs = true -} - -// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead. -func (dc *DecodeContext) DefaultDocumentM() { - dc.defaultDocumentType = reflect.TypeOf(primitive.M{}) -} - -// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This -// behavior is restricted to data typed as "interface{}" or "map[string]interface{}". -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead. -func (dc *DecodeContext) DefaultDocumentD() { - dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) -} - -// ValueCodec is an interface for encoding and decoding a reflect.Value. -// values. -// -// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead. -type ValueCodec interface { - ValueEncoder - ValueDecoder -} - -// ValueEncoder is the interface implemented by types that can handle the encoding of a value. 
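The Decoder methods referenced by the deprecation notices above cover the same ground as Ancestor and defaultDocumentType. A minimal sketch, assuming a mongo-driver v1 release that provides bson.Decoder.DefaultDocumentM (the payload is illustrative):

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
    	raw, err := bson.Marshal(bson.M{"spec": bson.M{"size": int32(3)}})
    	if err != nil {
    		panic(err)
    	}

    	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
    	if err != nil {
    		panic(err)
    	}
    	dec.DefaultDocumentM() // nested documents decode as bson.M, not bson.D

    	var out map[string]interface{}
    	if err := dec.Decode(&out); err != nil {
    		panic(err)
    	}
    	fmt.Printf("%T\n", out["spec"]) // primitive.M
    }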
-type ValueEncoder interface { - EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error -} - -// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueEncoder. -type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error - -// EncodeValue implements the ValueEncoder interface. -func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - return fn(ec, vw, val) -} - -// ValueDecoder is the interface implemented by types that can handle the decoding of a value. -type ValueDecoder interface { - DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error -} - -// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be -// used as a ValueDecoder. -type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error - -// DecodeValue implements the ValueDecoder interface. -func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - return fn(dc, vr, val) -} - -// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type. -type typeDecoder interface { - decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) -} - -// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder. -type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error) - -func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - return fn(dc, vr, t) -} - -// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder. -type decodeAdapter struct { - ValueDecoderFunc - typeDecoderFunc -} - -var _ ValueDecoder = decodeAdapter{} -var _ typeDecoder = decodeAdapter{} - -// decodeTypeOrValue calls decoder.decodeType is decoder is a typeDecoder. Otherwise, it allocates a new element of type -// t and calls decoder.DecodeValue on it. -func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - td, _ := decoder.(typeDecoder) - return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true) -} - -func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) { - if td != nil { - val, err := td.decodeType(dc, vr, t) - if err == nil && convert && val.Type() != t { - // This conversion step is necessary for slices and maps. If a user declares variables like: - // - // type myBool bool - // var m map[string]myBool - // - // and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present - // because we'll try to assign a value of type bool to one of type myBool. - val = val.Convert(t) - } - return val, err - } - - val := reflect.New(t).Elem() - err := vd.DecodeValue(dc, vr, val) - return val, err -} - -// CodecZeroer is the interface implemented by Codecs that can also determine if -// a value of the type that would be encoded is zero. -// -// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver -// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to -// nil instead. 
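ValueEncoderFunc and ValueDecoderFunc exist so that plain functions with the right signatures can serve as codecs. A sketch of such a pair for a hypothetical Celsius type (the package and type names are invented for illustration; the error types mirror the ones defined above):

    // Package tempcodec is an illustrative custom codec for a Celsius type.
    package tempcodec

    import (
    	"reflect"

    	"go.mongodb.org/mongo-driver/bson/bsoncodec"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    type Celsius float64

    var tCelsius = reflect.TypeOf(Celsius(0))

    // EncodeCelsius writes a Celsius value as a BSON double.
    func EncodeCelsius(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
    	if !val.IsValid() || val.Type() != tCelsius {
    		return bsoncodec.ValueEncoderError{Name: "CelsiusEncodeValue", Types: []reflect.Type{tCelsius}, Received: val}
    	}
    	return vw.WriteDouble(val.Float())
    }

    // DecodeCelsius reads a BSON double into a Celsius value.
    func DecodeCelsius(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
    	if !val.CanSet() || val.Type() != tCelsius {
    		return bsoncodec.ValueDecoderError{Name: "CelsiusDecodeValue", Types: []reflect.Type{tCelsius}, Received: val}
    	}
    	f, err := vr.ReadDouble()
    	if err != nil {
    		return err
    	}
    	val.SetFloat(f)
    	return nil
    }

Wrapped as bsoncodec.ValueEncoderFunc(EncodeCelsius) and bsoncodec.ValueDecoderFunc(DecodeCelsius), the pair can then be registered for tCelsius on a registry.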
-type CodecZeroer interface { - IsTypeZero(interface{}) bool -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go deleted file mode 100644 index 0134b5a94b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// ByteSliceCodec is the Codec used for []byte values. -// -// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver -// 2.0. To configure the byte slice encode and decode behavior, use the -// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice -// encode and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to encode nil byte slices as empty -// BSON binary values, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilByteSliceAsEmpty: true, -// }) -// -// See the deprecation notice for each field in ByteSliceCodec for the -// corresponding settings. -type ByteSliceCodec struct { - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values - // instead of BSON null. - // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty - // instead. - EncodeNilAsEmpty bool -} - -var ( - defaultByteSliceCodec = NewByteSliceCodec() - - // Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultByteSliceCodec -) - -// NewByteSliceCodec returns a ByteSliceCodec with options opts. -// -// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See -// [ByteSliceCodec] for more details. -func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { - byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) - codec := ByteSliceCodec{} - if byteSliceOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *byteSliceOpt.EncodeNilAsEmpty - } - return &codec -} - -// EncodeValue is the ValueEncoder for []byte. 
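The Encoder-level replacement for EncodeNilAsEmpty mentioned above can also be exercised directly, without going through a mongo.Client. A minimal sketch, assuming a driver version that provides bson.Encoder.NilByteSliceAsEmpty:

    package main

    import (
    	"bytes"
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
    	buf := new(bytes.Buffer)
    	vw, err := bsonrw.NewBSONValueWriter(buf)
    	if err != nil {
    		panic(err)
    	}
    	enc, err := bson.NewEncoder(vw)
    	if err != nil {
    		panic(err)
    	}
    	enc.NilByteSliceAsEmpty() // nil []byte -> empty binary value, not BSON null

    	if err := enc.Encode(struct{ Data []byte }{Data: nil}); err != nil {
    		panic(err)
    	}
    	fmt.Println(buf.Len()) // document bytes with "data" as an empty binary
    }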
-func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tByteSlice { - return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty { - return vw.WriteNull() - } - return vw.WriteBinary(val.Interface().([]byte)) -} - -func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tByteSlice { - return emptyValue, ValueDecoderError{ - Name: "ByteSliceDecodeValue", - Types: []reflect.Type{tByteSlice}, - Received: reflect.Zero(t), - } - } - - var data []byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - data = []byte(str) - case bsontype.Symbol: - sym, err := vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - data = []byte(sym) - case bsontype.Binary: - var subtype byte - data, subtype, err = vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"} - } - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(data), nil -} - -// DecodeValue is the ValueDecoder for []byte. -func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - elem, err := bsc.decodeType(dc, vr, tByteSlice) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go deleted file mode 100644 index 844b50299f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "sync" - "sync/atomic" -) - -// Runtime check that the kind encoder and decoder caches can store any valid -// reflect.Kind constant. 
-func init() { - if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" { - panic("The capacity of kindEncoderCache is too small.\n" + - "This is due to a new type being added to reflect.Kind.") - } -} - -// statically assert array size -var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer] -var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer] - -type typeEncoderCache struct { - cache sync.Map // map[reflect.Type]ValueEncoder -} - -func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) { - c.cache.Store(rt, enc) -} - -func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueEncoder), true - } - return nil, false -} - -func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder { - if v, loaded := c.cache.LoadOrStore(rt, enc); loaded { - enc = v.(ValueEncoder) - } - return enc -} - -func (c *typeEncoderCache) Clone() *typeEncoderCache { - cc := new(typeEncoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -type typeDecoderCache struct { - cache sync.Map // map[reflect.Type]ValueDecoder -} - -func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) { - c.cache.Store(rt, dec) -} - -func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) { - if v, _ := c.cache.Load(rt); v != nil { - return v.(ValueDecoder), true - } - return nil, false -} - -func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder { - if v, loaded := c.cache.LoadOrStore(rt, dec); loaded { - dec = v.(ValueDecoder) - } - return dec -} - -func (c *typeDecoderCache) Clone() *typeDecoderCache { - cc := new(typeDecoderCache) - c.cache.Range(func(k, v interface{}) bool { - if k != nil && v != nil { - cc.cache.Store(k, v) - } - return true - }) - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueEncoder interface). -type kindEncoderCacheEntry struct { - enc ValueEncoder -} - -type kindEncoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry -} - -func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) { - if enc != nil && rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc}) - } -} - -func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok { - return ent.enc, ent.enc != nil - } - } - return nil, false -} - -func (c *kindEncoderCache) Clone() *kindEncoderCache { - cc := new(kindEncoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} - -// atomic.Value requires that all calls to Store() have the same concrete type -// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type -// is always the same (since different concrete types may implement the -// ValueDecoder interface). 
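The wrapper-entry trick described in the comment above is a general sync/atomic pattern rather than anything driver-specific: atomic.Value panics if successive Store calls use different concrete types. A self-contained sketch of the same idea:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // handler stands in for ValueEncoder/ValueDecoder: an interface with many
    // possible concrete implementations.
    type handler interface{ Handle() string }

    type hello struct{}

    func (hello) Handle() string { return "hello" }

    // entry ensures every Store call uses the same concrete type (*entry),
    // which atomic.Value requires even when the wrapped values differ.
    type entry struct{ h handler }

    func main() {
    	var slot atomic.Value
    	slot.Store(&entry{h: hello{}})
    	if e, ok := slot.Load().(*entry); ok {
    		fmt.Println(e.h.Handle())
    	}
    }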
-type kindDecoderCacheEntry struct { - dec ValueDecoder -} - -type kindDecoderCache struct { - entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry -} - -func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) { - if rt < reflect.Kind(len(c.entries)) { - c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec}) - } -} - -func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) { - if rt < reflect.Kind(len(c.entries)) { - if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok { - return ent.dec, ent.dec != nil - } - } - return nil, false -} - -func (c *kindDecoderCache) Clone() *kindDecoderCache { - cc := new(kindDecoderCache) - for i, v := range c.entries { - if val := v.Load(); val != nil { - cc.entries[i].Store(val) - } - } - return cc -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go deleted file mode 100644 index cb8180f25c..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" -) - -// condAddrEncoder is the encoder used when a pointer to the encoding value has an encoder. -type condAddrEncoder struct { - canAddrEnc ValueEncoder - elseEnc ValueEncoder -} - -var _ ValueEncoder = (*condAddrEncoder)(nil) - -// newCondAddrEncoder returns an condAddrEncoder. -func newCondAddrEncoder(canAddrEnc, elseEnc ValueEncoder) *condAddrEncoder { - encoder := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} - return &encoder -} - -// EncodeValue is the ValueEncoderFunc for a value that may be addressable. -func (cae *condAddrEncoder) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.CanAddr() { - return cae.canAddrEnc.EncodeValue(ec, vw, val) - } - if cae.elseEnc != nil { - return cae.elseEnc.EncodeValue(ec, vw, val) - } - return ErrNoEncoder{Type: val.Type()} -} - -// condAddrDecoder is the decoder used when a pointer to the value has a decoder. -type condAddrDecoder struct { - canAddrDec ValueDecoder - elseDec ValueDecoder -} - -var _ ValueDecoder = (*condAddrDecoder)(nil) - -// newCondAddrDecoder returns an CondAddrDecoder. -func newCondAddrDecoder(canAddrDec, elseDec ValueDecoder) *condAddrDecoder { - decoder := condAddrDecoder{canAddrDec: canAddrDec, elseDec: elseDec} - return &decoder -} - -// DecodeValue is the ValueDecoderFunc for a value that may be addressable. -func (cad *condAddrDecoder) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.CanAddr() { - return cad.canAddrDec.DecodeValue(dc, vr, val) - } - if cad.elseDec != nil { - return cad.elseDec.DecodeValue(dc, vr, val) - } - return ErrNoDecoder{Type: val.Type()} -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go deleted file mode 100644 index 7e08aab35e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ /dev/null @@ -1,1807 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var ( - defaultValueDecoders DefaultValueDecoders - errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled") -) - -type decodeBinaryError struct { - subtype byte - typeName string -} - -func (d decodeBinaryError) Error() string { - return fmt.Sprintf("only binary values with subtype 0x00 or 0x02 can be decoded into %s, but got subtype %v", d.typeName, d.subtype) -} - -func newDefaultStructCodec() *StructCodec { - codec, err := NewStructCodec(DefaultStructTagParser) - if err != nil { - // This function is called from the codec registration path, so errors can't be propagated. If there's an error - // constructing the StructCodec, we panic to avoid losing it. - panic(fmt.Errorf("error creating default StructCodec: %w", err)) - } - return codec -} - -// DefaultValueDecoders is a namespace type for the default ValueDecoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -type DefaultValueDecoders struct{} - -// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with -// the provided RegistryBuilder. -// -// There is no support for decoding map[string]interface{} because there is no decoder for -// interface{}, so users must either register this decoder themselves or use the -// EmptyInterfaceDecoder available in the bson package. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultDecoders must not be nil")) - } - - intDecoder := decodeAdapter{dvd.IntDecodeValue, dvd.intDecodeType} - floatDecoder := decodeAdapter{dvd.FloatDecodeValue, dvd.floatDecodeType} - - rb. - RegisterTypeDecoder(tD, ValueDecoderFunc(dvd.DDecodeValue)). - RegisterTypeDecoder(tBinary, decodeAdapter{dvd.BinaryDecodeValue, dvd.binaryDecodeType}). - RegisterTypeDecoder(tUndefined, decodeAdapter{dvd.UndefinedDecodeValue, dvd.undefinedDecodeType}). - RegisterTypeDecoder(tDateTime, decodeAdapter{dvd.DateTimeDecodeValue, dvd.dateTimeDecodeType}). - RegisterTypeDecoder(tNull, decodeAdapter{dvd.NullDecodeValue, dvd.nullDecodeType}). - RegisterTypeDecoder(tRegex, decodeAdapter{dvd.RegexDecodeValue, dvd.regexDecodeType}). - RegisterTypeDecoder(tDBPointer, decodeAdapter{dvd.DBPointerDecodeValue, dvd.dBPointerDecodeType}). - RegisterTypeDecoder(tTimestamp, decodeAdapter{dvd.TimestampDecodeValue, dvd.timestampDecodeType}). - RegisterTypeDecoder(tMinKey, decodeAdapter{dvd.MinKeyDecodeValue, dvd.minKeyDecodeType}). - RegisterTypeDecoder(tMaxKey, decodeAdapter{dvd.MaxKeyDecodeValue, dvd.maxKeyDecodeType}). - RegisterTypeDecoder(tJavaScript, decodeAdapter{dvd.JavaScriptDecodeValue, dvd.javaScriptDecodeType}). 
- RegisterTypeDecoder(tSymbol, decodeAdapter{dvd.SymbolDecodeValue, dvd.symbolDecodeType}). - RegisterTypeDecoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeDecoder(tTime, defaultTimeCodec). - RegisterTypeDecoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeDecoder(tCoreArray, defaultArrayCodec). - RegisterTypeDecoder(tOID, decodeAdapter{dvd.ObjectIDDecodeValue, dvd.objectIDDecodeType}). - RegisterTypeDecoder(tDecimal, decodeAdapter{dvd.Decimal128DecodeValue, dvd.decimal128DecodeType}). - RegisterTypeDecoder(tJSONNumber, decodeAdapter{dvd.JSONNumberDecodeValue, dvd.jsonNumberDecodeType}). - RegisterTypeDecoder(tURL, decodeAdapter{dvd.URLDecodeValue, dvd.urlDecodeType}). - RegisterTypeDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)). - RegisterTypeDecoder(tCodeWithScope, decodeAdapter{dvd.CodeWithScopeDecodeValue, dvd.codeWithScopeDecodeType}). - RegisterDefaultDecoder(reflect.Bool, decodeAdapter{dvd.BooleanDecodeValue, dvd.booleanDecodeType}). - RegisterDefaultDecoder(reflect.Int, intDecoder). - RegisterDefaultDecoder(reflect.Int8, intDecoder). - RegisterDefaultDecoder(reflect.Int16, intDecoder). - RegisterDefaultDecoder(reflect.Int32, intDecoder). - RegisterDefaultDecoder(reflect.Int64, intDecoder). - RegisterDefaultDecoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultDecoder(reflect.Float32, floatDecoder). - RegisterDefaultDecoder(reflect.Float64, floatDecoder). - RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)). - RegisterDefaultDecoder(reflect.Map, defaultMapCodec). - RegisterDefaultDecoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultDecoder(reflect.String, defaultStringCodec). - RegisterDefaultDecoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()). - RegisterTypeMapEntry(bsontype.Double, tFloat64). - RegisterTypeMapEntry(bsontype.String, tString). - RegisterTypeMapEntry(bsontype.Array, tA). - RegisterTypeMapEntry(bsontype.Binary, tBinary). - RegisterTypeMapEntry(bsontype.Undefined, tUndefined). - RegisterTypeMapEntry(bsontype.ObjectID, tOID). - RegisterTypeMapEntry(bsontype.Boolean, tBool). - RegisterTypeMapEntry(bsontype.DateTime, tDateTime). - RegisterTypeMapEntry(bsontype.Regex, tRegex). - RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer). - RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript). - RegisterTypeMapEntry(bsontype.Symbol, tSymbol). - RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope). - RegisterTypeMapEntry(bsontype.Int32, tInt32). - RegisterTypeMapEntry(bsontype.Int64, tInt64). - RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp). - RegisterTypeMapEntry(bsontype.Decimal128, tDecimal). - RegisterTypeMapEntry(bsontype.MinKey, tMinKey). - RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey). - RegisterTypeMapEntry(bsontype.Type(0), tD). - RegisterTypeMapEntry(bsontype.EmbeddedDocument, tD). - RegisterHookDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)). - RegisterHookDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)) -} - -// DDecodeValue is the ValueDecoderFunc for primitive.D instances. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Type() != tD { - return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - dc.Ancestor = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a primitive.D", vrType) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return err - } - tEmptyTypeDecoder, _ := decoder.(typeDecoder) - - // Use the elements in the provided value if it's non nil. Otherwise, allocate a new D instance. - var elems primitive.D - if !val.IsNil() { - val.SetLen(0) - elems = val.Interface().(primitive.D) - } else { - elems = make(primitive.D, 0) - } - - for { - key, elemVr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } else if err != nil { - return err - } - - // Pass false for convert because we don't need to call reflect.Value.Convert for tEmpty. - elem, err := decodeTypeOrValueWithInfo(decoder, tEmptyTypeDecoder, dc, elemVr, tEmpty, false) - if err != nil { - return err - } - - elems = append(elems, primitive.E{Key: key, Value: elem.Interface()}) - } - - val.Set(reflect.ValueOf(elems)) - return nil -} - -func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.Bool { - return emptyValue, ValueDecoderError{ - Name: "BooleanDecodeValue", - Kinds: []reflect.Kind{reflect.Bool}, - Received: reflect.Zero(t), - } - } - - var b bool - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - b = (i32 != 0) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - b = (i64 != 0) - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - b = (f64 != 0) - case bsontype.Boolean: - b, err = vr.ReadBoolean() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a boolean", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(b), nil -} - -// BooleanDecodeValue is the ValueDecoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool { - return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - - elem, err := dvd.booleanDecodeType(dctx, vr, val.Type()) - if err != nil { - return err - } - - val.SetBool(elem.Bool()) - return nil -} - -func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Int8: - if i64 < math.MinInt8 || i64 > math.MaxInt8 { - return emptyValue, fmt.Errorf("%d overflows int8", i64) - } - - return reflect.ValueOf(int8(i64)), nil - case reflect.Int16: - if i64 < math.MinInt16 || i64 > math.MaxInt16 { - return emptyValue, fmt.Errorf("%d overflows int16", i64) - } - - return reflect.ValueOf(int16(i64)), nil - case reflect.Int32: - if i64 < math.MinInt32 || i64 > math.MaxInt32 { - return emptyValue, fmt.Errorf("%d overflows int32", i64) - } - - return reflect.ValueOf(int32(i64)), nil - case reflect.Int64: - return reflect.ValueOf(i64), nil - case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int - return emptyValue, fmt.Errorf("%d overflows int", i64) - } - - return reflect.ValueOf(int(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: reflect.Zero(t), - } - } -} - -// IntDecodeValue is the ValueDecoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "IntDecodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } - } - - elem, err := dvd.intDecodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetInt(elem.Int()) - return nil -} - -// UintDecodeValue is the ValueDecoderFunc for uint types. -// -// Deprecated: UintDecodeValue is not registered by default. Use UintCodec.DecodeValue instead. 
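The truncation rule enforced by intDecodeType above is what bson.Decoder.AllowTruncatingDoubles, the documented replacement for the Truncate field, toggles. A minimal sketch:

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson"
    	"go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func main() {
    	raw, err := bson.Marshal(bson.M{"n": 3.9})
    	if err != nil {
    		panic(err)
    	}

    	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
    	if err != nil {
    		panic(err)
    	}
    	dec.AllowTruncatingDoubles() // without this, decoding 3.9 into an int is an error

    	var out struct{ N int }
    	if err := dec.Decode(&out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.N) // 3
    }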
-func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var i64 int64 - var err error - switch vr.Type() { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled") - } - if f64 > float64(math.MaxInt64) { - return fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return err - } - if b { - i64 = 1 - } - default: - return fmt.Errorf("cannot decode %v into an integer type", vr.Type()) - } - - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - switch val.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return fmt.Errorf("%d overflows uint8", i64) - } - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return fmt.Errorf("%d overflows uint16", i64) - } - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return fmt.Errorf("%d overflows uint32", i64) - } - case reflect.Uint64: - if i64 < 0 { - return fmt.Errorf("%d overflows uint64", i64) - } - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return fmt.Errorf("%d overflows uint", i64) - } - default: - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - val.SetUint(uint64(i64)) - return nil -} - -func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var f float64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - f = float64(i32) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - f = float64(i64) - case bsontype.Double: - f, err = vr.ReadDouble() - if err != nil { - return emptyValue, err - } - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - f = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a float32 or float64 type", vrType) - } - - switch t.Kind() { - case reflect.Float32: - if !dc.Truncate && float64(float32(f)) != f { - return emptyValue, errCannotTruncate - } - - return reflect.ValueOf(float32(f)), nil - case reflect.Float64: - return reflect.ValueOf(f), nil - default: - return emptyValue, ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: reflect.Zero(t), - } - } -} - -// FloatDecodeValue is the ValueDecoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "FloatDecodeValue", - Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, - Received: val, - } - } - - elem, err := dvd.floatDecodeType(ec, vr, val.Type()) - if err != nil { - return err - } - - val.SetFloat(elem.Float()) - return nil -} - -// StringDecodeValue is the ValueDecoderFunc for string types. -// -// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - var str string - var err error - switch vr.Type() { - // TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed. - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return err - } - default: - return fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - if !val.CanSet() || val.Kind() != reflect.String { - return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} - } - - val.SetString(str) - return nil -} - -func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJavaScript { - return emptyValue, ValueDecoderError{ - Name: "JavaScriptDecodeValue", - Types: []reflect.Type{tJavaScript}, - Received: reflect.Zero(t), - } - } - - var js string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.JavaScript: - js, err = vr.ReadJavascript() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.JavaScript", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.JavaScript(js)), nil -} - -// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJavaScript { - return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - elem, err := dvd.javaScriptDecodeType(dctx, vr, tJavaScript) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tSymbol { - return emptyValue, ValueDecoderError{ - Name: "SymbolDecodeValue", - Types: []reflect.Type{tSymbol}, - Received: reflect.Zero(t), - } - } - - var symbol string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - symbol, err = vr.ReadString() - case bsontype.Symbol: - symbol, err = vr.ReadSymbol() - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "primitive.Symbol"} - } - symbol = string(data) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Symbol", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Symbol(symbol)), nil -} - -// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tSymbol { - return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - elem, err := dvd.symbolDecodeType(dctx, vr, tSymbol) - if err != nil { - return err - } - - val.SetString(elem.String()) - return nil -} - -func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tBinary { - return emptyValue, ValueDecoderError{ - Name: "BinaryDecodeValue", - Types: []reflect.Type{tBinary}, - Received: reflect.Zero(t), - } - } - - var data []byte - var subtype byte - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Binary: - data, subtype, err = vr.ReadBinary() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Binary", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}), nil -} - -// BinaryDecodeValue is the ValueDecoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tBinary { - return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - - elem, err := dvd.binaryDecodeType(dc, vr, tBinary) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tUndefined { - return emptyValue, ValueDecoderError{ - Name: "UndefinedDecodeValue", - Types: []reflect.Type{tUndefined}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into an Undefined", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Undefined{}), nil -} - -// UndefinedDecodeValue is the ValueDecoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tUndefined { - return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - elem, err := dvd.undefinedDecodeType(dc, vr, tUndefined) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// Accept both 12-byte string and pretty-printed 24-byte hex string formats. -func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tOID { - return emptyValue, ValueDecoderError{ - Name: "ObjectIDDecodeValue", - Types: []reflect.Type{tOID}, - Received: reflect.Zero(t), - } - } - - var oid primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.ObjectID: - oid, err = vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - case bsontype.String: - str, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - if oid, err = primitive.ObjectIDFromHex(str); err == nil { - break - } - if len(str) != 12 { - return emptyValue, fmt.Errorf("an ObjectID string must be exactly 12 bytes long (got %v)", len(str)) - } - byteArr := []byte(str) - copy(oid[:], byteArr) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an ObjectID", vrType) - } - - return reflect.ValueOf(oid), nil -} - -// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
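objectIDDecodeType above accepts both the 24-character hex form and a raw 12-byte string. The hex path is the ordinary public entry point:

    package main

    import (
    	"fmt"

    	"go.mongodb.org/mongo-driver/bson/primitive"
    )

    func main() {
    	// 24 hex characters; the value here is an arbitrary example.
    	oid, err := primitive.ObjectIDFromHex("5f1f4f9f8f1d2c3b4a596877")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(oid.Hex())
    }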
-func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tOID { - return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val} - } - - elem, err := dvd.objectIDDecodeType(dc, vr, tOID) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDateTime { - return emptyValue, ValueDecoderError{ - Name: "DateTimeDecodeValue", - Types: []reflect.Type{tDateTime}, - Received: reflect.Zero(t), - } - } - - var dt int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err = vr.ReadDateTime() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DateTime", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DateTime(dt)), nil -} - -// DateTimeDecodeValue is the ValueDecoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDateTime { - return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - elem, err := dvd.dateTimeDecodeType(dc, vr, tDateTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tNull { - return emptyValue, ValueDecoderError{ - Name: "NullDecodeValue", - Types: []reflect.Type{tNull}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Undefined: - err = vr.ReadUndefined() - case bsontype.Null: - err = vr.ReadNull() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Null", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Null{}), nil -} - -// NullDecodeValue is the ValueDecoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tNull { - return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - elem, err := dvd.nullDecodeType(dc, vr, tNull) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tRegex { - return emptyValue, ValueDecoderError{ - Name: "RegexDecodeValue", - Types: []reflect.Type{tRegex}, - Received: reflect.Zero(t), - } - } - - var pattern, options string - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Regex: - pattern, options, err = vr.ReadRegex() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Regex", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}), nil -} - -// RegexDecodeValue is the ValueDecoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tRegex { - return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - elem, err := dvd.regexDecodeType(dc, vr, tRegex) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDBPointer { - return emptyValue, ValueDecoderError{ - Name: "DBPointerDecodeValue", - Types: []reflect.Type{tDBPointer}, - Received: reflect.Zero(t), - } - } - - var ns string - var pointer primitive.ObjectID - var err error - switch vrType := vr.Type(); vrType { - case bsontype.DBPointer: - ns, pointer, err = vr.ReadDBPointer() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a DBPointer", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}), nil -} - -// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDBPointer { - return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - elem, err := dvd.dBPointerDecodeType(dc, vr, tDBPointer) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) { - if reflectType != tTimestamp { - return emptyValue, ValueDecoderError{ - Name: "TimestampDecodeValue", - Types: []reflect.Type{tTimestamp}, - Received: reflect.Zero(reflectType), - } - } - - var t, incr uint32 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Timestamp: - t, incr, err = vr.ReadTimestamp() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a Timestamp", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.Timestamp{T: t, I: incr}), nil -} - -// TimestampDecodeValue is the ValueDecoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTimestamp { - return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - elem, err := dvd.timestampDecodeType(dc, vr, tTimestamp) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMinKey { - return emptyValue, ValueDecoderError{ - Name: "MinKeyDecodeValue", - Types: []reflect.Type{tMinKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MinKey: - err = vr.ReadMinKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MinKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MinKey{}), nil -} - -// MinKeyDecodeValue is the ValueDecoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMinKey { - return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - elem, err := dvd.minKeyDecodeType(dc, vr, tMinKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tMaxKey { - return emptyValue, ValueDecoderError{ - Name: "MaxKeyDecodeValue", - Types: []reflect.Type{tMaxKey}, - Received: reflect.Zero(t), - } - } - - var err error - switch vrType := vr.Type(); vrType { - case bsontype.MaxKey: - err = vr.ReadMaxKey() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a MaxKey", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(primitive.MaxKey{}), nil -} - -// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tMaxKey { - return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - elem, err := dvd.maxKeyDecodeType(dc, vr, tMaxKey) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tDecimal { - return emptyValue, ValueDecoderError{ - Name: "Decimal128DecodeValue", - Types: []reflect.Type{tDecimal}, - Received: reflect.Zero(t), - } - } - - var d128 primitive.Decimal128 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Decimal128: - d128, err = vr.ReadDecimal128() - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type()) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(d128), nil -} - -// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
-func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tDecimal { - return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - - elem, err := dvd.decimal128DecodeType(dctx, vr, tDecimal) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tJSONNumber { - return emptyValue, ValueDecoderError{ - Name: "JSONNumberDecodeValue", - Types: []reflect.Type{tJSONNumber}, - Received: reflect.Zero(t), - } - } - - var jsonNum json.Number - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatFloat(f64, 'f', -1, 64)) - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(int64(i32), 10)) - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - jsonNum = json.Number(strconv.FormatInt(i64, 10)) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a json.Number", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(jsonNum), nil -} - -// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tJSONNumber { - return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val} - } - - elem, err := dvd.jsonNumberDecodeType(dc, vr, tJSONNumber) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tURL { - return emptyValue, ValueDecoderError{ - Name: "URLDecodeValue", - Types: []reflect.Type{tURL}, - Received: reflect.Zero(t), - } - } - - urlPtr := &url.URL{} - var err error - switch vrType := vr.Type(); vrType { - case bsontype.String: - var str string // Declare str here to avoid shadowing err during the ReadString call. - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - - urlPtr, err = url.Parse(str) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a *url.URL", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(urlPtr).Elem(), nil -} - -// URLDecodeValue is the ValueDecoderFunc for url.URL. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. 
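jsonNumberDecodeType above accepts all three BSON numeric types and renders each as a json.Number string. A small sketch of the observable behavior (a fragment; assumes the same imports as the earlier sketch plus encoding/json):

	raw, _ := bson.Marshal(bson.M{"n": 3.5}) // stored as a BSON double
	var out struct {
		N json.Number `bson:"n"`
	}
	_ = bson.Unmarshal(raw, &out)
	fmt.Println(out.N) // 3.5, produced by strconv.FormatFloat(f64, 'f', -1, 64)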
-func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tURL { - return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val} - } - - elem, err := dvd.urlDecodeType(dc, vr, tURL) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// TimeDecodeValue is the ValueDecoderFunc for time.Time. -// -// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.DateTime { - return fmt.Errorf("cannot decode %v into a time.Time", vr.Type()) - } - - dt, err := vr.ReadDateTime() - if err != nil { - return err - } - - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000).UTC())) - return nil -} - -// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. -// -// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null { - return fmt.Errorf("cannot decode %v into a []byte", vr.Type()) - } - - if !val.CanSet() || val.Type() != tByteSlice { - return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} - } - - if vr.Type() == bsontype.Null { - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - } - - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != 0x00 { - return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype) - } - - val.Set(reflect.ValueOf(data)) - return nil -} - -// MapDecodeValue is the ValueDecoderFunc for map[string]* types. -// -// Deprecated: MapDecodeValue is not registered by default. Use MapCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - elem := reflect.New(eType).Elem() - - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) - } - return nil -} - -// ArrayDecodeValue is the ValueDecoderFunc for array types. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if len(data) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type()) - } - - for idx, elem := range data { - val.Index(idx).Set(reflect.ValueOf(elem)) - } - return nil - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into an array", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if len(elems) > val.Len() { - return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems)) - } - - for idx, elem := range elems { - val.Index(idx).Set(elem) - } - - return nil -} - -// SliceDecodeValue is the ValueDecoderFunc for slice types. -// -// Deprecated: SliceDecodeValue is not registered by default. Use SliceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vr.Type() { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - default: - return fmt.Errorf("cannot decode %v into a slice", vr.Type()) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = dvd.decodeD - default: - elemsFunc = dvd.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} - -// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - if !val.Type().Implements(tValueUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. - } - - t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - m, ok := val.Interface().(ValueUnmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} - } - return m.UnmarshalBSONValue(t, src) -} - -// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - - if val.Kind() == reflect.Ptr && val.IsNil() { - if !val.CanSet() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val.Set(reflect.New(val.Type().Elem())) - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err - } - - // If the target Go value is a pointer and the BSON field value is empty, set the value to the - // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to - // change the pointer value from within the function (only the value at the pointer address), - // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON - // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches - // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and - // the JSON field value is "null". - if val.Kind() == reflect.Ptr && len(src) == 0 { - val.Set(reflect.Zero(val.Type())) - return nil - } - - if !val.Type().Implements(tUnmarshaler) { - if !val.CanAddr() { - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. 
- } - - m, ok := val.Interface().(Unmarshaler) - if !ok { - // NB: this error should be unreachable due to the above checks - return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} - } - return m.UnmarshalBSON(src) -} - -// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceDecodeValue is not registered by default. Use EmptyInterfaceCodec.DecodeValue instead. -func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - rtype, err := dc.LookupTypeMapEntry(vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.EmbeddedDocument: - if dc.Ancestor != nil { - rtype = dc.Ancestor - break - } - rtype = tD - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - default: - return err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return err - } - - elem := reflect.New(rtype).Elem() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCoreDocument { - return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, 0)) - } - - val.SetLen(0) - - cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr) - val.Set(reflect.ValueOf(cdoc)) - return err -} - -func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) { - elems := make([]reflect.Value, 0) - - ar, err := vr.ReadArray() - if err != nil { - return nil, err - } - - eType := val.Type().Elem() - - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return nil, err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - idx := 0 - for { - vr, err := ar.ReadValue() - if errors.Is(err, bsonrw.ErrEOA) { - break - } - if err != nil { - return nil, err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return nil, newDecodeError(strconv.Itoa(idx), err) - } - elems = append(elems, elem) - idx++ - } - - return elems, nil -} - -func (dvd DefaultValueDecoders) readCodeWithScope(dc DecodeContext, vr bsonrw.ValueReader) (primitive.CodeWithScope, error) { - var cws primitive.CodeWithScope - - code, dr, err := vr.ReadCodeWithScope() - if err != nil { - return cws, err - } - - scope := reflect.New(tD).Elem() - elems, err := dvd.decodeElemsFromDocumentReader(dc, dr) - if err != nil { - return cws, err - } - - scope.Set(reflect.MakeSlice(tD, 0, len(elems))) - scope.Set(reflect.Append(scope, elems...)) - - cws = primitive.CodeWithScope{ - Code: primitive.JavaScript(code), - Scope: scope.Interface().(primitive.D), - } - return cws, nil -} - -func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t 
!= tCodeWithScope { - return emptyValue, ValueDecoderError{ - Name: "CodeWithScopeDecodeValue", - Types: []reflect.Type{tCodeWithScope}, - Received: reflect.Zero(t), - } - } - - var cws primitive.CodeWithScope - var err error - switch vrType := vr.Type(); vrType { - case bsontype.CodeWithScope: - cws, err = dvd.readCodeWithScope(dc, vr) - case bsontype.Null: - err = vr.ReadNull() - case bsontype.Undefined: - err = vr.ReadUndefined() - default: - return emptyValue, fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vrType) - } - if err != nil { - return emptyValue, err - } - - return reflect.ValueOf(cws), nil -} - -// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value decoders registered. -func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tCodeWithScope { - return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - elem, err := dvd.codeWithScopeDecodeType(dc, vr, tCodeWithScope) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) { - switch vr.Type() { - case bsontype.Type(0), bsontype.EmbeddedDocument: - default: - return nil, fmt.Errorf("cannot decode %v into a D", vr.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return nil, err - } - - return dvd.decodeElemsFromDocumentReader(dc, dr) -} - -func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) { - decoder, err := dc.LookupDecoder(tEmpty) - if err != nil { - return nil, err - } - - elems := make([]reflect.Value, 0) - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return nil, err - } - - val := reflect.New(tEmpty).Elem() - err = decoder.DecodeValue(dc, vr, val) - if err != nil { - return nil, newDecodeError(key, err) - } - - elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()})) - } - - return elems, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go deleted file mode 100644 index 4751ae995e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ /dev/null @@ -1,856 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "net/url" - "reflect" - "sync" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var defaultValueEncoders DefaultValueEncoders - -var bvwPool = bsonrw.NewBSONValueWriterPool() - -var errInvalidValue = errors.New("cannot encode invalid element") - -var sliceWriterPool = sync.Pool{ - New: func() interface{} { - sw := make(bsonrw.SliceWriter, 0) - return &sw - }, -} - -func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error { - vw, err := dw.WriteDocumentElement(e.Key) - if err != nil { - return err - } - - if e.Value == nil { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value)) - if err != nil { - return err - } - return nil -} - -// DefaultValueEncoders is a namespace type for the default ValueEncoders used -// when creating a registry. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -type DefaultValueEncoders struct{} - -// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with -// the provided RegistryBuilder. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) { - if rb == nil { - panic(errors.New("argument to RegisterDefaultEncoders must not be nil")) - } - rb. - RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). - RegisterTypeEncoder(tTime, defaultTimeCodec). - RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). - RegisterTypeEncoder(tCoreArray, defaultArrayCodec). - RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). - RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). - RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). - RegisterTypeEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)). - RegisterTypeEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)). - RegisterTypeEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)). - RegisterTypeEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)). - RegisterTypeEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)). - RegisterTypeEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)). - RegisterTypeEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)). - RegisterTypeEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)). - RegisterTypeEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)). - RegisterTypeEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)). - RegisterTypeEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)). - RegisterTypeEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)). - RegisterTypeEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)). - RegisterTypeEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)). - RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)). - RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)). 
- RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)). - RegisterDefaultEncoder(reflect.Uint, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint8, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint16, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint32, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Uint64, defaultUIntCodec). - RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)). - RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)). - RegisterDefaultEncoder(reflect.Map, defaultMapCodec). - RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). - RegisterDefaultEncoder(reflect.String, defaultStringCodec). - RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()). - RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). - RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). - RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). - RegisterHookEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)) -} - -// BooleanEncodeValue is the ValueEncoderFunc for bool types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Bool { - return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val} - } - return vw.WriteBoolean(val.Bool()) -} - -func fitsIn32Bits(i int64) bool { - return math.MinInt32 <= i && i <= math.MaxInt32 -} - -// IntEncodeValue is the ValueEncoderFunc for int types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Int8, reflect.Int16, reflect.Int32: - return vw.WriteInt32(int32(val.Int())) - case reflect.Int: - i64 := val.Int() - if fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - case reflect.Int64: - i64 := val.Int() - if ec.MinSize && fitsIn32Bits(i64) { - return vw.WriteInt32(int32(i64)) - } - return vw.WriteInt64(i64) - } - - return ValueEncoderError{ - Name: "IntEncodeValue", - Kinds: []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int}, - Received: val, - } -} - -// UintEncodeValue is the ValueEncoderFunc for uint types. -// -// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead. 
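The sizing rule in IntEncodeValue is visible in marshalled output: a Go int that fits in 32 bits is written as BSON int32, a larger one as int64, and an int64 only shrinks to int32 when EncodeContext.MinSize is set. A quick check on a 64-bit platform (a fragment; uses only the public bson and bsontype packages):

	small, _ := bson.Marshal(bson.M{"v": int(1)})
	big, _ := bson.Marshal(bson.M{"v": int(1) << 40})
	fmt.Println(bson.Raw(small).Lookup("v").Type == bsontype.Int32) // true
	fmt.Println(bson.Raw(big).Lookup("v").Type == bsontype.Int64)  // true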
-func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - if ec.MinSize && u64 <= math.MaxInt32 { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -// FloatEncodeValue is the ValueEncoderFunc for float types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Float32, reflect.Float64: - return vw.WriteDouble(val.Float()) - } - - return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val} -} - -// StringEncodeValue is the ValueEncoderFunc for string types. -// -// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead. -func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tOID { - return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val} - } - return vw.WriteObjectID(val.Interface().(primitive.ObjectID)) -} - -// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDecimal { - return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val} - } - return vw.WriteDecimal128(val.Interface().(primitive.Decimal128)) -} - -// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tJSONNumber {
-		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
-	}
-	jsnum := val.Interface().(json.Number)
-
-	// Attempt int first, then float64
-	if i64, err := jsnum.Int64(); err == nil {
-		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
-	}
-
-	f64, err := jsnum.Float64()
-	if err != nil {
-		return err
-	}
-
-	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
-}
-
-// URLEncodeValue is the ValueEncoderFunc for url.URL.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
-// value encoders registered.
-func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tURL {
-		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
-	}
-	u := val.Interface().(url.URL)
-	return vw.WriteString(u.String())
-}
-
-// TimeEncodeValue is the ValueEncoderFunc for time.Time.
-//
-// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tTime {
-		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
-	}
-	tt := val.Interface().(time.Time)
-	dt := primitive.NewDateTimeFromTime(tt)
-	return vw.WriteDateTime(int64(dt))
-}
-
-// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
-//
-// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Type() != tByteSlice {
-		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
-	}
-	if val.IsNil() {
-		return vw.WriteNull()
-	}
-	return vw.WriteBinary(val.Interface().([]byte))
-}
-
-// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
-//
-// Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
-func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
-		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
-	}
-
-	if val.IsNil() {
-		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
-		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
-		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
-		// so just continue. The operations on a map reflection value are valid, so we can call
-		// MapKeys within mapEncodeValue without a problem.
-		err := vw.WriteNull()
-		if err == nil {
-			return nil
-		}
-	}
-
-	dw, err := vw.WriteDocument()
-	if err != nil {
-		return err
-	}
-
-	return dve.mapEncodeValue(ec, dw, val, nil)
-}
-
-// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
-// true if the provided key exists; this is mainly used for inline maps in the
-// struct codec.
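If I read the nil-map fallback above correctly, it is observable at the top level: null is not a legal top-level BSON value, so WriteNull fails and the encoder falls through to an empty document, whereas the same nil map nested in a document becomes null. A hedged sketch (my reading of the fallback, not a test from this patch):

	var m map[string]int // nil
	top, _ := bson.Marshal(m)
	fmt.Println(len(top)) // 5, the empty document
	nested, _ := bson.Marshal(bson.M{"m": m})
	fmt.Println(bson.Raw(nested).Lookup("m").Type == bsontype.Null) // true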
-func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - if collisionFn != nil && collisionFn(key.String()) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(key.String()) - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// ArrayEncodeValue is the ValueEncoderFunc for array types. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Array { - return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val} - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. - if val.Type().Elem() == tE { - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - e := val.Index(idx).Interface().(primitive.E) - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - // If we have a []byte we want to treat it as a binary instead of as an array. - if val.Type().Elem() == tByte { - var byteSlice []byte - for idx := 0; idx < val.Len(); idx++ { - byteSlice = append(byteSlice, val.Index(idx).Interface().(byte)) - } - return vw.WriteBinary(byteSlice) - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -// SliceEncodeValue is the ValueEncoderFunc for slice types. -// -// Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Slice { - return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - // If we have a []primitive.E we want to treat it as a document instead of as an array. 
- if val.Type().ConvertibleTo(tD) { - d := val.Convert(tD).Interface().(primitive.D) - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - for _, e := range d { - err = encodeElement(ec, dw, e) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() - } - - aw, err := vw.WriteArray() - if err != nil { - return err - } - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - for idx := 0; idx < val.Len(); idx++ { - currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - return aw.WriteArrayEnd() -} - -func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncoder ValueEncoder, currVal reflect.Value) (ValueEncoder, reflect.Value, error) { - if origEncoder != nil || (currVal.Kind() != reflect.Interface) { - return origEncoder, currVal, nil - } - currVal = currVal.Elem() - if !currVal.IsValid() { - return nil, currVal, errInvalidValue - } - currEncoder, err := ec.LookupEncoder(currVal.Type()) - - return currEncoder, currVal, err -} - -// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. -// -// Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead. -func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement ValueMarshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - case val.Type().Implements(tValueMarshaler): - // If ValueMarshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tValueMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tValueMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val} - } - - m, ok := val.Interface().(ValueMarshaler) - if !ok { - return vw.WriteNull() - } - t, data, err := m.MarshalBSONValue() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data) -} - -// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. 
-// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Marshaler - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - case val.Type().Implements(tMarshaler): - // If Marshaler is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tMarshaler) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tMarshaler) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val} - } - - m, ok := val.Interface().(Marshaler) - if !ok { - return vw.WriteNull() - } - data, err := m.MarshalBSON() - if err != nil { - return err - } - return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data) -} - -// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - // Either val or a pointer to val must implement Proxy - switch { - case !val.IsValid(): - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - case val.Type().Implements(tProxy): - // If Proxy is implemented on a concrete type, make sure that val isn't a nil pointer - if isImplementationNil(val, tProxy) { - return vw.WriteNull() - } - case reflect.PtrTo(val.Type()).Implements(tProxy) && val.CanAddr(): - val = val.Addr() - default: - return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val} - } - - m, ok := val.Interface().(Proxy) - if !ok { - return vw.WriteNull() - } - v, err := m.ProxyBSON() - if err != nil { - return err - } - if v == nil { - encoder, err := ec.LookupEncoder(nil) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil)) - } - vv := reflect.ValueOf(v) - switch vv.Kind() { - case reflect.Ptr, reflect.Interface: - vv = vv.Elem() - } - encoder, err := ec.LookupEncoder(vv.Type()) - if err != nil { - return err - } - return encoder.EncodeValue(ec, vw, vv) -} - -// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tJavaScript { - return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val} - } - - return vw.WriteJavascript(val.String()) -} - -// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
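ValueMarshalerEncodeValue, MarshalerEncodeValue and ProxyEncodeValue are hook encoders: they fire for any value whose type, or pointer type, implements the interface. A sketch of opting in via bson.ValueMarshaler (the Celsius type is invented for the example):

	type Celsius float64

	// MarshalBSONValue makes Celsius a bson.ValueMarshaler, so the hook
	// encoder is selected ahead of the float64 kind encoder.
	func (c Celsius) MarshalBSONValue() (bsontype.Type, []byte, error) {
		return bson.MarshalValue(fmt.Sprintf("%.1fC", float64(c)))
	}

With this in place, bson.Marshal(bson.M{"temp": Celsius(21.5)}) stores "temp" as the string "21.5C" rather than a double.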
-func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tSymbol { - return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val} - } - - return vw.WriteSymbol(val.String()) -} - -// BinaryEncodeValue is the ValueEncoderFunc for Binary. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tBinary { - return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val} - } - b := val.Interface().(primitive.Binary) - - return vw.WriteBinaryWithSubtype(b.Data, b.Subtype) -} - -// UndefinedEncodeValue is the ValueEncoderFunc for Undefined. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tUndefined { - return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val} - } - - return vw.WriteUndefined() -} - -// DateTimeEncodeValue is the ValueEncoderFunc for DateTime. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDateTime { - return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val} - } - - return vw.WriteDateTime(val.Int()) -} - -// NullEncodeValue is the ValueEncoderFunc for Null. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tNull { - return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val} - } - - return vw.WriteNull() -} - -// RegexEncodeValue is the ValueEncoderFunc for Regex. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tRegex { - return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val} - } - - regex := val.Interface().(primitive.Regex) - - return vw.WriteRegex(regex.Pattern, regex.Options) -} - -// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
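Each of these small encoders simply unwraps the corresponding primitive wrapper and emits it with the matching ValueWriter call. For example (field name invented), a primitive.Regex value round-trips to a BSON regular expression:

	raw, _ := bson.Marshal(bson.M{"pat": primitive.Regex{Pattern: "^a", Options: "i"}})
	fmt.Println(bson.Raw(raw).Lookup("pat").Type == bsontype.Regex) // true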
-func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tDBPointer { - return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val} - } - - dbp := val.Interface().(primitive.DBPointer) - - return vw.WriteDBPointer(dbp.DB, dbp.Pointer) -} - -// TimestampEncodeValue is the ValueEncoderFunc for Timestamp. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTimestamp { - return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val} - } - - ts := val.Interface().(primitive.Timestamp) - - return vw.WriteTimestamp(ts.T, ts.I) -} - -// MinKeyEncodeValue is the ValueEncoderFunc for MinKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMinKey { - return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val} - } - - return vw.WriteMinKey() -} - -// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tMaxKey { - return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val} - } - - return vw.WriteMaxKey() -} - -// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. -func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCoreDocument { - return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val} - } - - cdoc := val.Interface().(bsoncore.Document) - - return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc) -} - -// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default -// value encoders registered. 
-func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tCodeWithScope { - return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val} - } - - cws := val.Interface().(primitive.CodeWithScope) - - dw, err := vw.WriteCodeWithScope(string(cws.Code)) - if err != nil { - return err - } - - sw := sliceWriterPool.Get().(*bsonrw.SliceWriter) - defer sliceWriterPool.Put(sw) - *sw = (*sw)[:0] - - scopeVW := bvwPool.Get(sw) - defer bvwPool.Put(scopeVW) - - encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope)) - if err != nil { - return err - } - - err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope)) - if err != nil { - return err - } - - err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw) - if err != nil { - return err - } - return dw.WriteDocumentEnd() -} - -// isImplementationNil returns if val is a nil pointer and inter is implemented on a concrete type -func isImplementationNil(val reflect.Value, inter reflect.Type) bool { - vt := val.Type() - for vt.Kind() == reflect.Ptr { - vt = vt.Elem() - } - return vt.Implements(inter) && val.Kind() == reflect.Ptr && val.IsNil() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go deleted file mode 100644 index 4613e5a1ec..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2022-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsoncodec provides a system for encoding values to BSON representations and decoding -// values from BSON representations. This package considers both binary BSON and ExtendedJSON as -// BSON representations. The types in this package enable a flexible system for handling this -// encoding and decoding. -// -// The codec system is composed of two parts: -// -// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON -// representations. -// -// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for -// retrieving them. -// -// # ValueEncoders and ValueDecoders -// -// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. -// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the -// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc -// is provided to allow use of a function with the correct signature as a ValueEncoder. An -// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and -// to provide configuration information. -// -// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that -// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to -// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext -// instance is provided and serves similar functionality to the EncodeContext. -// -// # Registry -// -// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. 
See the Registry type -// documentation for examples of registering various custom encoders and decoders. A Registry can -// have three main types of codecs: -// -// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and -// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value -// whose type matches the registered type exactly. -// If the registered type is an interface, the codec will be invoked when encoding or decoding -// values whose type is the interface, but not for values with concrete types that implement the -// interface. -// -// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and -// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs -// will be invoked when encoding or decoding values whose types implement the interface. An example -// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method -// for any value whose type implements bson.Marshaler, regardless of the value's concrete type. -// -// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type -// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}. -// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances, -// respectively, when decoding into a bson.D. The following code would change the behavior so these -// values decode as Go int instances instead: -// -// intType := reflect.TypeOf(int(0)) -// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) -// -// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and -// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding -// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't -// match a registered type or hook encoder/decoder first. These methods should be used to change the -// behavior for all values for a specific kind. -// -// # Registry Lookup Procedure -// -// When looking up an encoder in a Registry, the precedence rules are as follows: -// -// 1. A type encoder registered for the exact type of the value. -// -// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to -// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and -// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries -// constructed using bson.NewRegistry have driver-defined hooks registered for the -// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take -// precedence over any new hooks. -// -// 3. A kind encoder registered for the value's kind. -// -// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The -// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder -// will be returned if no decoder is found. -// -// # DefaultValueEncoders and DefaultValueDecoders -// -// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and -// ValueDecoders for handling a wide range of Go types, including all of the types within the -// primitive package. To make registering these codecs easier, a helper method on each type is -// provided. 
For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
-// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; these methods also
-// handle registering type map entries for each BSON type.
-package bsoncodec
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
deleted file mode 100644
index 098368f071..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"reflect"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-	"go.mongodb.org/mongo-driver/bson/primitive"
-)
-
-// EmptyInterfaceCodec is the Codec used for interface{} values.
-//
-// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go
-// Driver 2.0. To configure the empty interface encode and decode behavior, use
-// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
-// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface
-// encode and decode behavior for a mongo.Client, use
-// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
-//
-// For example, to configure a mongo.Client to unmarshal BSON binary field
-// values as a Go byte slice, use:
-//
-//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
-//		BinaryAsSlice: true,
-//	})
-//
-// See the deprecation notice for each field in EmptyInterfaceCodec for the
-// corresponding settings.
-type EmptyInterfaceCodec struct {
-	// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
-	// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
-	//
-	// Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead.
-	DecodeBinaryAsSlice bool
-}
-
-var (
-	defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
-
-	// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
-	// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
-	// collection.
-	_ typeDecoder = defaultEmptyInterfaceCodec
-)
-
-// NewEmptyInterfaceCodec returns an EmptyInterfaceCodec with options opts.
-//
-// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See
-// [EmptyInterfaceCodec] for more details.
-func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
-	interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
-
-	codec := EmptyInterfaceCodec{}
-	if interfaceOpt.DecodeBinaryAsSlice != nil {
-		codec.DecodeBinaryAsSlice = *interfaceOpt.DecodeBinaryAsSlice
-	}
-	return &codec
-}
-
-// EncodeValue is the ValueEncoderFunc for interface{}. 
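
To make the type-map mechanism described in doc.go above concrete, here is a minimal, self-contained sketch against the v1.x driver API. The program structure and variable names are ours for illustration, not part of the vendored code being removed:

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// Map BSON int32/int64 to Go int, as in the doc.go example above.
	reg := bson.NewRegistry()
	intType := reflect.TypeOf(int(0))
	reg.RegisterTypeMapEntry(bsontype.Int32, intType)
	reg.RegisterTypeMapEntry(bsontype.Int64, intType)

	raw, _ := bson.Marshal(bson.D{{Key: "n", Value: int32(42)}})

	var doc bson.D
	_ = bson.UnmarshalWithRegistry(reg, raw, &doc)
	fmt.Printf("%T\n", doc[0].Value) // int instead of the default int32
}
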
-func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tEmpty { - return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - encoder, err := ec.LookupEncoder(val.Elem().Type()) - if err != nil { - return err - } - - return encoder.EncodeValue(ec, vw, val.Elem()) -} - -func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { - isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument { - if dc.defaultDocumentType != nil { - // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return - // that type. - return dc.defaultDocumentType, nil - } - if dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil - } - } - - rtype, err := dc.LookupTypeMapEntry(valueType) - if err == nil { - return rtype, nil - } - - if isDocument { - // For documents, fallback to looking up a type map entry for bsontype.Type(0) or bsontype.EmbeddedDocument, - // depending on the original valueType. - var lookupType bsontype.Type - switch valueType { - case bsontype.Type(0): - lookupType = bsontype.EmbeddedDocument - case bsontype.EmbeddedDocument: - lookupType = bsontype.Type(0) - } - - rtype, err = dc.LookupTypeMapEntry(lookupType) - if err == nil { - return rtype, nil - } - } - - return nil, err -} - -func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tEmpty { - return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)} - } - - rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) - if err != nil { - switch vr.Type() { - case bsontype.Null: - return reflect.Zero(t), vr.ReadNull() - default: - return emptyValue, err - } - } - - decoder, err := dc.LookupDecoder(rtype) - if err != nil { - return emptyValue, err - } - - elem, err := decodeTypeOrValue(decoder, dc, vr, rtype) - if err != nil { - return emptyValue, err - } - - if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary { - binElem := elem.Interface().(primitive.Binary) - if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { - elem = reflect.ValueOf(binElem.Data) - } - } - - return elem, nil -} - -// DecodeValue is the ValueDecoderFunc for interface{}. -func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tEmpty { - return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val} - } - - elem, err := eic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.Set(elem) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go deleted file mode 100644 index d7e00ffa8d..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var defaultMapCodec = NewMapCodec() - -// MapCodec is the Codec used for map values. -// -// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To -// configure the map encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON -// documents, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// NilMapAsEmpty: true, -// }) -// -// See the deprecation notice for each field in MapCodec for the corresponding -// settings. -type MapCodec struct { - // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination - // value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. - DecodeZerosMap bool - - // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of - // BSON null. - // - // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. - EncodeNilAsEmpty bool - - // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name - // strings using fmt.Sprintf() instead of the default string conversion logic. - // - // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or - // options.BSONOptions.StringifyMapKeysWithFmt instead. - EncodeKeysWithStringer bool -} - -// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key. -// This applies to types used as map keys and is similar to encoding.TextMarshaler. -type KeyMarshaler interface { - MarshalKey() (key string, err error) -} - -// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation -// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler. -// -// UnmarshalKey must be able to decode the form generated by MarshalKey. -// UnmarshalKey must copy the text if it wishes to retain the text -// after returning. -type KeyUnmarshaler interface { - UnmarshalKey(key string) error -} - -// NewMapCodec returns a MapCodec with options opts. -// -// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See -// [MapCodec] for more details. -func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { - mapOpt := bsonoptions.MergeMapCodecOptions(opts...) - - codec := MapCodec{} - if mapOpt.DecodeZerosMap != nil { - codec.DecodeZerosMap = *mapOpt.DecodeZerosMap - } - if mapOpt.EncodeNilAsEmpty != nil { - codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty - } - if mapOpt.EncodeKeysWithStringer != nil { - codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer - } - return &codec -} - -// EncodeValue is the ValueEncoder for map[*]* types. 
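
The KeyMarshaler/KeyUnmarshaler interfaces above are easiest to see with a concrete key type. A hedged sketch; the userID type and its "region:id" key format are invented for illustration:

package main

import (
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
)

// userID is a hypothetical composite map key. MarshalKey/UnmarshalKey give it
// a stable "region:id" BSON field name, per the interfaces defined above.
type userID struct {
	Region, ID string
}

func (u userID) MarshalKey() (string, error) {
	return u.Region + ":" + u.ID, nil
}

func (u *userID) UnmarshalKey(key string) error {
	region, id, ok := strings.Cut(key, ":")
	if !ok {
		return fmt.Errorf("malformed key %q", key)
	}
	u.Region, u.ID = region, id
	return nil
}

func main() {
	in := map[userID]int{{Region: "eu", ID: "7"}: 1}
	raw, _ := bson.Marshal(in) // encodes as {"eu:7": 1}

	out := map[userID]int{}
	_ = bson.Unmarshal(raw, &out)
	fmt.Println(out)
}
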
-func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Map { - return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty { - // If we have a nil map but we can't WriteNull, that means we're probably trying to encode - // to a TopLevel document. We can't currently tell if this is what actually happened, but if - // there's a deeper underlying problem, the error will also be returned from WriteDocument, - // so just continue. The operations on a map reflection value are valid, so we can call - // MapKeys within mapEncodeValue without a problem. - err := vw.WriteNull() - if err == nil { - return nil - } - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - - return mc.mapEncodeValue(ec, dw, val, nil) -} - -// mapEncodeValue handles encoding of the values of a map. The collisionFn returns -// true if the provided key exists, this is mainly used for inline maps in the -// struct codec. -func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error { - - elemType := val.Type().Elem() - encoder, err := ec.LookupEncoder(elemType) - if err != nil && elemType.Kind() != reflect.Interface { - return err - } - - keys := val.MapKeys() - for _, key := range keys { - keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt) - if err != nil { - return err - } - - if collisionFn != nil && collisionFn(keyStr) { - return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) - } - - currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { - return lookupErr - } - - vw, err := dw.WriteDocumentElement(keyStr) - if err != nil { - return err - } - - if errors.Is(lookupErr, errInvalidValue) { - err = vw.WriteNull() - if err != nil { - return err - } - continue - } - - err = currEncoder.EncodeValue(ec, vw, currVal) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} - -// DecodeValue is the ValueDecoder for map[string/decimal]* types. 
-func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if val.Kind() != reflect.Map || (!val.CanSet() && val.IsNil()) { - return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeMap(val.Type())) - } - - if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) { - clearMap(val) - } - - eType := val.Type().Elem() - decoder, err := dc.LookupDecoder(eType) - if err != nil { - return err - } - eTypeDecoder, _ := decoder.(typeDecoder) - - if eType == tEmpty { - dc.Ancestor = val.Type() - } - - keyType := val.Type().Key() - - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - k, err := mc.decodeKey(key, keyType) - if err != nil { - return err - } - - elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true) - if err != nil { - return newDecodeError(key, err) - } - - val.SetMapIndex(k, elem) - } - return nil -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) { - if mc.EncodeKeysWithStringer || encodeKeysWithStringer { - return fmt.Sprint(val), nil - } - - // keys of any string type are used directly - if val.Kind() == reflect.String { - return val.String(), nil - } - // KeyMarshalers are marshaled - if km, ok := val.Interface().(KeyMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - buf, err := km.MarshalKey() - if err == nil { - return buf, nil - } - return "", err - } - // keys implement encoding.TextMarshaler are marshaled. - if km, ok := val.Interface().(encoding.TextMarshaler); ok { - if val.Kind() == reflect.Ptr && val.IsNil() { - return "", nil - } - - buf, err := km.MarshalText() - if err != nil { - return "", err - } - - return string(buf), nil - } - - switch val.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(val.Uint(), 10), nil - } - return "", fmt.Errorf("unsupported key type: %v", val.Type()) -} - -var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - -func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { - keyVal := reflect.ValueOf(key) - var err error - switch { - // First, if EncodeKeysWithStringer is not enabled, try to decode withKeyUnmarshaler - case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(KeyUnmarshaler) - err = v.UnmarshalKey(key) - keyVal = keyVal.Elem() - // Try to decode encoding.TextUnmarshalers. 
- case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - keyVal = reflect.New(keyType) - v := keyVal.Interface().(encoding.TextUnmarshaler) - err = v.UnmarshalText([]byte(key)) - keyVal = keyVal.Elem() - // Otherwise, go to type specific behavior - default: - switch keyType.Kind() { - case reflect.String: - keyVal = reflect.ValueOf(key).Convert(keyType) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, parseErr := strconv.ParseInt(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, parseErr := strconv.ParseUint(key, 10, 64) - if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) { - err = fmt.Errorf("failed to unmarshal number key %v", key) - break - } - keyVal = reflect.ValueOf(n).Convert(keyType) - case reflect.Float32, reflect.Float64: - if mc.EncodeKeysWithStringer { - parsed, err := strconv.ParseFloat(key, 64) - if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) - } - keyVal = reflect.ValueOf(parsed) - break - } - fallthrough - default: - return keyVal, fmt.Errorf("unsupported key type: %v", keyType) - } - } - return keyVal, err -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go deleted file mode 100644 index fbd9f0a9e9..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import "fmt" - -type mode int - -const ( - _ mode = iota - mTopLevel - mDocument - mArray - mValue - mElement - mCodeWithScope - mSpacer -) - -func (m mode) String() string { - var str string - - switch m { - case mTopLevel: - str = "TopLevel" - case mDocument: - str = "DocumentMode" - case mArray: - str = "ArrayMode" - case mValue: - str = "ValueMode" - case mElement: - str = "ElementMode" - case mCodeWithScope: - str = "CodeWithScopeMode" - case mSpacer: - str = "CodeWithScopeSpacerFrame" - default: - str = "UnknownMode" - } - - return str -} - -// TransitionError is an error returned when an invalid progressing a -// ValueReader or ValueWriter state machine occurs. -type TransitionError struct { - parent mode - current mode - destination mode -} - -func (te TransitionError) Error() string { - if te.destination == mode(0) { - return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current) - } - if te.parent == mode(0) { - return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination) - } - return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go deleted file mode 100644 index ddfa4a33e1..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -var _ ValueEncoder = &PointerCodec{} -var _ ValueDecoder = &PointerCodec{} - -// PointerCodec is the Codec used for pointers. -// -// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To -// override the default pointer encode and decode behavior, create a new registry -// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new -// encoder and decoder for pointers. -// -// For example, -// -// reg := bson.NewRegistry() -// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) -// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) -type PointerCodec struct { - ecache typeEncoderCache - dcache typeDecoderCache -} - -// NewPointerCodec returns a PointerCodec that has been initialized. -// -// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See -// [PointerCodec] for more details. -func NewPointerCodec() *PointerCodec { - return &PointerCodec{} -} - -// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil -// or looking up an encoder for the type of value the pointer points to. -func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.Ptr { - if !val.IsValid() { - return vw.WriteNull() - } - return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val} - } - - if val.IsNil() { - return vw.WriteNull() - } - - typ := val.Type() - if v, ok := pc.ecache.Load(typ); ok { - if v == nil { - return ErrNoEncoder{Type: typ} - } - return v.EncodeValue(ec, vw, val.Elem()) - } - // TODO(charlie): handle concurrent requests for the same type - enc, err := ec.LookupEncoder(typ.Elem()) - enc = pc.ecache.LoadOrStore(typ, enc) - if err != nil { - return err - } - return enc.EncodeValue(ec, vw, val.Elem()) -} - -// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and -// using that to decode. If the BSON value is Null, this method will set the pointer to nil. 
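
The nil-pointer handling described above round-trips through BSON null. A short sketch; the payload type and field names are ours:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type payload struct {
	N *int `bson:"n"`
}

func main() {
	// A nil *int encodes as BSON null; decoding null yields a nil pointer,
	// matching the PointerCodec behavior described above.
	raw, _ := bson.Marshal(payload{})

	var out payload
	_ = bson.Unmarshal(raw, &out)
	fmt.Println(out.N == nil) // true
}
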
-func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Kind() != reflect.Ptr {
-		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
-	}
-
-	typ := val.Type()
-	if vr.Type() == bsontype.Null {
-		val.Set(reflect.Zero(typ))
-		return vr.ReadNull()
-	}
-	if vr.Type() == bsontype.Undefined {
-		val.Set(reflect.Zero(typ))
-		return vr.ReadUndefined()
-	}
-
-	if val.IsNil() {
-		val.Set(reflect.New(typ.Elem()))
-	}
-
-	if v, ok := pc.dcache.Load(typ); ok {
-		if v == nil {
-			return ErrNoDecoder{Type: typ}
-		}
-		return v.DecodeValue(dc, vr, val.Elem())
-	}
-	// TODO(charlie): handle concurrent requests for the same type
-	dec, err := dc.LookupDecoder(typ.Elem())
-	dec = pc.dcache.LoadOrStore(typ, dec)
-	if err != nil {
-		return err
-	}
-	return dec.DecodeValue(dc, vr, val.Elem())
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
deleted file mode 100644
index 4cf2b01ab4..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
-// that implement this interface will have ProxyBSON called during the encoding process and that
-// value will be encoded in place for the implementer.
-type Proxy interface {
-	ProxyBSON() (interface{}, error)
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
deleted file mode 100644
index 196c491bbb..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sync"
-
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
-//
-// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
-var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
-
-// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
-//
-// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
-var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
-
-// ErrNoEncoder is returned when there wasn't an encoder available for a type.
-//
-// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
-type ErrNoEncoder struct {
-	Type reflect.Type
-}
-
-func (ene ErrNoEncoder) Error() string {
-	if ene.Type == nil {
-		return "no encoder found for <nil>"
-	}
-	return "no encoder found for " + ene.Type.String()
-}
-
-// ErrNoDecoder is returned when there wasn't a decoder available for a type.
-//
-// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0. 
-type ErrNoDecoder struct {
-	Type reflect.Type
-}
-
-func (end ErrNoDecoder) Error() string {
-	return "no decoder found for " + end.Type.String()
-}
-
-// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
-//
-// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
-type ErrNoTypeMapEntry struct {
-	Type bsontype.Type
-}
-
-func (entme ErrNoTypeMapEntry) Error() string {
-	return "no type map entry found for " + entme.Type.String()
-}
-
-// ErrNotInterface is returned when the provided type is not an interface.
-//
-// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
-var ErrNotInterface = errors.New("The provided type is not an interface")
-
-// A RegistryBuilder is used to build a Registry. This type is not goroutine
-// safe.
-//
-// Deprecated: Use Registry instead.
-type RegistryBuilder struct {
-	registry *Registry
-}
-
-// NewRegistryBuilder creates a new empty RegistryBuilder.
-//
-// Deprecated: Use NewRegistry instead.
-func NewRegistryBuilder() *RegistryBuilder {
-	return &RegistryBuilder{
-		registry: NewRegistry(),
-	}
-}
-
-// RegisterCodec will register the provided ValueCodec for the provided type.
-//
-// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
-func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
-	rb.RegisterTypeEncoder(t, codec)
-	rb.RegisterTypeDecoder(t, codec)
-	return rb
-}
-
-// RegisterTypeEncoder will register the provided ValueEncoder for the provided type.
-//
-// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
-// for a pointer to that type.
-//
-// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
-// will not be called when marshaling a non-interface type that implements the interface.
-//
-// Deprecated: Use Registry.RegisterTypeEncoder instead.
-func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
-	rb.registry.RegisterTypeEncoder(t, enc)
-	return rb
-}
-
-// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
-// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
-// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
-//
-// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
-func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
-	rb.registry.RegisterInterfaceEncoder(t, enc)
-	return rb
-}
-
-// RegisterTypeDecoder will register the provided ValueDecoder for the provided type.
-//
-// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
-// for a pointer to that type.
-//
-// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
-// It will not be called when unmarshaling into a non-interface type that implements the interface.
-//
-// Deprecated: Use Registry.RegisterTypeDecoder instead.
-func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
-	rb.registry.RegisterTypeDecoder(t, dec)
-	return rb
-}
-
-// RegisterHookDecoder will register a decoder for the provided interface type t. 
This decoder will be called when -// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not -// an interface (i.e. t.Kind() != reflect.Interface), this method will panic. -// -// Deprecated: Use Registry.RegisterInterfaceDecoder instead. -func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterInterfaceDecoder(t, dec) - return rb -} - -// RegisterEncoder registers the provided type and encoder pair. -// -// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead. -func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { - if t == tEmpty { - rb.registry.RegisterTypeEncoder(t, enc) - return rb - } - switch t.Kind() { - case reflect.Interface: - rb.registry.RegisterInterfaceEncoder(t, enc) - default: - rb.registry.RegisterTypeEncoder(t, enc) - } - return rb -} - -// RegisterDecoder registers the provided type and decoder pair. -// -// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead. -func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { - if t == nil { - rb.registry.RegisterTypeDecoder(t, dec) - return rb - } - if t == tEmpty { - rb.registry.RegisterTypeDecoder(t, dec) - return rb - } - switch t.Kind() { - case reflect.Interface: - rb.registry.RegisterInterfaceDecoder(t, dec) - default: - rb.registry.RegisterTypeDecoder(t, dec) - } - return rb -} - -// RegisterDefaultEncoder will register the provided ValueEncoder to the provided -// kind. -// -// Deprecated: Use Registry.RegisterKindEncoder instead. -func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder { - rb.registry.RegisterKindEncoder(kind, enc) - return rb -} - -// RegisterDefaultDecoder will register the provided ValueDecoder to the -// provided kind. -// -// Deprecated: Use Registry.RegisterKindDecoder instead. -func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder { - rb.registry.RegisterKindDecoder(kind, dec) - return rb -} - -// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this -// mapping is decoding situations where an empty interface is used and a default type needs to be -// created and decoded into. -// -// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON -// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents -// to decode to bson.Raw, use the following code: -// -// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) -// -// Deprecated: Use Registry.RegisterTypeMapEntry instead. -func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { - rb.registry.RegisterTypeMapEntry(bt, rt) - return rb -} - -// Build creates a Registry from the current state of this RegistryBuilder. -// -// Deprecated: Use NewRegistry instead. 
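
The deprecation notices above all point from the builder API to direct Registry methods. A hedged before/after sketch of that migration; the celsius type and encodeCelsius function are invented placeholders:

package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type celsius float64

// encodeCelsius is a trivial encoder that exists only so the sketch compiles.
func encodeCelsius(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	return vw.WriteDouble(val.Float())
}

func main() {
	t := reflect.TypeOf(celsius(0))
	enc := bsoncodec.ValueEncoderFunc(encodeCelsius)

	// Deprecated builder style:
	_ = bsoncodec.NewRegistryBuilder().RegisterTypeEncoder(t, enc).Build()

	// Replacement style, per the deprecation notices above:
	reg := bsoncodec.NewRegistry()
	reg.RegisterTypeEncoder(t, enc)
	_ = reg
}
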
-func (rb *RegistryBuilder) Build() *Registry {
-	r := &Registry{
-		interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
-		interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
-		typeEncoders:      rb.registry.typeEncoders.Clone(),
-		typeDecoders:      rb.registry.typeDecoders.Clone(),
-		kindEncoders:      rb.registry.kindEncoders.Clone(),
-		kindDecoders:      rb.registry.kindDecoders.Clone(),
-	}
-	rb.registry.typeMap.Range(func(k, v interface{}) bool {
-		if k != nil && v != nil {
-			r.typeMap.Store(k, v)
-		}
-		return true
-	})
-	return r
-}
-
-// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
-// type passed around, and Encoders and Decoders are constructed from it.
-type Registry struct {
-	interfaceEncoders []interfaceValueEncoder
-	interfaceDecoders []interfaceValueDecoder
-	typeEncoders      *typeEncoderCache
-	typeDecoders      *typeDecoderCache
-	kindEncoders      *kindEncoderCache
-	kindDecoders      *kindDecoderCache
-	typeMap           sync.Map // map[bsontype.Type]reflect.Type
-}
-
-// NewRegistry creates a new empty Registry.
-func NewRegistry() *Registry {
-	return &Registry{
-		typeEncoders: new(typeEncoderCache),
-		typeDecoders: new(typeDecoderCache),
-		kindEncoders: new(kindEncoderCache),
-		kindDecoders: new(kindDecoderCache),
-	}
-}
-
-// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
-//
-// The type will be used as provided, so an encoder can be registered for a type and a different
-// encoder can be registered for a pointer to that type.
-//
-// If the given type is an interface, the encoder will be called when marshaling a type that is
-// that interface. It will not be called when marshaling a non-interface type that implements the
-// interface. To get the latter behavior, call RegisterHookEncoder instead.
-//
-// RegisterTypeEncoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
-	r.typeEncoders.Store(valueType, enc)
-}
-
-// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
-//
-// The type will be used as provided, so a decoder can be registered for a type and a different
-// decoder can be registered for a pointer to that type.
-//
-// If the given type is an interface, the decoder will be called when unmarshaling into a type that
-// is that interface. It will not be called when unmarshaling into a non-interface type that
-// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
-//
-// RegisterTypeDecoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
-	r.typeDecoders.Store(valueType, dec)
-}
-
-// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
-//
-// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
-// example, consider the type MyInt defined as
-//
-//	type MyInt int32
-//
-// To define an encoder for MyInt and int32, use RegisterKindEncoder like
-//
-//	reg.RegisterKindEncoder(reflect.Int32, myEncoder)
-//
-// RegisterKindEncoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
-	r.kindEncoders.Store(kind, enc)
-}
-
-// RegisterKindDecoder registers the provided ValueDecoder for the provided kind. 
-//
-// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
-// example, consider the type MyInt defined as
-//
-//	type MyInt int32
-//
-// To define a decoder for MyInt and int32, use RegisterKindDecoder like
-//
-//	reg.RegisterKindDecoder(reflect.Int32, myDecoder)
-//
-// RegisterKindDecoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
-	r.kindDecoders.Store(kind, dec)
-}
-
-// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
-// be called when marshaling a type if the type implements iface or a pointer to the type
-// implements iface. If the provided type is not an interface
-// (i.e. iface.Kind() != reflect.Interface), this method will panic.
-//
-// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
-	if iface.Kind() != reflect.Interface {
-		panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
-			"got type %s with kind %s", iface, iface.Kind())
-		panic(panicStr)
-	}
-
-	for idx, encoder := range r.interfaceEncoders {
-		if encoder.i == iface {
-			r.interfaceEncoders[idx].ve = enc
-			return
-		}
-	}
-
-	r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
-}
-
-// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
-// be called when unmarshaling into a type if the type implements iface or a pointer to the type
-// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
-// this method will panic.
-//
-// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
-func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
-	if iface.Kind() != reflect.Interface {
-		panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
-			"got type %s with kind %s", iface, iface.Kind())
-		panic(panicStr)
-	}
-
-	for idx, decoder := range r.interfaceDecoders {
-		if decoder.i == iface {
-			r.interfaceDecoders[idx].vd = dec
-			return
-		}
-	}
-
-	r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
-}
-
-// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
-// mapping is decoding situations where an empty interface is used and a default type needs to be
-// created and decoded into.
-//
-// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
-// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
-// to decode to bson.Raw, use the following code:
-//
-//	reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
-func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) {
-	r.typeMap.Store(bt, rt)
-}
-
-// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup
-// order:
-//
-// 1. An encoder registered for the exact type. If the given type is an interface, an encoder
-// registered using RegisterTypeEncoder for that interface will be selected.
-//
-// 2. 
An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type -// or by a pointer to the type. -// -// 3. An encoder registered using RegisterKindEncoder for the kind of value. -// -// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for -// concurrent use by multiple goroutines after all codecs and encoders are registered. -func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { - if valueType == nil { - return nil, ErrNoEncoder{Type: valueType} - } - enc, found := r.lookupTypeEncoder(valueType) - if found { - if enc == nil { - return nil, ErrNoEncoder{Type: valueType} - } - return enc, nil - } - - enc, found = r.lookupInterfaceEncoder(valueType, true) - if found { - return r.typeEncoders.LoadOrStore(valueType, enc), nil - } - - if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { - return r.storeTypeEncoder(valueType, v), nil - } - return nil, ErrNoEncoder{Type: valueType} -} - -func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder { - return r.typeEncoders.LoadOrStore(rt, enc) -} - -func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) { - return r.typeEncoders.Load(rt) -} - -func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) { - if valueType == nil { - return nil, false - } - for _, ienc := range r.interfaceEncoders { - if valueType.Implements(ienc.i) { - return ienc.ve, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceEncoders - defaultEnc, found := r.lookupInterfaceEncoder(valueType, false) - if !found { - defaultEnc, _ = r.kindEncoders.Load(valueType.Kind()) - } - return newCondAddrEncoder(ienc.ve, defaultEnc), true - } - } - return nil, false -} - -// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup -// order: -// -// 1. A decoder registered for the exact type. If the given type is an interface, a decoder -// registered using RegisterTypeDecoder for that interface will be selected. -// -// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by -// a pointer to the type. -// -// 3. A decoder registered using RegisterKindDecoder for the kind of value. -// -// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for -// concurrent use by multiple goroutines after all codecs and decoders are registered. 
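
The three-step lookup order documented above can be exercised directly. A sketch against the v1.x API; the celsius type and enc helper are invented for illustration:

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// celsius implements fmt.Stringer, so it could match a type, an interface,
// or a kind registration; LookupEncoder must prefer the exact-type one.
type celsius float64

func (c celsius) String() string { return fmt.Sprintf("%.1fC", float64(c)) }

func enc(name string) bsoncodec.ValueEncoder {
	return bsoncodec.ValueEncoderFunc(func(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, _ reflect.Value) error {
		return vw.WriteString(name)
	})
}

func main() {
	tCelsius := reflect.TypeOf(celsius(0))
	tStringer := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()

	reg := bsoncodec.NewRegistry()
	reg.RegisterKindEncoder(reflect.Float64, enc("kind"))     // step 3
	reg.RegisterInterfaceEncoder(tStringer, enc("interface")) // step 2
	reg.RegisterTypeEncoder(tCelsius, enc("type"))            // step 1

	got, err := reg.LookupEncoder(tCelsius)
	fmt.Println(got != nil, err) // the exact-type encoder wins; err is nil
}
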
-func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { - if valueType == nil { - return nil, ErrNilType - } - dec, found := r.lookupTypeDecoder(valueType) - if found { - if dec == nil { - return nil, ErrNoDecoder{Type: valueType} - } - return dec, nil - } - - dec, found = r.lookupInterfaceDecoder(valueType, true) - if found { - return r.storeTypeDecoder(valueType, dec), nil - } - - if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { - return r.storeTypeDecoder(valueType, v), nil - } - return nil, ErrNoDecoder{Type: valueType} -} - -func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) { - return r.typeDecoders.Load(valueType) -} - -func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder { - return r.typeDecoders.LoadOrStore(typ, dec) -} - -func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) { - for _, idec := range r.interfaceDecoders { - if valueType.Implements(idec.i) { - return idec.vd, true - } - if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) { - // if *t implements an interface, this will catch if t implements an interface further - // ahead in interfaceDecoders - defaultDec, found := r.lookupInterfaceDecoder(valueType, false) - if !found { - defaultDec, _ = r.kindDecoders.Load(valueType.Kind()) - } - return newCondAddrDecoder(idec.vd, defaultDec), true - } - } - return nil, false -} - -// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON -// type. If no type is found, ErrNoTypeMapEntry is returned. -// -// LookupTypeMapEntry should not be called concurrently with any other Registry method. -func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) { - v, ok := r.typeMap.Load(bt) - if v == nil || !ok { - return nil, ErrNoTypeMapEntry{Type: bt} - } - return v.(reflect.Type), nil -} - -type interfaceValueEncoder struct { - i reflect.Type - ve ValueEncoder -} - -type interfaceValueDecoder struct { - i reflect.Type - vd ValueDecoder -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go deleted file mode 100644 index 14c9fd2564..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "errors" - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var defaultSliceCodec = NewSliceCodec() - -// SliceCodec is the Codec used for slice values. -// -// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To -// configure the slice encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
-//
-// For example, to configure a mongo.Client to marshal nil Go slices as empty
-// BSON arrays, use:
-//
-//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
-//		NilSliceAsEmpty: true,
-//	})
-//
-// See the deprecation notice for each field in SliceCodec for the corresponding
-// settings.
-type SliceCodec struct {
-	// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
-	// BSON null.
-	//
-	// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
-	EncodeNilAsEmpty bool
-}
-
-// NewSliceCodec returns a SliceCodec with options opts.
-//
-// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See
-// [SliceCodec] for more details.
-func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
-	sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
-
-	codec := SliceCodec{}
-	if sliceOpt.EncodeNilAsEmpty != nil {
-		codec.EncodeNilAsEmpty = *sliceOpt.EncodeNilAsEmpty
-	}
-	return &codec
-}
-
-// EncodeValue is the ValueEncoder for slice types.
-func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
-	if !val.IsValid() || val.Kind() != reflect.Slice {
-		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
-	}
-
-	if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
-		return vw.WriteNull()
-	}
-
-	// If we have a []byte we want to treat it as a binary instead of as an array.
-	if val.Type().Elem() == tByte {
-		byteSlice := make([]byte, val.Len())
-		reflect.Copy(reflect.ValueOf(byteSlice), val)
-		return vw.WriteBinary(byteSlice)
-	}
-
-	// If we have a []primitive.E we want to treat it as a document instead of as an array.
-	if val.Type() == tD || val.Type().ConvertibleTo(tD) {
-		d := val.Convert(tD).Interface().(primitive.D)
-
-		dw, err := vw.WriteDocument()
-		if err != nil {
-			return err
-		}
-
-		for _, e := range d {
-			err = encodeElement(ec, dw, e)
-			if err != nil {
-				return err
-			}
-		}
-
-		return dw.WriteDocumentEnd()
-	}
-
-	aw, err := vw.WriteArray()
-	if err != nil {
-		return err
-	}
-
-	elemType := val.Type().Elem()
-	encoder, err := ec.LookupEncoder(elemType)
-	if err != nil && elemType.Kind() != reflect.Interface {
-		return err
-	}
-
-	for idx := 0; idx < val.Len(); idx++ {
-		currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
-		if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
-			return lookupErr
-		}
-
-		vw, err := aw.WriteArrayElement()
-		if err != nil {
-			return err
-		}
-
-		if errors.Is(lookupErr, errInvalidValue) {
-			err = vw.WriteNull()
-			if err != nil {
-				return err
-			}
-			continue
-		}
-
-		err = currEncoder.EncodeValue(ec, vw, currVal)
-		if err != nil {
-			return err
-		}
-	}
-	return aw.WriteArrayEnd()
-}
-
-// DecodeValue is the ValueDecoder for slice types. 
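
The two special cases above ([]byte as BSON binary, D-convertible slices as documents) are visible from the wire format. A short sketch:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	raw, _ := bson.Marshal(bson.D{
		{Key: "bin", Value: []byte{0x01, 0x02}}, // []byte -> BSON binary
		{Key: "arr", Value: []int32{1, 2}},      // other slices -> BSON array
	})

	doc := bson.Raw(raw)
	fmt.Println(doc.Lookup("bin").Type == bsontype.Binary) // true
	fmt.Println(doc.Lookup("arr").Type == bsontype.Array)  // true
}
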
-func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Slice { - return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Array: - case bsontype.Null: - val.Set(reflect.Zero(val.Type())) - return vr.ReadNull() - case bsontype.Undefined: - val.Set(reflect.Zero(val.Type())) - return vr.ReadUndefined() - case bsontype.Type(0), bsontype.EmbeddedDocument: - if val.Type().Elem() != tE { - return fmt.Errorf("cannot decode document into %s", val.Type()) - } - case bsontype.Binary: - if val.Type().Elem() != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a binary into a byte array, got %v", vrType) - } - data, subtype, err := vr.ReadBinary() - if err != nil { - return err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(data))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(data))) - return nil - case bsontype.String: - if sliceType := val.Type().Elem(); sliceType != tByte { - return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType) - } - str, err := vr.ReadString() - if err != nil { - return err - } - byteStr := []byte(str) - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr))) - } - val.SetLen(0) - val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr))) - return nil - default: - return fmt.Errorf("cannot decode %v into a slice", vrType) - } - - var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error) - switch val.Type().Elem() { - case tE: - dc.Ancestor = val.Type() - elemsFunc = defaultValueDecoders.decodeD - default: - elemsFunc = defaultValueDecoders.decodeDefault - } - - elems, err := elemsFunc(dc, vr, val) - if err != nil { - return err - } - - if val.IsNil() { - val.Set(reflect.MakeSlice(val.Type(), 0, len(elems))) - } - - val.SetLen(0) - val.Set(reflect.Append(val, elems...)) - - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go deleted file mode 100644 index a8f885a854..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// StringCodec is the Codec used for string values. -// -// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To -// override the default string encode and decode behavior, create a new registry -// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new -// encoder and decoder for strings. 
-// -// For example, -// -// reg := bson.NewRegistry() -// reg.RegisterKindEncoder(reflect.String, myStringEncoder) -// reg.RegisterKindDecoder(reflect.String, myStringDecoder) -type StringCodec struct { - // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. - // If false, a string made from the raw object ID bytes will be used. Defaults to true. - // - // Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. - DecodeObjectIDAsHex bool -} - -var ( - defaultStringCodec = NewStringCodec() - - // Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be - // used by collection type decoders (e.g. map, slice, etc) to set individual values in a - // collection. - _ typeDecoder = defaultStringCodec -) - -// NewStringCodec returns a StringCodec with options opts. -// -// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See -// [StringCodec] for more details. -func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { - stringOpt := bsonoptions.MergeStringCodecOptions(opts...) - return &StringCodec{*stringOpt.DecodeObjectIDAsHex} -} - -// EncodeValue is the ValueEncoder for string types. -func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if val.Kind() != reflect.String { - return ValueEncoderError{ - Name: "StringEncodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: val, - } - } - - return vw.WriteString(val.String()) -} - -func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t.Kind() != reflect.String { - return emptyValue, ValueDecoderError{ - Name: "StringDecodeValue", - Kinds: []reflect.Kind{reflect.String}, - Received: reflect.Zero(t), - } - } - - var str string - var err error - switch vr.Type() { - case bsontype.String: - str, err = vr.ReadString() - if err != nil { - return emptyValue, err - } - case bsontype.ObjectID: - oid, err := vr.ReadObjectID() - if err != nil { - return emptyValue, err - } - if sc.DecodeObjectIDAsHex { - str = oid.Hex() - } else { - // TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string. - byteArray := [12]byte(oid) - str = string(byteArray[:]) - } - case bsontype.Symbol: - str, err = vr.ReadSymbol() - if err != nil { - return emptyValue, err - } - case bsontype.Binary: - data, subtype, err := vr.ReadBinary() - if err != nil { - return emptyValue, err - } - if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { - return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"} - } - str = string(data) - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type()) - } - - return reflect.ValueOf(str), nil -} - -// DecodeValue is the ValueDecoder for string types. 
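
The ObjectID branch of decodeType above is the one most users meet in practice. A short sketch of the default behavior:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	// DecodeObjectIDAsHex defaults to true, so an ObjectID value decodes
	// into a Go string field as its 24-character hex form.
	oid := primitive.NewObjectID()
	raw, _ := bson.Marshal(bson.D{{Key: "id", Value: oid}})

	var out struct {
		ID string `bson:"id"`
	}
	_ = bson.Unmarshal(raw, &out)
	fmt.Println(out.ID == oid.Hex()) // true
}
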
-func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
-	if !val.CanSet() || val.Kind() != reflect.String {
-		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
-	}
-
-	elem, err := sc.decodeType(dctx, vr, val.Type())
-	if err != nil {
-		return err
-	}
-
-	val.SetString(elem.String())
-	return nil
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
deleted file mode 100644
index f8d9690c13..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go
+++ /dev/null
@@ -1,736 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsoncodec
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"go.mongodb.org/mongo-driver/bson/bsonoptions"
-	"go.mongodb.org/mongo-driver/bson/bsonrw"
-	"go.mongodb.org/mongo-driver/bson/bsontype"
-)
-
-// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
-type DecodeError struct {
-	keys    []string
-	wrapped error
-}
-
-// Unwrap returns the underlying error.
-func (de *DecodeError) Unwrap() error {
-	return de.wrapped
-}
-
-// Error implements the error interface.
-func (de *DecodeError) Error() string {
-	// The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
-	// stack of BSON keys, so we call de.Keys(), which reverses them.
-	keyPath := strings.Join(de.Keys(), ".")
-	return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
-}
-
-// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
-// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
-// a string, the keys slice will be ["a", "b", "c"].
-func (de *DecodeError) Keys() []string {
-	reversedKeys := make([]string, 0, len(de.keys))
-	for idx := len(de.keys) - 1; idx >= 0; idx-- {
-		reversedKeys = append(reversedKeys, de.keys[idx])
-	}
-
-	return reversedKeys
-}
-
-// Zeroer allows custom struct types to implement a report of zero
-// state. All struct types that don't implement Zeroer or where IsZero
-// returns false are considered to be not zero.
-type Zeroer interface {
-	IsZero() bool
-}
-
-// StructCodec is the Codec used for struct values.
-//
-// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0.
-// To configure the struct encode and decode behavior, use the configuration
-// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
-// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode
-// and decode behavior for a mongo.Client, use
-// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
-//
-// For example, to configure a mongo.Client to omit zero-value structs when
-// using the "omitempty" struct tag, use:
-//
-//	opt := options.Client().SetBSONOptions(&options.BSONOptions{
-//		OmitZeroStruct: true,
-//	})
-//
-// See the deprecation notice for each field in StructCodec for the corresponding
-// settings. 
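
The DecodeError key-path behavior described above can be observed by forcing a type mismatch two levels deep. A sketch:

package main

import (
	"errors"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

func main() {
	raw, _ := bson.Marshal(bson.D{
		{Key: "a", Value: bson.D{{Key: "b", Value: "not a number"}}},
	})

	var out struct {
		A struct {
			B int `bson:"b"`
		} `bson:"a"`
	}
	err := bson.Unmarshal(raw, &out)

	var de *bsoncodec.DecodeError
	if errors.As(err, &de) {
		fmt.Println(de.Keys()) // [a b], top-down as documented above
	}
}
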
-type StructCodec struct { - cache sync.Map // map[reflect.Type]*structDescription - parser StructTagParser - - // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. - DecodeZeroStruct bool - - // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the - // destination value passed to Decode before unmarshaling BSON documents into them. - // - // Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0. - DecodeDeepZeroInline bool - - // EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g. - // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag - // option is set. - // - // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. - EncodeOmitDefaultStruct bool - - // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. - // - // Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be - // supported in Go Driver 2.0. - AllowUnexportedFields bool - - // OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is - // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The - // default value is true. - // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or - // options.BSONOptions.ErrorOnInlineDuplicates instead. - OverwriteDuplicatedInlinedFields bool -} - -var _ ValueEncoder = &StructCodec{} -var _ ValueDecoder = &StructCodec{} - -// NewStructCodec returns a StructCodec that uses p for struct tag parsing. -// -// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See -// [StructCodec] for more details. -func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { - if p == nil { - return nil, errors.New("a StructTagParser must be provided to NewStructCodec") - } - - structOpt := bsonoptions.MergeStructCodecOptions(opts...) - - codec := &StructCodec{ - parser: p, - } - - if structOpt.DecodeZeroStruct != nil { - codec.DecodeZeroStruct = *structOpt.DecodeZeroStruct - } - if structOpt.DecodeDeepZeroInline != nil { - codec.DecodeDeepZeroInline = *structOpt.DecodeDeepZeroInline - } - if structOpt.EncodeOmitDefaultStruct != nil { - codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct - } - if structOpt.OverwriteDuplicatedInlinedFields != nil { - codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields - } - if structOpt.AllowUnexportedFields != nil { - codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields - } - - return codec, nil -} - -// EncodeValue handles encoding generic struct types. 
-func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Kind() != reflect.Struct { - return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates) - if err != nil { - return err - } - - dw, err := vw.WriteDocument() - if err != nil { - return err - } - var rv reflect.Value - for _, desc := range sd.fl { - if desc.inline == nil { - rv = val.Field(desc.idx) - } else { - rv, err = fieldByIndexErr(val, desc.inline) - if err != nil { - continue - } - } - - desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - - if err != nil && !errors.Is(err, errInvalidValue) { - return err - } - - if errors.Is(err, errInvalidValue) { - if desc.omitEmpty { - continue - } - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - err = vw2.WriteNull() - if err != nil { - return err - } - continue - } - - if desc.encoder == nil { - return ErrNoEncoder{Type: rv.Type()} - } - - encoder := desc.encoder - - var empty bool - if cz, ok := encoder.(CodecZeroer); ok { - empty = cz.IsTypeZero(rv.Interface()) - } else if rv.Kind() == reflect.Interface { - // isEmpty will not treat an interface rv as an interface, so we need to check for the - // nil interface separately. - empty = rv.IsNil() - } else { - empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) - } - if desc.omitEmpty && empty { - continue - } - - vw2, err := dw.WriteDocumentElement(desc.name) - if err != nil { - return err - } - - ectx := EncodeContext{ - Registry: ec.Registry, - MinSize: desc.minSize || ec.MinSize, - errorOnInlineDuplicates: ec.errorOnInlineDuplicates, - stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt, - nilMapAsEmpty: ec.nilMapAsEmpty, - nilSliceAsEmpty: ec.nilSliceAsEmpty, - nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty, - omitZeroStruct: ec.omitZeroStruct, - useJSONStructTags: ec.useJSONStructTags, - } - err = encoder.EncodeValue(ectx, vw2, rv) - if err != nil { - return err - } - } - - if sd.inlineMap >= 0 { - rv := val.Field(sd.inlineMap) - collisionFn := func(key string) bool { - _, exists := sd.fm[key] - return exists - } - - return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn) - } - - return dw.WriteDocumentEnd() -} - -func newDecodeError(key string, original error) error { - var de *DecodeError - if !errors.As(original, &de) { - return &DecodeError{ - keys: []string{key}, - wrapped: original, - } - } - - de.keys = append(de.keys, key) - return de -} - -// DecodeValue implements the Codec interface. -// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. -// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. 
-func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Kind() != reflect.Struct { - return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val} - } - - switch vrType := vr.Type(); vrType { - case bsontype.Type(0), bsontype.EmbeddedDocument: - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return err - } - - val.Set(reflect.Zero(val.Type())) - return nil - default: - return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) - } - - sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false) - if err != nil { - return err - } - - if sc.DecodeZeroStruct || dc.zeroStructs { - val.Set(reflect.Zero(val.Type())) - } - if sc.DecodeDeepZeroInline && sd.inline { - val.Set(deepZero(val.Type())) - } - - var decoder ValueDecoder - var inlineMap reflect.Value - if sd.inlineMap >= 0 { - inlineMap = val.Field(sd.inlineMap) - decoder, err = dc.LookupDecoder(inlineMap.Type().Elem()) - if err != nil { - return err - } - } - - dr, err := vr.ReadDocument() - if err != nil { - return err - } - - for { - name, vr, err := dr.ReadElement() - if errors.Is(err, bsonrw.ErrEOD) { - break - } - if err != nil { - return err - } - - fd, exists := sd.fm[name] - if !exists { - // if the original name isn't found in the struct description, try again with the name in lowercase - // this could match if a BSON tag isn't specified because by default, describeStruct lowercases all field - // names - fd, exists = sd.fm[strings.ToLower(name)] - } - - if !exists { - if sd.inlineMap < 0 { - // The encoding/json package requires a flag to return on error for non-existent fields. - // This functionality seems appropriate for the struct codec. - err = vr.Skip() - if err != nil { - return err - } - continue - } - - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - - elem := reflect.New(inlineMap.Type().Elem()).Elem() - dc.Ancestor = inlineMap.Type() - err = decoder.DecodeValue(dc, vr, elem) - if err != nil { - return err - } - inlineMap.SetMapIndex(reflect.ValueOf(name), elem) - continue - } - - var field reflect.Value - if fd.inline == nil { - field = val.Field(fd.idx) - } else { - field, err = getInlineField(val, fd.inline) - if err != nil { - return err - } - } - - if !field.CanSet() { // Being settable is a super set of being addressable. 
- innerErr := fmt.Errorf("field %v is not settable", field) - return newDecodeError(fd.name, innerErr) - } - if field.Kind() == reflect.Ptr && field.IsNil() { - field.Set(reflect.New(field.Type().Elem())) - } - field = field.Addr() - - dctx := DecodeContext{ - Registry: dc.Registry, - Truncate: fd.truncate || dc.Truncate, - defaultDocumentType: dc.defaultDocumentType, - binaryAsSlice: dc.binaryAsSlice, - useJSONStructTags: dc.useJSONStructTags, - useLocalTimeZone: dc.useLocalTimeZone, - zeroMaps: dc.zeroMaps, - zeroStructs: dc.zeroStructs, - } - - if fd.decoder == nil { - return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()}) - } - - err = fd.decoder.DecodeValue(dctx, vr, field.Elem()) - if err != nil { - return newDecodeError(fd.name, err) - } - } - - return nil -} - -func isEmpty(v reflect.Value, omitZeroStruct bool) bool { - kind := v.Kind() - if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { - return v.Interface().(Zeroer).IsZero() - } - switch kind { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Struct: - if !omitZeroStruct { - return false - } - vt := v.Type() - if vt == tTime { - return v.Interface().(time.Time).IsZero() - } - numField := vt.NumField() - for i := 0; i < numField; i++ { - ff := vt.Field(i) - if ff.PkgPath != "" && !ff.Anonymous { - continue // Private field - } - if !isEmpty(v.Field(i), omitZeroStruct) { - return false - } - } - return true - } - return !v.IsValid() || v.IsZero() -} - -type structDescription struct { - fm map[string]fieldDescription - fl []fieldDescription - inlineMap int - inline bool -} - -type fieldDescription struct { - name string // BSON key name - fieldName string // struct field name - idx int - omitEmpty bool - minSize bool - truncate bool - inline []int - encoder ValueEncoder - decoder ValueDecoder -} - -type byIndex []fieldDescription - -func (bi byIndex) Len() int { return len(bi) } - -func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] } - -func (bi byIndex) Less(i, j int) bool { - // If a field is inlined, its index in the top level struct is stored at inline[0] - iIdx, jIdx := bi[i].idx, bi[j].idx - if len(bi[i].inline) > 0 { - iIdx = bi[i].inline[0] - } - if len(bi[j].inline) > 0 { - jIdx = bi[j].inline[0] - } - if iIdx != jIdx { - return iIdx < jIdx - } - for k, biik := range bi[i].inline { - if k >= len(bi[j].inline) { - return false - } - if biik != bi[j].inline[k] { - return biik < bi[j].inline[k] - } - } - return len(bi[i].inline) < len(bi[j].inline) -} - -func (sc *StructCodec) describeStruct( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - // We need to analyze the struct, including getting the tags, collecting - // information about inlining, and create a map of the field name to the field. - if v, ok := sc.cache.Load(t); ok { - return v.(*structDescription), nil - } - // TODO(charlie): Only describe the struct once when called - // concurrently with the same type. 
- ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates) - if err != nil { - return nil, err - } - if v, loaded := sc.cache.LoadOrStore(t, ds); loaded { - ds = v.(*structDescription) - } - return ds, nil -} - -func (sc *StructCodec) describeStructSlow( - r *Registry, - t reflect.Type, - useJSONStructTags bool, - errorOnDuplicates bool, -) (*structDescription, error) { - numFields := t.NumField() - sd := &structDescription{ - fm: make(map[string]fieldDescription, numFields), - fl: make([]fieldDescription, 0, numFields), - inlineMap: -1, - } - - var fields []fieldDescription - for i := 0; i < numFields; i++ { - sf := t.Field(i) - if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { - // field is private or unexported fields aren't allowed, ignore - continue - } - - sfType := sf.Type - encoder, err := r.LookupEncoder(sfType) - if err != nil { - encoder = nil - } - decoder, err := r.LookupDecoder(sfType) - if err != nil { - decoder = nil - } - - description := fieldDescription{ - fieldName: sf.Name, - idx: i, - encoder: encoder, - decoder: decoder, - } - - var stags StructTags - // If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser - // instead of the parser defined on the codec. - if useJSONStructTags { - stags, err = JSONFallbackStructTagParser.ParseStructTags(sf) - } else { - stags, err = sc.parser.ParseStructTags(sf) - } - if err != nil { - return nil, err - } - if stags.Skip { - continue - } - description.name = stags.Name - description.omitEmpty = stags.OmitEmpty - description.minSize = stags.MinSize - description.truncate = stags.Truncate - - if stags.Inline { - sd.inline = true - switch sfType.Kind() { - case reflect.Map: - if sd.inlineMap >= 0 { - return nil, errors.New("(struct " + t.String() + ") multiple inline maps") - } - if sfType.Key() != tString { - return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys") - } - sd.inlineMap = description.idx - case reflect.Ptr: - sfType = sfType.Elem() - if sfType.Kind() != reflect.Struct { - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - fallthrough - case reflect.Struct: - inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates) - if err != nil { - return nil, err - } - for _, fd := range inlinesf.fl { - if fd.inline == nil { - fd.inline = []int{i, fd.idx} - } else { - fd.inline = append([]int{i}, fd.inline...) - } - fields = append(fields, fd) - - } - default: - return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) - } - continue - } - fields = append(fields, description) - } - - // Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name - sort.Slice(fields, func(i, j int) bool { - x := fields - // sort field by name, breaking ties with depth, then - // breaking ties with index sequence. - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].inline) != len(x[j].inline) { - return len(x[i].inline) < len(x[j].inline) - } - return byIndex(x).Less(i, j) - }) - - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. 
- fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - sd.fl = append(sd.fl, fi) - sd.fm[name] = fi - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates { - return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name) - } - sd.fl = append(sd.fl, dominant) - sd.fm[name] = dominant - } - - sort.Sort(byIndex(sd.fl)) - - return sd, nil -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's inlining rules. If there are multiple top-level -// fields, the boolean will be false: This condition is an error in Go -// and we skip all the fields. -func dominantField(fields []fieldDescription) (fieldDescription, bool) { - // The fields are sorted in increasing index-length order, then by presence of tag. - // That means that the first field is the dominant one. We need only check - // for error cases: two fields at top level. - if len(fields) > 1 && - len(fields[0].inline) == len(fields[1].inline) { - return fieldDescription{}, false - } - return fields[0], true -} - -func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { - defer func() { - if recovered := recover(); recovered != nil { - switch r := recovered.(type) { - case string: - err = fmt.Errorf("%s", r) - case error: - err = r - } - } - }() - - result = v.FieldByIndex(index) - return -} - -func getInlineField(val reflect.Value, index []int) (reflect.Value, error) { - field, err := fieldByIndexErr(val, index) - if err == nil { - return field, nil - } - - // if parent of this element doesn't exist, fix its parent - inlineParent := index[:len(index)-1] - var fParent reflect.Value - if fParent, err = fieldByIndexErr(val, inlineParent); err != nil { - fParent, err = getInlineField(val, inlineParent) - if err != nil { - return fParent, err - } - } - fParent.Set(reflect.New(fParent.Type().Elem())) - - return fieldByIndexErr(val, index) -} - -// DeepZero returns recursive zero object -func deepZero(st reflect.Type) (result reflect.Value) { - if st.Kind() == reflect.Struct { - numField := st.NumField() - for i := 0; i < numField; i++ { - if result == emptyValue { - result = reflect.Indirect(reflect.New(st)) - } - f := result.Field(i) - if f.CanInterface() { - if f.Type().Kind() == reflect.Struct { - result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem()))) - } - } - } - } - return result -} - -// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside -func recursivePointerTo(v reflect.Value) reflect.Value { - v = reflect.Indirect(v) - result := reflect.New(v.Type()) - if v.Kind() == reflect.Struct { - for i := 0; i < v.NumField(); i++ { - if f := v.Field(i); f.Kind() == reflect.Ptr { - if f.Elem().Kind() == reflect.Struct { - result.Elem().Field(i).Set(recursivePointerTo(f)) - } - } - } - } - - return result -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go deleted file mode 100644 index 18d85bfb03..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "reflect" - "strings" -) - -// StructTagParser returns the struct tags for a given struct field. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTagParser interface { - ParseStructTags(reflect.StructField) (StructTags, error) -} - -// StructTagParserFunc is an adapter that allows a generic function to be used -// as a StructTagParser. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTagParserFunc func(reflect.StructField) (StructTags, error) - -// ParseStructTags implements the StructTagParser interface. -func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) { - return stpf(sf) -} - -// StructTags represents the struct tag fields that the StructCodec uses during -// the encoding and decoding process. -// -// In the case of a struct, the lowercased field name is used as the key for each exported -// field but this behavior may be changed using a struct tag. The tag may also contain flags to -// adjust the marshalling behavior for the field. -// -// The properties are defined below: -// -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. -// -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. -// -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. -// -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. -// -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. -// -// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0. -type StructTags struct { - Name string - OmitEmpty bool - MinSize bool - Truncate bool - Inline bool - Skip bool -} - -// DefaultStructTagParser is the StructTagParser used by the StructCodec by default. -// It will handle the bson struct tag. See the documentation for StructTags to see -// what each of the returned fields means. -// -// If there is no name in the struct tag fields, the struct field name is lowercased. -// The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) bson:"[][,[,]]" (...)` -// -// An example: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -// A struct tag either consisting entirely of '-' or with a bson key with a -// value consisting entirely of '-' will return a StructTags with Skip true and -// the remaining fields will be their default values. -// -// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0. 
-var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - return parseTags(key, tag) -} - -func parseTags(key string, tag string) (StructTags, error) { - var st StructTags - if tag == "-" { - st.Skip = true - return st, nil - } - - for idx, str := range strings.Split(tag, ",") { - if idx == 0 && str != "" { - key = str - } - switch str { - case "omitempty": - st.OmitEmpty = true - case "minsize": - st.MinSize = true - case "truncate": - st.Truncate = true - case "inline": - st.Inline = true - } - } - - st.Name = key - - return st, nil -} - -// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser -// but will also fallback to parsing the json tag instead on a field where the -// bson tag isn't available. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and -// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead. -var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) { - key := strings.ToLower(sf.Name) - tag, ok := sf.Tag.Lookup("bson") - if !ok { - tag, ok = sf.Tag.Lookup("json") - } - if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { - tag = string(sf.Tag) - } - - return parseTags(key, tag) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go deleted file mode 100644 index 22fb762c41..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -const ( - timeFormatString = "2006-01-02T15:04:05.999Z07:00" -) - -// TimeCodec is the Codec used for time.Time values. -// -// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. -// To configure the time.Time encode and decode behavior, use the configuration -// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode -// and decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. -// -// For example, to configure a mongo.Client to ..., use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// UseLocalTimeZone: true, -// }) -// -// See the deprecation notice for each field in TimeCodec for the corresponding -// settings. -type TimeCodec struct { - // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. - // - // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone - // instead. 
- UseLocalTimeZone bool -} - -var ( - defaultTimeCodec = NewTimeCodec() - - // Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultTimeCodec -) - -// NewTimeCodec returns a TimeCodec with options opts. -// -// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See -// [TimeCodec] for more details. -func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { - timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) - - codec := TimeCodec{} - if timeOpt.UseLocalTimeZone != nil { - codec.UseLocalTimeZone = *timeOpt.UseLocalTimeZone - } - return &codec -} - -func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - if t != tTime { - return emptyValue, ValueDecoderError{ - Name: "TimeDecodeValue", - Types: []reflect.Type{tTime}, - Received: reflect.Zero(t), - } - } - - var timeVal time.Time - switch vrType := vr.Type(); vrType { - case bsontype.DateTime: - dt, err := vr.ReadDateTime() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(dt/1000, dt%1000*1000000) - case bsontype.String: - // assume strings are in the isoTimeFormat - timeStr, err := vr.ReadString() - if err != nil { - return emptyValue, err - } - timeVal, err = time.Parse(timeFormatString, timeStr) - if err != nil { - return emptyValue, err - } - case bsontype.Int64: - i64, err := vr.ReadInt64() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(i64/1000, i64%1000*1000000) - case bsontype.Timestamp: - t, _, err := vr.ReadTimestamp() - if err != nil { - return emptyValue, err - } - timeVal = time.Unix(int64(t), 0) - case bsontype.Null: - if err := vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err := vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType) - } - - if !tc.UseLocalTimeZone && !dc.useLocalTimeZone { - timeVal = timeVal.UTC() - } - return reflect.ValueOf(timeVal), nil -} - -// DecodeValue is the ValueDecoderFunc for time.Time. -func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() || val.Type() != tTime { - return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} - } - - elem, err := tc.decodeType(dc, vr, tTime) - if err != nil { - return err - } - - val.Set(elem) - return nil -} - -// EncodeValue is the ValueEncoderFunc for time.TIme. -func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - if !val.IsValid() || val.Type() != tTime { - return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} - } - tt := val.Interface().(time.Time) - dt := primitive.NewDateTimeFromTime(tt) - return vw.WriteDateTime(int64(dt)) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go deleted file mode 100644 index 6ade17b7d3..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "encoding/json" - "net/url" - "reflect" - "time" - - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -var tBool = reflect.TypeOf(false) -var tFloat64 = reflect.TypeOf(float64(0)) -var tInt32 = reflect.TypeOf(int32(0)) -var tInt64 = reflect.TypeOf(int64(0)) -var tString = reflect.TypeOf("") -var tTime = reflect.TypeOf(time.Time{}) - -var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem() -var tByteSlice = reflect.TypeOf([]byte(nil)) -var tByte = reflect.TypeOf(byte(0x00)) -var tURL = reflect.TypeOf(url.URL{}) -var tJSONNumber = reflect.TypeOf(json.Number("")) - -var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem() -var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem() -var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() -var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem() -var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem() - -var tBinary = reflect.TypeOf(primitive.Binary{}) -var tUndefined = reflect.TypeOf(primitive.Undefined{}) -var tOID = reflect.TypeOf(primitive.ObjectID{}) -var tDateTime = reflect.TypeOf(primitive.DateTime(0)) -var tNull = reflect.TypeOf(primitive.Null{}) -var tRegex = reflect.TypeOf(primitive.Regex{}) -var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{}) -var tDBPointer = reflect.TypeOf(primitive.DBPointer{}) -var tJavaScript = reflect.TypeOf(primitive.JavaScript("")) -var tSymbol = reflect.TypeOf(primitive.Symbol("")) -var tTimestamp = reflect.TypeOf(primitive.Timestamp{}) -var tDecimal = reflect.TypeOf(primitive.Decimal128{}) -var tMinKey = reflect.TypeOf(primitive.MinKey{}) -var tMaxKey = reflect.TypeOf(primitive.MaxKey{}) -var tD = reflect.TypeOf(primitive.D{}) -var tA = reflect.TypeOf(primitive.A{}) -var tE = reflect.TypeOf(primitive.E{}) - -var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) -var tCoreArray = reflect.TypeOf(bsoncore.Array{}) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go deleted file mode 100644 index 8525472769..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsoncodec - -import ( - "fmt" - "math" - "reflect" - - "go.mongodb.org/mongo-driver/bson/bsonoptions" - "go.mongodb.org/mongo-driver/bson/bsonrw" - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -// UIntCodec is the Codec used for uint values. -// -// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To -// configure the uint encode and decode behavior, use the configuration methods -// on a [go.mongodb.org/mongo-driver/bson.Encoder] or -// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and -// decode behavior for a mongo.Client, use -// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
-// -// For example, to configure a mongo.Client to marshal Go uint values as the -// minimum BSON int size that can represent the value, use: -// -// opt := options.Client().SetBSONOptions(&options.BSONOptions{ -// IntMinSize: true, -// }) -// -// See the deprecation notice for each field in UIntCodec for the corresponding -// settings. -type UIntCodec struct { - // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the - // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. - // - // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. - EncodeToMinSize bool -} - -var ( - defaultUIntCodec = NewUIntCodec() - - // Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used - // by collection type decoders (e.g. map, slice, etc) to set individual values in a collection. - _ typeDecoder = defaultUIntCodec -) - -// NewUIntCodec returns a UIntCodec with options opts. -// -// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See -// [UIntCodec] for more details. -func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { - uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) - - codec := UIntCodec{} - if uintOpt.EncodeToMinSize != nil { - codec.EncodeToMinSize = *uintOpt.EncodeToMinSize - } - return &codec -} - -// EncodeValue is the ValueEncoder for uint types. -func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { - switch val.Kind() { - case reflect.Uint8, reflect.Uint16: - return vw.WriteInt32(int32(val.Uint())) - case reflect.Uint, reflect.Uint32, reflect.Uint64: - u64 := val.Uint() - - // If ec.MinSize or if encodeToMinSize is true for a non-uint64 value we should write val as an int32 - useMinSize := ec.MinSize || (uic.EncodeToMinSize && val.Kind() != reflect.Uint64) - - if u64 <= math.MaxInt32 && useMinSize { - return vw.WriteInt32(int32(u64)) - } - if u64 > math.MaxInt64 { - return fmt.Errorf("%d overflows int64", u64) - } - return vw.WriteInt64(int64(u64)) - } - - return ValueEncoderError{ - Name: "UintEncodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } -} - -func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) { - var i64 int64 - var err error - switch vrType := vr.Type(); vrType { - case bsontype.Int32: - i32, err := vr.ReadInt32() - if err != nil { - return emptyValue, err - } - i64 = int64(i32) - case bsontype.Int64: - i64, err = vr.ReadInt64() - if err != nil { - return emptyValue, err - } - case bsontype.Double: - f64, err := vr.ReadDouble() - if err != nil { - return emptyValue, err - } - if !dc.Truncate && math.Floor(f64) != f64 { - return emptyValue, errCannotTruncate - } - if f64 > float64(math.MaxInt64) { - return emptyValue, fmt.Errorf("%g overflows int64", f64) - } - i64 = int64(f64) - case bsontype.Boolean: - b, err := vr.ReadBoolean() - if err != nil { - return emptyValue, err - } - if b { - i64 = 1 - } - case bsontype.Null: - if err = vr.ReadNull(); err != nil { - return emptyValue, err - } - case bsontype.Undefined: - if err = vr.ReadUndefined(); err != nil { - return emptyValue, err - } - default: - return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType) - } - - switch t.Kind() { - case reflect.Uint8: - if i64 < 0 || i64 > math.MaxUint8 { - return emptyValue, fmt.Errorf("%d overflows uint8", i64) - } 
- - return reflect.ValueOf(uint8(i64)), nil - case reflect.Uint16: - if i64 < 0 || i64 > math.MaxUint16 { - return emptyValue, fmt.Errorf("%d overflows uint16", i64) - } - - return reflect.ValueOf(uint16(i64)), nil - case reflect.Uint32: - if i64 < 0 || i64 > math.MaxUint32 { - return emptyValue, fmt.Errorf("%d overflows uint32", i64) - } - - return reflect.ValueOf(uint32(i64)), nil - case reflect.Uint64: - if i64 < 0 { - return emptyValue, fmt.Errorf("%d overflows uint64", i64) - } - - return reflect.ValueOf(uint64(i64)), nil - case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint - return emptyValue, fmt.Errorf("%d overflows uint", i64) - } - - return reflect.ValueOf(uint(i64)), nil - default: - return emptyValue, ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: reflect.Zero(t), - } - } -} - -// DecodeValue is the ValueDecoder for uint types. -func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { - if !val.CanSet() { - return ValueDecoderError{ - Name: "UintDecodeValue", - Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint}, - Received: val, - } - } - - elem, err := uic.decodeType(dc, vr, val.Type()) - if err != nil { - return err - } - - val.SetUint(elem.Uint()) - return nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go deleted file mode 100644 index 996bd17127..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type ByteSliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -} - -// ByteSliceCodec creates a new *ByteSliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func ByteSliceCodec() *ByteSliceCodecOptions { - return &ByteSliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead. -func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions { - bs.EncodeNilAsEmpty = &b - return bs -} - -// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. 
-func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
-	bs := ByteSliceCodec()
-	for _, opt := range opts {
-		if opt == nil {
-			continue
-		}
-		if opt.EncodeNilAsEmpty != nil {
-			bs.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
-		}
-	}
-
-	return bs
-}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
deleted file mode 100644
index c40973c8d4..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2022-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-// Package bsonoptions defines the optional configurations for the BSON codecs.
-package bsonoptions
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
deleted file mode 100644
index f522c7e03f..0000000000
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-package bsonoptions
-
-// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
-//
-// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
-// and unmarshal behavior instead.
-type EmptyInterfaceCodecOptions struct {
-	DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
-}
-
-// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
-//
-// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
-// and unmarshal behavior instead.
-func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
-	return &EmptyInterfaceCodecOptions{}
-}
-
-// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to []byte instead of primitive.Binary. Defaults to false.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
-func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
-	e.DecodeBinaryAsSlice = &b
-	return e
-}
-
-// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
-//
-// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
-// single options struct instead.
-func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions { - e := EmptyInterfaceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeBinaryAsSlice != nil { - e.DecodeBinaryAsSlice = opt.DecodeBinaryAsSlice - } - } - - return e -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go deleted file mode 100644 index a7a7c1d980..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// MapCodecOptions represents all possible options for map encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type MapCodecOptions struct { - DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. - EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. - // Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must - // either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a - // string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the - // encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override - // TextMarshaler/TextUnmarshaler. Defaults to false. - EncodeKeysWithStringer *bool -} - -// MapCodec creates a new *MapCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func MapCodec() *MapCodecOptions { - return &MapCodecOptions{} -} - -// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead. -func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions { - t.DecodeZerosMap = &b - return t -} - -// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead. -func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { - t.EncodeNilAsEmpty = &b - return t -} - -// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the -// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key -// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with -// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer -// will override TextMarshaler/TextUnmarshaler. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead. 
-func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions { - t.EncodeKeysWithStringer = &b - return t -} - -// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { - s := MapCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeZerosMap != nil { - s.DecodeZerosMap = opt.DecodeZerosMap - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - if opt.EncodeKeysWithStringer != nil { - s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go deleted file mode 100644 index 3c1e4f35ba..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// SliceCodecOptions represents all possible options for slice encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type SliceCodecOptions struct { - EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -} - -// SliceCodec creates a new *SliceCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func SliceCodec() *SliceCodecOptions { - return &SliceCodecOptions{} -} - -// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead. -func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions { - s.EncodeNilAsEmpty = &b - return s -} - -// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions { - s := SliceCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeNilAsEmpty != nil { - s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go deleted file mode 100644 index f8b76f996e..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultDecodeOIDAsHex = true - -// StringCodecOptions represents all possible options for string encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StringCodecOptions struct { - DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true. -} - -// StringCodec creates a new *StringCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func StringCodec() *StringCodecOptions { - return &StringCodecOptions{} -} - -// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made -// from the raw object ID bytes will be used. Defaults to true. -// -// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0. -func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions { - t.DecodeObjectIDAsHex = &b - return t -} - -// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions { - s := &StringCodecOptions{&defaultDecodeOIDAsHex} - for _, opt := range opts { - if opt == nil { - continue - } - if opt.DecodeObjectIDAsHex != nil { - s.DecodeObjectIDAsHex = opt.DecodeObjectIDAsHex - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go deleted file mode 100644 index 1cbfa32e8b..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -var defaultOverwriteDuplicatedInlinedFields = true - -// StructCodecOptions represents all possible options for struct encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type StructCodecOptions struct { - DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. - DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false. - EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. - AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. - OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true. 
-}
-
-// StructCodec creates a new *StructCodecOptions
-//
-// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
-// and unmarshal behavior instead.
-func StructCodec() *StructCodecOptions {
-	return &StructCodecOptions{}
-}
-
-// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
-func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
-	t.DecodeZeroStruct = &b
-	return t
-}
-
-// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
-//
-// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
-func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
-	t.DecodeDeepZeroInline = &b
-	return t
-}
-
-// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
-// its values set to their default value. Defaults to false.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
-func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
-	t.EncodeOmitDefaultStruct = &b
-	return t
-}
-
-// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
-// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
-// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
-// there are duplicate keys after the struct is inlined. Defaults to true.
-//
-// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
-func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
-	t.OverwriteDuplicatedInlinedFields = &b
-	return t
-}
-
-// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
-//
-// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
-// supported in Go Driver 2.0.
-func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
-	t.AllowUnexportedFields = &b
-	return t
-}
-
-// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
-//
-// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
-// single options struct instead.
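The forward path those notices describe is per-Encoder/per-Decoder configuration. A sketch of the decode side, assuming the bson.Decoder methods present in recent 1.x releases (`decodeZeroed` is an illustrative name):

    import (
        "go.mongodb.org/mongo-driver/bson"
        "go.mongodb.org/mongo-driver/bson/bsonrw"
    )

    func decodeZeroed(doc []byte, out interface{}) error {
        dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(doc))
        if err != nil {
            return err
        }
        dec.ZeroStructs() // same effect as SetDecodeZeroStruct(true) above
        return dec.Decode(out)
    }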
-func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { - s := &StructCodecOptions{ - OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields, - } - for _, opt := range opts { - if opt == nil { - continue - } - - if opt.DecodeZeroStruct != nil { - s.DecodeZeroStruct = opt.DecodeZeroStruct - } - if opt.DecodeDeepZeroInline != nil { - s.DecodeDeepZeroInline = opt.DecodeDeepZeroInline - } - if opt.EncodeOmitDefaultStruct != nil { - s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct - } - if opt.OverwriteDuplicatedInlinedFields != nil { - s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields - } - if opt.AllowUnexportedFields != nil { - s.AllowUnexportedFields = opt.AllowUnexportedFields - } - } - - return s -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go deleted file mode 100644 index 3f38433d22..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// TimeCodecOptions represents all possible options for time.Time encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type TimeCodecOptions struct { - UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false. -} - -// TimeCodec creates a new *TimeCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func TimeCodec() *TimeCodecOptions { - return &TimeCodecOptions{} -} - -// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead. -func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions { - t.UseLocalTimeZone = &b - return t -} - -// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions { - t := TimeCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.UseLocalTimeZone != nil { - t.UseLocalTimeZone = opt.UseLocalTimeZone - } - } - - return t -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go deleted file mode 100644 index 5091e4d963..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonoptions - -// UIntCodecOptions represents all possible options for uint encoding and decoding. -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -type UIntCodecOptions struct { - EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded to minimum size bsontype. Defaults to false. -} - -// UIntCodec creates a new *UIntCodecOptions -// -// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal -// and unmarshal behavior instead. -func UIntCodec() *UIntCodecOptions { - return &UIntCodecOptions{} -} - -// SetEncodeToMinSize specifies if all uints except uint64 should be encoded to minimum size bsontype. Defaults to false. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead. -func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions { - u.EncodeToMinSize = &b - return u -} - -// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion. -// -// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a -// single options struct instead. -func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions { - u := UIntCodec() - for _, opt := range opts { - if opt == nil { - continue - } - if opt.EncodeToMinSize != nil { - u.EncodeToMinSize = opt.EncodeToMinSize - } - } - - return u -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go deleted file mode 100644 index 1e25570b85..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "errors" - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" -) - -// Copier is a type that allows copying between ValueReaders, ValueWriters, and -// []byte values. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -type Copier struct{} - -// NewCopier creates a new Copier. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func NewCopier() Copier { - return Copier{} -} - -// CopyDocument handles copying a document from src to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func CopyDocument(dst ValueWriter, src ValueReader) error { - return Copier{}.CopyDocument(dst, src) -} - -// CopyDocument handles copying one document from the src to the dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0.
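Copier's document-copying entry points have no single named replacement; for the common case of duplicating a document held as raw bytes, one option (a sketch, not the driver's stated migration path) is a bson.Raw round-trip, which also validates the input:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	src, err := bson.Marshal(bson.M{"hello": "world"})
	if err != nil {
		panic(err)
	}

	// Re-parse and re-marshal instead of streaming through a Copier.
	var doc bson.Raw
	if err := bson.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	dst, err := bson.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(dst) == len(src)) // true
}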
-func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error { - dr, err := src.ReadDocument() - if err != nil { - return err - } - - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - return c.copyDocumentCore(dw, dr) -} - -// CopyArrayFromBytes copies the values from a BSON array represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error { - aw, err := dst.WriteArray() - if err != nil { - return err - } - - err = c.CopyBytesToArrayWriter(aw, src) - if err != nil { - return err - } - - return aw.WriteArrayEnd() -} - -// CopyDocumentFromBytes copies the values from a BSON document represented as a -// []byte to a ValueWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { - dw, err := dst.WriteDocument() - if err != nil { - return err - } - - err = c.CopyBytesToDocumentWriter(dw, src) - if err != nil { - return err - } - - return dw.WriteDocumentEnd() -} - -type writeElementFn func(key string) (ValueWriter, error) - -// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an -// ArrayWriter. -// -// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go -// Driver 2.0. -func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error { - wef := func(_ string) (ValueWriter, error) { - return dst.WriteArrayElement() - } - - return c.copyBytesToValueWriter(src, wef) -} - -// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a -// DocumentWriter. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { - wef := func(key string) (ValueWriter, error) { - return dst.WriteDocumentElement(key) - } - - return c.copyBytesToValueWriter(src, wef) -} - -func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error { - // TODO(skriptble): Create error types here. Anything that is a tag should be a property. - length, rem, ok := bsoncore.ReadLength(src) - if !ok { - return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src)) - } - if len(src) < int(length) { - return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", length, len(src)) - } - rem = rem[:length-4] - - var t bsontype.Type - var key string - var val bsoncore.Value - for { - t, rem, ok = bsoncore.ReadType(rem) - if !ok { - return io.EOF - } - if t == bsontype.Type(0) { - if len(rem) != 0 { - return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem) - } - break - } - - key, rem, ok = bsoncore.ReadKey(rem) - if !ok { - return fmt.Errorf("invalid key found. remaining bytes=%v", rem) - } - - // write as either array element or document element using writeElementFn - vw, err := wef(key) - if err != nil { - return err - } - - val, rem, ok = bsoncore.ReadValue(rem, t) - if !ok { - return fmt.Errorf("not enough bytes available to read type. 
bytes=%d type=%s", len(rem), t) - } - err = c.CopyValueFromBytes(vw, t, val.Data) - if err != nil { - return err - } - } - return nil -} - -// CopyDocumentToBytes copies an entire document from the ValueReader and -// returns it as bytes. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) { - return c.AppendDocumentBytes(nil, src) -} - -// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will -// append the result to dst. -// -// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.CopyDocument(vw, src) - dst = vw.buf - return dst, err -} - -// AppendArrayBytes copies an array from the ValueReader to dst. -// -// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. -func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) { - if br, ok := src.(BytesReader); ok { - _, dst, err := br.ReadValueBytes(dst) - return dst, err - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - vw.reset(dst) - - err := c.copyArray(vw, src) - dst = vw.buf - return dst, err -} - -// CopyValueFromBytes will write the value represtend by t and src to dst. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead. -func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { - if wvb, ok := dst.(BytesWriter); ok { - return wvb.WriteValueBytes(t, src) - } - - vr := vrPool.Get().(*valueReader) - defer vrPool.Put(vr) - - vr.reset(src) - vr.pushElement(t) - - return c.CopyValue(dst, vr) -} - -// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a -// []byte. -// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead. -func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) { - return c.AppendValueBytes(nil, src) -} - -// AppendValueBytes functions the same as CopyValueToBytes, but will append the -// result to dst. -// -// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go -// Driver 2.0. -func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) { - if br, ok := src.(BytesReader); ok { - return br.ReadValueBytes(dst) - } - - vw := vwPool.Get().(*valueWriter) - defer putValueWriter(vw) - - start := len(dst) - - vw.reset(dst) - vw.push(mElement) - - err := c.CopyValue(vw, src) - if err != nil { - return 0, dst, err - } - - return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil -} - -// CopyValue will copy a single value from src to dst. -// -// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be -// supported in Go Driver 2.0. 
-func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error { - var err error - switch src.Type() { - case bsontype.Double: - var f64 float64 - f64, err = src.ReadDouble() - if err != nil { - break - } - err = dst.WriteDouble(f64) - case bsontype.String: - var str string - str, err = src.ReadString() - if err != nil { - return err - } - err = dst.WriteString(str) - case bsontype.EmbeddedDocument: - err = c.CopyDocument(dst, src) - case bsontype.Array: - err = c.copyArray(dst, src) - case bsontype.Binary: - var data []byte - var subtype byte - data, subtype, err = src.ReadBinary() - if err != nil { - break - } - err = dst.WriteBinaryWithSubtype(data, subtype) - case bsontype.Undefined: - err = src.ReadUndefined() - if err != nil { - break - } - err = dst.WriteUndefined() - case bsontype.ObjectID: - var oid primitive.ObjectID - oid, err = src.ReadObjectID() - if err != nil { - break - } - err = dst.WriteObjectID(oid) - case bsontype.Boolean: - var b bool - b, err = src.ReadBoolean() - if err != nil { - break - } - err = dst.WriteBoolean(b) - case bsontype.DateTime: - var dt int64 - dt, err = src.ReadDateTime() - if err != nil { - break - } - err = dst.WriteDateTime(dt) - case bsontype.Null: - err = src.ReadNull() - if err != nil { - break - } - err = dst.WriteNull() - case bsontype.Regex: - var pattern, options string - pattern, options, err = src.ReadRegex() - if err != nil { - break - } - err = dst.WriteRegex(pattern, options) - case bsontype.DBPointer: - var ns string - var pointer primitive.ObjectID - ns, pointer, err = src.ReadDBPointer() - if err != nil { - break - } - err = dst.WriteDBPointer(ns, pointer) - case bsontype.JavaScript: - var js string - js, err = src.ReadJavascript() - if err != nil { - break - } - err = dst.WriteJavascript(js) - case bsontype.Symbol: - var symbol string - symbol, err = src.ReadSymbol() - if err != nil { - break - } - err = dst.WriteSymbol(symbol) - case bsontype.CodeWithScope: - var code string - var srcScope DocumentReader - code, srcScope, err = src.ReadCodeWithScope() - if err != nil { - break - } - - var dstScope DocumentWriter - dstScope, err = dst.WriteCodeWithScope(code) - if err != nil { - break - } - err = c.copyDocumentCore(dstScope, srcScope) - case bsontype.Int32: - var i32 int32 - i32, err = src.ReadInt32() - if err != nil { - break - } - err = dst.WriteInt32(i32) - case bsontype.Timestamp: - var t, i uint32 - t, i, err = src.ReadTimestamp() - if err != nil { - break - } - err = dst.WriteTimestamp(t, i) - case bsontype.Int64: - var i64 int64 - i64, err = src.ReadInt64() - if err != nil { - break - } - err = dst.WriteInt64(i64) - case bsontype.Decimal128: - var d128 primitive.Decimal128 - d128, err = src.ReadDecimal128() - if err != nil { - break - } - err = dst.WriteDecimal128(d128) - case bsontype.MinKey: - err = src.ReadMinKey() - if err != nil { - break - } - err = dst.WriteMinKey() - case bsontype.MaxKey: - err = src.ReadMaxKey() - if err != nil { - break - } - err = dst.WriteMaxKey() - default: - err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type()) - } - - return err -} - -func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { - ar, err := src.ReadArray() - if err != nil { - return err - } - - aw, err := dst.WriteArray() - if err != nil { - return err - } - - for { - vr, err := ar.ReadValue() - if errors.Is(err, ErrEOA) { - break - } - if err != nil { - return err - } - - vw, err := aw.WriteArrayElement() - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } 
- } - - return aw.WriteArrayEnd() -} - -func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { - for { - key, vr, err := dr.ReadElement() - if errors.Is(err, ErrEOD) { - break - } - if err != nil { - return err - } - - vw, err := dw.WriteDocumentElement(key) - if err != nil { - return err - } - - err = c.CopyValue(vw, vr) - if err != nil { - return err - } - } - - return dw.WriteDocumentEnd() -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go deleted file mode 100644 index 750b0d2af5..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonrw contains abstractions for reading and writing -// BSON and BSON-like types from sources. -package bsonrw // import "go.mongodb.org/mongo-driver/bson/bsonrw" diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go deleted file mode 100644 index bb52a0ec3d..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - - "go.mongodb.org/mongo-driver/bson/bsontype" -) - -const maxNestingDepth = 200 - -// ErrInvalidJSON indicates the JSON input is invalid -var ErrInvalidJSON = errors.New("invalid JSON input") - -type jsonParseState byte - -const ( - jpsStartState jsonParseState = iota - jpsSawBeginObject - jpsSawEndObject - jpsSawBeginArray - jpsSawEndArray - jpsSawColon - jpsSawComma - jpsSawKey - jpsSawValue - jpsDoneState - jpsInvalidState -) - -type jsonParseMode byte - -const ( - jpmInvalidMode jsonParseMode = iota - jpmObjectMode - jpmArrayMode -) - -type extJSONValue struct { - t bsontype.Type - v interface{} -} - -type extJSONObject struct { - keys []string - values []*extJSONValue -} - -type extJSONParser struct { - js *jsonScanner - s jsonParseState - m []jsonParseMode - k string - v *extJSONValue - - err error - canonical bool - depth int - maxDepth int - - emptyObject bool - relaxedUUID bool -} - -// newExtJSONParser returns a new extended JSON parser, ready to begin -// parsing from the first character of the given JSON input. It will not -// perform any read-ahead and will therefore not report any errors about -// malformed JSON at this point.
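This parser is internal to bsonrw; callers normally reach it through the public extended JSON helpers rather than constructing it directly. A minimal sketch using bson.UnmarshalExtJSON (the document and field names are illustrative):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	var out struct {
		N int32 `bson:"n"`
	}
	// The second argument selects canonical extended JSON, matching the
	// canonical flag threaded through newExtJSONParser.
	if err := bson.UnmarshalExtJSON([]byte(`{"n": {"$numberInt": "42"}}`), true, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.N) // 42
}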
-func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser { - return &extJSONParser{ - js: &jsonScanner{r: r}, - s: jpsStartState, - m: []jsonParseMode{}, - canonical: canonical, - maxDepth: maxNestingDepth, - } -} - -// peekType examines the next value and returns its BSON Type -func (ejp *extJSONParser) peekType() (bsontype.Type, error) { - var t bsontype.Type - var err error - initialState := ejp.s - - ejp.advanceState() - switch ejp.s { - case jpsSawValue: - t = ejp.v.t - case jpsSawBeginArray: - t = bsontype.Array - case jpsInvalidState: - err = ejp.err - case jpsSawComma: - // in array mode, seeing a comma means we need to progress again to actually observe a type - if ejp.peekMode() == jpmArrayMode { - return ejp.peekType() - } - case jpsSawEndArray: - // this would only be a valid state if we were in array mode, so return end-of-array error - err = ErrEOA - case jpsSawBeginObject: - // peek key to determine type - ejp.advanceState() - switch ejp.s { - case jpsSawEndObject: // empty embedded document - t = bsontype.EmbeddedDocument - ejp.emptyObject = true - case jpsInvalidState: - err = ejp.err - case jpsSawKey: - if initialState == jpsStartState { - return bsontype.EmbeddedDocument, nil - } - t = wrapperKeyBSONType(ejp.k) - - // if $uuid is encountered, parse as binary subtype 4 - if ejp.k == "$uuid" { - ejp.relaxedUUID = true - t = bsontype.Binary - } - - switch t { - case bsontype.JavaScript: - // just saw $code, need to check for $scope at same level - _, err = ejp.readValue(bsontype.JavaScript) - if err != nil { - break - } - - switch ejp.s { - case jpsSawEndObject: // type is TypeJavaScript - case jpsSawComma: - ejp.advanceState() - - if ejp.s == jpsSawKey && ejp.k == "$scope" { - t = bsontype.CodeWithScope - } else { - err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k) - } - case jpsInvalidState: - err = ejp.err - default: - err = ErrInvalidJSON - } - case bsontype.CodeWithScope: - err = errors.New("invalid extended JSON: code with $scope must contain $code before $scope") - } - } - } - - return t, err -} - -// readKey parses the next key and its type and returns them -func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) { - if ejp.emptyObject { - ejp.emptyObject = false - return "", 0, ErrEOD - } - - // advance to key (or return with error) - switch ejp.s { - case jpsStartState: - ejp.advanceState() - if ejp.s == jpsSawBeginObject { - ejp.advanceState() - } - case jpsSawBeginObject: - ejp.advanceState() - case jpsSawValue, jpsSawEndObject, jpsSawEndArray: - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject, jpsSawComma: - ejp.advanceState() - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsDoneState: - return "", 0, io.EOF - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, ErrInvalidJSON - } - case jpsSawKey: // do nothing (key was peeked before) - default: - return "", 0, invalidRequestError("key") - } - - // read key - var key string - - switch ejp.s { - case jpsSawKey: - key = ejp.k - case jpsSawEndObject: - return "", 0, ErrEOD - case jpsInvalidState: - return "", 0, ejp.err - default: - return "", 0, invalidRequestError("key") - } - - // check for colon - ejp.advanceState() - if err := ensureColon(ejp.s, key); err != nil { - return "", 0, err - } - - // peek at the value to determine type - t, err := ejp.peekType() - if err != nil { - return "", 0, err - } - - return key, t, nil -} - -// readValue returns the value corresponding to the Type returned by peekType 
-func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { - if ejp.s == jpsInvalidState { - return nil, ejp.err - } - - var v *extJSONValue - - switch t { - case bsontype.Null, bsontype.Boolean, bsontype.String: - if ejp.s != jpsSawValue { - return nil, invalidRequestError(t.String()) - } - v = ejp.v - case bsontype.Int32, bsontype.Int64, bsontype.Double: - // relaxed version allows these to be literal number values - if ejp.s == jpsSawValue { - v = ejp.v - break - } - fallthrough - case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) { - return nil, invalidJSONErrorForType("value", t) - } - - v = ejp.v - - // read end object - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("} after value", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer: - if ejp.s != jpsSawKey { - return nil, invalidRequestError(t.String()) - } - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - if t == bsontype.Binary && ejp.s == jpsSawValue { - // convert relaxed $uuid format - if ejp.relaxedUUID { - defer func() { ejp.relaxedUUID = false }() - uuid, err := ejp.v.parseSymbol() - if err != nil { - return nil, err - } - - // RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing - // in the 8th, 13th, 18th, and 23rd characters. 
- // - // See https://tools.ietf.org/html/rfc4122#section-3 - valid := len(uuid) == 36 && - string(uuid[8]) == "-" && - string(uuid[13]) == "-" && - string(uuid[18]) == "-" && - string(uuid[23]) == "-" - if !valid { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // remove hyphens - uuidNoHyphens := strings.Replace(uuid, "-", "", -1) - if len(uuidNoHyphens) != 32 { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens") - } - - // convert hex to bytes - bytes, err := hex.DecodeString(uuidNoHyphens) - if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary) - } - - base64 := &extJSONValue{ - t: bsontype.String, - v: base64.StdEncoding.EncodeToString(bytes), - } - subType := &extJSONValue{ - t: bsontype.String, - v: "04", - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - - break - } - - // convert legacy $binary format - base64 := ejp.v - - ejp.advanceState() - if ejp.s != jpsSawComma { - return nil, invalidJSONErrorForType(",", bsontype.Binary) - } - - ejp.advanceState() - key, t, err := ejp.readKey() - if err != nil { - return nil, err - } - if key != "$type" { - return nil, invalidJSONErrorForType("$type", bsontype.Binary) - } - - subType, err := ejp.readValue(t) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", bsontype.Binary) - } - - v = &extJSONValue{ - t: bsontype.EmbeddedDocument, - v: &extJSONObject{ - keys: []string{"base64", "subType"}, - values: []*extJSONValue{base64, subType}, - }, - } - break - } - - // read KV pairs - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONErrorForType("{", t) - } - - keys, vals, err := ejp.readObject(2, true) - if err != nil { - return nil, err - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("2 key-value pairs and then }", t) - } - - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - - case bsontype.DateTime: - switch ejp.s { - case jpsSawValue: - v = ejp.v - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - ejp.advanceState() - switch ejp.s { - case jpsSawBeginObject: - keys, vals, err := ejp.readObject(1, true) - if err != nil { - return nil, err - } - v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}} - case jpsSawValue: - if ejp.canonical { - return nil, invalidJSONError("{") - } - v = ejp.v - default: - if ejp.canonical { - return nil, invalidJSONErrorForType("object", t) - } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, invalidJSONErrorForType("value and then }", t) - } - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.JavaScript: - switch ejp.s { - case jpsSawKey: - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read value - ejp.advanceState() - if ejp.s != 
jpsSawValue { - return nil, invalidJSONErrorForType("value", t) - } - v = ejp.v - - // read end object or comma and just return - ejp.advanceState() - case jpsSawEndObject: - v = ejp.v - default: - return nil, invalidRequestError(t.String()) - } - case bsontype.CodeWithScope: - if ejp.s == jpsSawKey && ejp.k == "$scope" { - v = ejp.v // this is the $code string from earlier - - // read colon - ejp.advanceState() - if err := ensureColon(ejp.s, ejp.k); err != nil { - return nil, err - } - - // read { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, invalidJSONError("$scope to be embedded document") - } - } else { - return nil, invalidRequestError(t.String()) - } - case bsontype.EmbeddedDocument, bsontype.Array: - return nil, invalidRequestError(t.String()) - } - - return v, nil -} - -// readObject is a utility method for reading full objects of known (or expected) size -// it is useful for extended JSON types such as binary, datetime, regex, and timestamp -func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) { - keys := make([]string, numKeys) - vals := make([]*extJSONValue, numKeys) - - if !started { - ejp.advanceState() - if ejp.s != jpsSawBeginObject { - return nil, nil, invalidJSONError("{") - } - } - - for i := 0; i < numKeys; i++ { - key, t, err := ejp.readKey() - if err != nil { - return nil, nil, err - } - - switch ejp.s { - case jpsSawKey: - v, err := ejp.readValue(t) - if err != nil { - return nil, nil, err - } - - keys[i] = key - vals[i] = v - case jpsSawValue: - keys[i] = key - vals[i] = ejp.v - default: - return nil, nil, invalidJSONError("value") - } - } - - ejp.advanceState() - if ejp.s != jpsSawEndObject { - return nil, nil, invalidJSONError("}") - } - - return keys, vals, nil -} - -// advanceState reads the next JSON token from the scanner and transitions -// from the current state based on that token's type -func (ejp *extJSONParser) advanceState() { - if ejp.s == jpsDoneState || ejp.s == jpsInvalidState { - return - } - - jt, err := ejp.js.nextToken() - - if err != nil { - ejp.err = err - ejp.s = jpsInvalidState - return - } - - valid := ejp.validateToken(jt.t) - if !valid { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - return - } - - switch jt.t { - case jttBeginObject: - ejp.s = jpsSawBeginObject - ejp.pushMode(jpmObjectMode) - ejp.depth++ - - if ejp.depth > ejp.maxDepth { - ejp.err = nestingDepthError(jt.p, ejp.depth) - ejp.s = jpsInvalidState - } - case jttEndObject: - ejp.s = jpsSawEndObject - ejp.depth-- - - if ejp.popMode() != jpmObjectMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttBeginArray: - ejp.s = jpsSawBeginArray - ejp.pushMode(jpmArrayMode) - case jttEndArray: - ejp.s = jpsSawEndArray - - if ejp.popMode() != jpmArrayMode { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttColon: - ejp.s = jpsSawColon - case jttComma: - ejp.s = jpsSawComma - case jttEOF: - ejp.s = jpsDoneState - if len(ejp.m) != 0 { - ejp.err = unexpectedTokenError(jt) - ejp.s = jpsInvalidState - } - case jttString: - switch ejp.s { - case jpsSawComma: - if ejp.peekMode() == jpmArrayMode { - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - return - } - fallthrough - case jpsSawBeginObject: - ejp.s = jpsSawKey - ejp.k = jt.v.(string) - return - } - fallthrough - default: - ejp.s = jpsSawValue - ejp.v = extendJSONToken(jt) - } -} - -var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{ - jpsStartState: { - jttBeginObject: 
true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - jttEOF: true, - }, - jpsSawBeginObject: { - jttEndObject: true, - jttString: true, - }, - jpsSawEndObject: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawBeginArray: { - jttBeginObject: true, - jttBeginArray: true, - jttEndArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawEndArray: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsSawColon: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawComma: { - jttBeginObject: true, - jttBeginArray: true, - jttInt32: true, - jttInt64: true, - jttDouble: true, - jttString: true, - jttBool: true, - jttNull: true, - }, - jpsSawKey: { - jttColon: true, - }, - jpsSawValue: { - jttEndObject: true, - jttEndArray: true, - jttComma: true, - jttEOF: true, - }, - jpsDoneState: {}, - jpsInvalidState: {}, -} - -func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool { - switch ejp.s { - case jpsSawEndObject: - // if we are at depth zero and the next token is a '{', - // we can consider it valid only if we are not in array mode. - if jtt == jttBeginObject && ejp.depth == 0 { - return ejp.peekMode() != jpmArrayMode - } - case jpsSawComma: - switch ejp.peekMode() { - // the only valid next token after a comma inside a document is a string (a key) - case jpmObjectMode: - return jtt == jttString - case jpmInvalidMode: - return false - } - } - - _, ok := jpsValidTransitionTokens[ejp.s][jtt] - return ok -} - -// ensureExtValueType returns true if the current value has the expected -// value type for single-key extended JSON types. 
For example, -// {"$numberInt": v} v must be TypeString -func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool { - switch t { - case bsontype.MinKey, bsontype.MaxKey: - return ejp.v.t == bsontype.Int32 - case bsontype.Undefined: - return ejp.v.t == bsontype.Boolean - case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID: - return ejp.v.t == bsontype.String - default: - return false - } -} - -func (ejp *extJSONParser) pushMode(m jsonParseMode) { - ejp.m = append(ejp.m, m) -} - -func (ejp *extJSONParser) popMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - m := ejp.m[l-1] - ejp.m = ejp.m[:l-1] - - return m -} - -func (ejp *extJSONParser) peekMode() jsonParseMode { - l := len(ejp.m) - if l == 0 { - return jpmInvalidMode - } - - return ejp.m[l-1] -} - -func extendJSONToken(jt *jsonToken) *extJSONValue { - var t bsontype.Type - - switch jt.t { - case jttInt32: - t = bsontype.Int32 - case jttInt64: - t = bsontype.Int64 - case jttDouble: - t = bsontype.Double - case jttString: - t = bsontype.String - case jttBool: - t = bsontype.Boolean - case jttNull: - t = bsontype.Null - default: - return nil - } - - return &extJSONValue{t: t, v: jt.v} -} - -func ensureColon(s jsonParseState, key string) error { - if s != jpsSawColon { - return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key) - } - - return nil -} - -func invalidRequestError(s string) error { - return fmt.Errorf("invalid request to read %s", s) -} - -func invalidJSONError(expected string) error { - return fmt.Errorf("invalid JSON input; expected %s", expected) -} - -func invalidJSONErrorForType(expected string, t bsontype.Type) error { - return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t) -} - -func unexpectedTokenError(jt *jsonToken) error { - switch jt.t { - case jttInt32, jttInt64, jttDouble: - return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p) - case jttString: - return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p) - case jttBool: - return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p) - case jttNull: - return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p) - case jttEOF: - return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p) - default: - return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p) - } -} - -func nestingDepthError(p, depth int) error { - return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p) -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go deleted file mode 100644 index 59ddfc4485..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonrw - -import ( - "errors" - "fmt" - "io" - "sync" - - "go.mongodb.org/mongo-driver/bson/bsontype" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON. 
-// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -type ExtJSONValueReaderPool struct { - pool sync.Pool -} - -// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool { - return &ExtJSONValueReaderPool{ - pool: sync.Pool{ - New: func() interface{} { - return new(extJSONValueReader) - }, - }, - } -} - -// Get retrieves a ValueReader from the pool and uses r as the underlying ExtJSON. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) { - vr := bvrp.pool.Get().(*extJSONValueReader) - return vr.reset(r, canonical) -} - -// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader nothing -// is inserted into the pool and ok will be false. -// -// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0. -func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) { - bvr, ok := vr.(*extJSONValueReader) - if !ok { - return false - } - - bvr, _ = bvr.reset(nil, false) - bvrp.pool.Put(bvr) - return true -} - -type ejvrState struct { - mode mode - vType bsontype.Type - depth int -} - -// extJSONValueReader is for reading extended JSON. -type extJSONValueReader struct { - p *extJSONParser - - stack []ejvrState - frame int -} - -// NewExtJSONValueReader creates a new ValueReader from a given io.Reader. -// It will interpret the JSON of r as canonical or relaxed according to the -// given canonical flag. -func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) { - return newExtJSONValueReader(r, canonical) -} - -func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) { - ejvr := new(extJSONValueReader) - return ejvr.reset(r, canonical) -} - -func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) { - p := newExtJSONParser(r, canonical) - typ, err := p.peekType() - - if err != nil { - return nil, ErrInvalidJSON - } - - var m mode - switch typ { - case bsontype.EmbeddedDocument: - m = mTopLevel - case bsontype.Array: - m = mArray - default: - m = mValue - } - - stack := make([]ejvrState, 1, 5) - stack[0] = ejvrState{ - mode: m, - vType: typ, - } - return &extJSONValueReader{ - p: p, - stack: stack, - }, nil -} - -func (ejvr *extJSONValueReader) advanceFrame() { - if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack - length := len(ejvr.stack) - if length+1 >= cap(ejvr.stack) { - // double it - buf := make([]ejvrState, 2*cap(ejvr.stack)+1) - copy(buf, ejvr.stack) - ejvr.stack = buf - } - ejvr.stack = ejvr.stack[:length+1] - } - ejvr.frame++ - - // Clean the stack - ejvr.stack[ejvr.frame].mode = 0 - ejvr.stack[ejvr.frame].vType = 0 - ejvr.stack[ejvr.frame].depth = 0 -} - -func (ejvr *extJSONValueReader) pushDocument() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mDocument - ejvr.stack[ejvr.frame].depth = ejvr.p.depth -} - -func (ejvr *extJSONValueReader) pushCodeWithScope() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mCodeWithScope -} - -func (ejvr *extJSONValueReader) pushArray() { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = mArray -} - -func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) { - ejvr.advanceFrame() - - ejvr.stack[ejvr.frame].mode = m - ejvr.stack[ejvr.frame].vType = t -} - -func 
(ejvr *extJSONValueReader) pop() { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - ejvr.frame-- - case mDocument, mArray, mCodeWithScope: - ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc... - } -} - -func (ejvr *extJSONValueReader) skipObject() { - // read entire object until depth returns to 0 (last ending } or ] seen) - depth := 1 - for depth > 0 { - ejvr.p.advanceState() - - // If object is empty, raise depth and continue. When emptyObject is true, the - // parser has already read both the opening and closing brackets of an empty - // object ("{}"), so the next valid token will be part of the parent document, - // not part of the nested document. - // - // If there is a comma, there are remaining fields, emptyObject must be set back - // to false, and comma must be skipped with advanceState(). - if ejvr.p.emptyObject { - if ejvr.p.s == jpsSawComma { - ejvr.p.emptyObject = false - ejvr.p.advanceState() - } - depth-- - continue - } - - switch ejvr.p.s { - case jpsSawBeginObject, jpsSawBeginArray: - depth++ - case jpsSawEndObject, jpsSawEndArray: - depth-- - } - } -} - -func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { - te := TransitionError{ - name: name, - current: ejvr.stack[ejvr.frame].mode, - destination: destination, - modes: modes, - action: "read", - } - if ejvr.frame != 0 { - te.parent = ejvr.stack[ejvr.frame-1].mode - } - return te -} - -func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error { - return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t) -} - -func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != t { - return ejvr.typeError(t) - } - default: - modes := []mode{mElement, mValue} - if addModes != nil { - modes = append(modes, addModes...) 
- } - return ejvr.invalidTransitionErr(destination, callerName, modes) - } - - return nil -} - -func (ejvr *extJSONValueReader) Type() bsontype.Type { - return ejvr.stack[ejvr.frame].vType -} - -func (ejvr *extJSONValueReader) Skip() error { - switch ejvr.stack[ejvr.frame].mode { - case mElement, mValue: - default: - return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue}) - } - - defer ejvr.pop() - - t := ejvr.stack[ejvr.frame].vType - switch t { - case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: - // read entire array, doc or CodeWithScope - ejvr.skipObject() - default: - _, err := ejvr.p.readValue(t) - if err != nil { - return err - } - } - - return nil -} - -func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: // allow reading array from top level - case mArray: - return ejvr, nil - default: - if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil { - return nil, err - } - } - - ejvr.pushArray() - - return ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) { - if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil { - return nil, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Binary) - if err != nil { - return nil, 0, err - } - - b, btype, err = v.parseBinary() - - ejvr.pop() - return b, btype, err -} - -func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) { - if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil { - return false, err - } - - v, err := ejvr.p.readValue(bsontype.Boolean) - if err != nil { - return false, err - } - - if v.t != bsontype.Boolean { - return false, fmt.Errorf("expected type bool, but got type %s", v.t) - } - - ejvr.pop() - return v.v.(bool), nil -} - -func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel: - return ejvr, nil - case mElement, mValue: - if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument { - return nil, ejvr.typeError(bsontype.EmbeddedDocument) - } - - ejvr.pushDocument() - return ejvr, nil - default: - return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue}) - } -} - -func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) { - if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil { - return "", nil, err - } - - v, err := ejvr.p.readValue(bsontype.CodeWithScope) - if err != nil { - return "", nil, err - } - - code, err = v.parseJavascript() - - ejvr.pushCodeWithScope() - return code, ejvr, err -} - -func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) { - if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil { - return "", primitive.NilObjectID, err - } - - v, err := ejvr.p.readValue(bsontype.DBPointer) - if err != nil { - return "", primitive.NilObjectID, err - } - - ns, oid, err = v.parseDBPointer() - - ejvr.pop() - return ns, oid, err -} - -func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.DateTime) - if err != nil { - return 0, err - } - - d, err := v.parseDateTime() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) 
ReadDecimal128() (primitive.Decimal128, error) { - if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil { - return primitive.Decimal128{}, err - } - - v, err := ejvr.p.readValue(bsontype.Decimal128) - if err != nil { - return primitive.Decimal128{}, err - } - - d, err := v.parseDecimal128() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadDouble() (float64, error) { - if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Double) - if err != nil { - return 0, err - } - - d, err := v.parseDouble() - - ejvr.pop() - return d, err -} - -func (ejvr *extJSONValueReader) ReadInt32() (int32, error) { - if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int32) - if err != nil { - return 0, err - } - - i, err := v.parseInt32() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadInt64() (int64, error) { - if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil { - return 0, err - } - - v, err := ejvr.p.readValue(bsontype.Int64) - if err != nil { - return 0, err - } - - i, err := v.parseInt64() - - ejvr.pop() - return i, err -} - -func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) { - if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.JavaScript) - if err != nil { - return "", err - } - - code, err = v.parseJavascript() - - ejvr.pop() - return code, err -} - -func (ejvr *extJSONValueReader) ReadMaxKey() error { - if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MaxKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("max") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadMinKey() error { - if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.MinKey) - if err != nil { - return err - } - - err = v.parseMinMaxKey("min") - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadNull() error { - if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Null) - if err != nil { - return err - } - - if v.t != bsontype.Null { - return fmt.Errorf("expected type null but got type %s", v.t) - } - - ejvr.pop() - return nil -} - -func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) { - if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil { - return primitive.ObjectID{}, err - } - - v, err := ejvr.p.readValue(bsontype.ObjectID) - if err != nil { - return primitive.ObjectID{}, err - } - - oid, err := v.parseObjectID() - - ejvr.pop() - return oid, err -} - -func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) { - if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil { - return "", "", err - } - - v, err := ejvr.p.readValue(bsontype.Regex) - if err != nil { - return "", "", err - } - - pattern, options, err = v.parseRegex() - - ejvr.pop() - return pattern, options, err -} - -func (ejvr *extJSONValueReader) ReadString() (string, error) { - if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil { - 
return "", err - } - - v, err := ejvr.p.readValue(bsontype.String) - if err != nil { - return "", err - } - - if v.t != bsontype.String { - return "", fmt.Errorf("expected type string but got type %s", v.t) - } - - ejvr.pop() - return v.v.(string), nil -} - -func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) { - if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil { - return "", err - } - - v, err := ejvr.p.readValue(bsontype.Symbol) - if err != nil { - return "", err - } - - symbol, err = v.parseSymbol() - - ejvr.pop() - return symbol, err -} - -func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) { - if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil { - return 0, 0, err - } - - v, err := ejvr.p.readValue(bsontype.Timestamp) - if err != nil { - return 0, 0, err - } - - t, i, err = v.parseTimestamp() - - ejvr.pop() - return t, i, err -} - -func (ejvr *extJSONValueReader) ReadUndefined() error { - if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil { - return err - } - - v, err := ejvr.p.readValue(bsontype.Undefined) - if err != nil { - return err - } - - err = v.parseUndefined() - - ejvr.pop() - return err -} - -func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mTopLevel, mDocument, mCodeWithScope: - default: - return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope}) - } - - name, t, err := ejvr.p.readKey() - - if err != nil { - if errors.Is(err, ErrEOD) { - if ejvr.stack[ejvr.frame].mode == mCodeWithScope { - _, err := ejvr.p.peekType() - if err != nil { - return "", nil, err - } - } - - ejvr.pop() - } - - return "", nil, err - } - - ejvr.push(mElement, t) - return name, ejvr, nil -} - -func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { - switch ejvr.stack[ejvr.frame].mode { - case mArray: - default: - return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray}) - } - - t, err := ejvr.p.peekType() - if err != nil { - if errors.Is(err, ErrEOA) { - ejvr.pop() - } - - return nil, err - } - - ejvr.push(mValue, t) - return ejvr, nil -} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go deleted file mode 100644 index ba39c9601f..0000000000 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. - -package bsonrw - -import "unicode/utf8" - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). 
-var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML