diff --git a/.drone/drone.yml b/.drone/drone.yml index 43e4161ef322..b234441b6770 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -119,24 +119,6 @@ trigger: type: docker --- kind: pipeline -name: Test manifests -platform: - arch: amd64 - os: linux -steps: -- commands: - - make generate-manifests - - ERR_MSG="The environment manifests are out of date. Please run 'make generate-manifests' - and commit changes!" - - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.30.4 - name: Regenerate environment manifests -trigger: - event: - - pull_request -type: docker ---- -kind: pipeline name: Test platform: arch: amd64 @@ -1055,94 +1037,6 @@ volumes: name: docker --- kind: pipeline -name: Publish Linux smoke container -platform: - arch: amd64 - os: linux -steps: -- commands: - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - failure: ignore - image: grafana/agent-build-image:0.30.4 - name: Configure QEMU - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - mkdir -p $HOME/.docker - - printenv GCR_CREDS > $HOME/.docker/config.json - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-smoke-${DRONE_COMMIT_SHA} --driver - docker-container --use - - ./tools/ci/docker-containers smoke - - docker buildx rm multiarch-agent-smoke-${DRONE_COMMIT_SHA} - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - GCR_CREDS: - from_secret: gcr_admin - image: grafana/agent-build-image:0.30.4 - name: Publish container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline -name: Publish Linux crow container -platform: - arch: amd64 - os: linux -steps: -- commands: - - docker run --rm --privileged multiarch/qemu-user-static 
--reset -p yes - failure: ignore - image: grafana/agent-build-image:0.30.4 - name: Configure QEMU - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - mkdir -p $HOME/.docker - - printenv GCR_CREDS > $HOME/.docker/config.json - - docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - - docker buildx create --name multiarch-agent-crow-${DRONE_COMMIT_SHA} --driver - docker-container --use - - ./tools/ci/docker-containers crow - - docker buildx rm multiarch-agent-crow-${DRONE_COMMIT_SHA} - environment: - DOCKER_LOGIN: - from_secret: docker_login - DOCKER_PASSWORD: - from_secret: docker_password - GCR_CREDS: - from_secret: gcr_admin - image: grafana/agent-build-image:0.30.4 - name: Publish container - volumes: - - name: docker - path: /var/run/docker.sock -trigger: - ref: - - refs/heads/main - - refs/tags/v* -type: docker -volumes: -- host: - path: /var/run/docker.sock - name: docker ---- -kind: pipeline name: Publish Windows agent container platform: arch: amd64 @@ -1205,8 +1099,6 @@ depends_on: - Publish Linux agent-boringcrypto container - Publish Linux agentctl container - Publish Linux agent-operator container -- Publish Linux smoke container -- Publish Linux crow container image_pull_secrets: - dockerconfigjson kind: pipeline @@ -1266,8 +1158,6 @@ depends_on: - Publish Linux agent-boringcrypto container - Publish Linux agentctl container - Publish Linux agent-operator container -- Publish Linux smoke container -- Publish Linux crow container - Publish Windows agent container - Publish Windows agentctl container image_pull_secrets: @@ -1427,6 +1317,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 28ba52df6f22c10bf77a95386a49aff65a1c372127f7d89489ac2d3ee02ce618 +hmac: b4b3bb3578124bba1758f323695216281365054c623738d38e51cc37125277ae ... 
diff --git a/.drone/pipelines/publish.jsonnet b/.drone/pipelines/publish.jsonnet index 030fd9891001..6f8c4c2b202b 100644 --- a/.drone/pipelines/publish.jsonnet +++ b/.drone/pipelines/publish.jsonnet @@ -6,7 +6,7 @@ local ghTokenFilename = '/drone/src/gh-token.txt'; // job_names gets the list of job names for use in depends_on. local job_names = function(jobs) std.map(function(job) job.name, jobs); -local linux_containers = ['agent','agent-boringcrypto', 'agentctl', 'agent-operator', 'smoke', 'crow']; +local linux_containers = ['agent', 'agent-boringcrypto', 'agentctl', 'agent-operator']; local linux_containers_jobs = std.map(function(container) ( pipelines.linux('Publish Linux %s container' % container) { trigger: { @@ -163,8 +163,8 @@ linux_containers_jobs + windows_containers_jobs + [ GITHUB_APP_PRIVATE_KEY: secrets.updater_private_key.fromSecret, }, commands: [ - '/usr/bin/github-app-external-token > %s' % ghTokenFilename - ] + '/usr/bin/github-app-external-token > %s' % ghTokenFilename, + ], }, { name: 'Publish release', @@ -188,7 +188,7 @@ linux_containers_jobs + windows_containers_jobs + [ VERSION=${DRONE_TAG} RELEASE_DOC_TAG=$(echo ${DRONE_TAG} | awk -F '.' '{print $1"."$2}') ./tools/release |||, ], - } + }, ], volumes: [{ name: 'docker', diff --git a/.drone/pipelines/test.jsonnet b/.drone/pipelines/test.jsonnet index 438447f99325..6daa113d2019 100644 --- a/.drone/pipelines/test.jsonnet +++ b/.drone/pipelines/test.jsonnet @@ -50,23 +50,6 @@ local pipelines = import '../util/pipelines.jsonnet'; }], }, - pipelines.linux('Test manifests') { - trigger: { - event: ['pull_request'], - }, - steps: [{ - name: 'Regenerate environment manifests', - image: build_image.linux, - - commands: [ - 'make generate-manifests', - 'ERR_MSG="The environment manifests are out of date. Please run \'make generate-manifests\' and commit changes!"', - // "git status --porcelain" reports if there's any new, modified, or deleted files. - 'if [ ! 
-z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi', - ], - }], - }, - pipelines.linux('Test') { trigger: { event: ['pull_request'], diff --git a/.github/workflows/helm-test.yml b/.github/workflows/helm-test.yml index 20a3745c8fe1..9d4738bcd6cb 100644 --- a/.github/workflows/helm-test.yml +++ b/.github/workflows/helm-test.yml @@ -51,7 +51,7 @@ jobs: version: v3.10.3 - name: Install Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.9' check-latest: true diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 80540ae4848e..4b9f7077ed57 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -14,7 +14,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: "1.21" - name: Set OTEL Exporter Endpoint diff --git a/.github/workflows/needs-attention.yml b/.github/workflows/needs-attention.yml index 6143be1fb2be..3e2d93a25ca6 100644 --- a/.github/workflows/needs-attention.yml +++ b/.github/workflows/needs-attention.yml @@ -10,7 +10,7 @@ jobs: needs-attention: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: days-before-stale: 30 days-before-close: -1 # never close automatically diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 229de6f55dbe..59d4fbe34540 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,7 +18,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Set up Go 1.21 - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: "1.21" cache: true diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 4403f0515b02..57fd6e855873 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: 
Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@2b6a709cf9c4025c5438138008beaddbb02086f0 + uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca with: image-ref: 'grafana/agent:main' format: 'template' @@ -35,6 +35,6 @@ jobs: severity: 'CRITICAL,HIGH,MEDIUM,LOW' - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3 with: sarif_file: 'trivy-results.sarif' \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 3b83ee4d268c..a7c4c7020881 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,12 +10,50 @@ internal API changes are not present. Main (unreleased) ----------------- +### Security fixes + +- Fixes following vulnerabilities (@hainenber) + - [GO-2023-2409](https://github.com/advisories/GHSA-mhpq-9638-x6pw) + - [GO-2023-2412](https://github.com/advisories/GHSA-7ww5-4wqc-m92c) + - [CVE-2023-49568](https://github.com/advisories/GHSA-mw99-9chc-xw7r) + +### Enhancements + +- Add an option to the windows static mode installer for expanding environment vars in the yaml config. (@erikbaranowski) +- Add authentication support to `loki.source.awsfirehose` (@sberz) + +- Sort kubelet endpoint to reduce pressure on K8s's API server and watcher endpoints. (@hainenber) + +- Expose `physical_disk` collector from `windows_exporter` v0.24.0 to + Flow configuration. (@hainenber) + +### Bugfixes + +- Fix an issue in `remote.s3` where the exported content of an object would be an empty string if `remote.s3` failed to fully retrieve + the file in a single read call. (@grafana/agent-squad) + +- Utilize the `instance` Argument of `prometheus.exporter.kafka` when set. (@akhmatov-s) + +- Fix a duplicate metrics registration panic when sending metrics to an static + mode metric instance's write handler. (@tpaschalis) + +### Other changes + +- Removed support for Windows 2012 in line with Microsoft end of life. 
(@mattdurham) + +- Split instance ID and component groupings into separate panels for `remote write active series by component` in the Flow mixin. (@tristanburgess) + +- Updated dependency to add support for Go 1.22 (@stefanb) + +v0.39.0 (2024-01-09) +-------------------- + ### Breaking changes - `otelcol.receiver.prometheus` will drop all `otel_scope_info` metrics when converting them to OTLP. (@wildum) - If the `otel_scope_info` metric has labels `otel_scope_name` and `otel_scope_version`, - their values will be used to set OTLP Instrumentation Scope name and version respectively. - - Labels of `otel_scope_info` metrics other than `otel_scope_name` and `otel_scope_version` + their values will be used to set OTLP Instrumentation Scope name and version respectively. + - Labels of `otel_scope_info` metrics other than `otel_scope_name` and `otel_scope_version` are added as scope attributes with the matching name and version. - The `target` block in `prometheus.exporter.blackbox` requires a mandatory `name` @@ -25,6 +63,12 @@ Main (unreleased) - This change will not break any existing configurations and you can opt in to validation via the `validate_dimensions` configuration option. - Before this change, pulling metrics for azure resources with variable dimensions required one configuration per metric + dimension combination to avoid an error. - After this change, you can include all metrics and dimensions in a single configuration and the Azure APIs will only return dimensions which are valid for the various metrics. + +### Features + +- A new `discovery.ovhcloud` component for discovering scrape targets on OVHcloud. (@ptodev) + +- Allow specifying additional containers to run. (@juangom) ### Enhancements @@ -53,11 +97,12 @@ Main (unreleased) - `otelcol.receiver.prometheus` does not drop histograms without buckets anymore. (@wildum) - Added exemplars support to `otelcol.receiver.prometheus`. (@wildum) + - `mimir.rules.kubernetes` may now retry its startup on failure. 
(@hainenber) - Added links between compatible components in the documentation to make it easier to discover them. (@thampiotr) - + - Allow defining `HTTPClientConfig` for `discovery.ec2`. (@cmbrad) - The `remote.http` component can optionally define a request body. (@tpaschalis) @@ -71,21 +116,29 @@ Main (unreleased) - Added 'country' mmdb-type to log pipeline-stage geoip. (@superstes) - Azure exporter enhancements for flow and static mode, (@kgeckhart) - - Allows for pulling metrics at the Azure subscription level instead of resource by resource - - Disable dimension validation by default to reduce the number of exporter instances needed for full dimension coverage + - Allows for pulling metrics at the Azure subscription level instead of resource by resource + - Disable dimension validation by default to reduce the number of exporter instances needed for full dimension coverage - Add `max_cache_size` to `prometheus.relabel` to allow configurability instead of hard coded 100,000. (@mattdurham) - Add support for `http_sd_config` within a `scrape_config` for prometheus to flow config conversion. (@erikbaranowski) +- `discovery.lightsail` now supports additional parameters for configuring HTTP client settings. (@ptodev) +- Add `sample_age_limit` to remote_write config to drop samples older than a specified duration. (@marctc) + +- Handle paths in the Kubelet URL for `discovery.kubelet`. (@petewall) + +- `loki.source.docker` now deduplicates targets which report the same container + ID. (@tpaschalis) + ### Bugfixes - Update `pyroscope.ebpf` to fix a logical bug causing to profile to many kthreads instead of regular processes https://github.com/grafana/pyroscope/pull/2778 (@korniltsev) - + - Update `pyroscope.ebpf` to produce more optimal pprof profiles for python processes https://github.com/grafana/pyroscope/pull/2788 (@korniltsev) - In Static mode's `traces` subsystem, `spanmetrics` used to be generated prior to load balancing. 
- This could lead to inaccurate metrics. This issue only affects Agents using both `spanmetrics` and + This could lead to inaccurate metrics. This issue only affects Agents using both `spanmetrics` and `load_balancing`, when running in a load balanced cluster with more than one Agent instance. (@ptodev) - Fixes `loki.source.docker` a behavior that synced an incomplete list of targets to the tailer manager. (@FerdinandvHagen) @@ -94,11 +147,21 @@ Main (unreleased) - Add staleness tracking to labelstore to reduce memory usage. (@mattdurham) +- Fix issue where `prometheus.exporter.kafka` would crash when configuring `sasl_password`. (@rfratto) + +- Fix performance issue where perf lib where clause was not being set, leading to timeouts in collecting metrics for windows_exporter. (@mattdurham) + +- Fix nil panic when using the process collector with the windows exporter. (@mattdurham) + ### Other changes - Bump github.com/IBM/sarama from v1.41.2 to v1.42.1 -- Attatch unique Agent ID header to remote-write requests. (@captncraig) +- Attach unique Agent ID header to remote-write requests. (@captncraig) + +- Update to v2.48.1 of `github.com/prometheus/prometheus`. + Previously, a custom fork of v2.47.2 was used. + The custom fork of v2.47.2 also contained prometheus#12729 and prometheus#12677. v0.38.1 (2023-11-30) -------------------- diff --git a/Makefile b/Makefile index 024a624d2223..7bdd7fdee77f 100644 --- a/Makefile +++ b/Makefile @@ -28,8 +28,6 @@ ## agent-service Compiles cmd/grafana-agent-service to $(SERVICE_BINARY) ## agentctl Compiles cmd/grafana-agentctl to $(AGENTCTL_BINARY) ## operator Compiles cmd/grafana-agent-operator to $(OPERATOR_BINARY) -## crow Compiles tools/crow to $(CROW_BINARY) -## smoke Compiles tools/smoke to $(SMOKE_BINARY) ## ## Targets for building Docker images: ## @@ -38,8 +36,6 @@ ## agent-boringcrypto-image Builds agent Docker image with boringcrypto. ## agentctl-image Builds agentctl Docker image. 
## operator-image Builds operator Docker image. -## crow-image Builds crow Docker image. -## smoke-image Builds smoke test Docker image. ## ## Targets for packaging: ## @@ -56,7 +52,6 @@ ## generate-drone Generate the Drone YAML from Jsonnet. ## generate-helm-docs Generate Helm chart documentation. ## generate-helm-tests Generate Helm chart tests. -## generate-manifests Generate production/kubernetes YAML manifests. ## generate-dashboards Generate dashboards in example/docker-compose after ## changing Jsonnet. ## generate-protos Generate protobuf files. @@ -78,8 +73,6 @@ ## AGENT_IMAGE Image name:tag built by `make agent-image` ## AGENTCTL_IMAGE Image name:tag built by `make agentctl-image` ## OPERATOR_IMAGE Image name:tag built by `make operator-image` -## CROW_IMAGE Image name:tag built by `make crow-image` -## SMOKE_IMAGE Image name:tag built by `make smoke-image` ## BUILD_IMAGE Image name:tag used by USE_CONTAINER=1 ## AGENT_BINARY Output path of `make agent` (default build/grafana-agent) ## AGENT_BORINGCRYPTO_BINARY Output path of `make agent-boringcrypto` (default build/grafana-agent-boringcrypto) @@ -87,8 +80,6 @@ ## SERVICE_BINARY Output path of `make agent-service` (default build/grafana-agent-service) ## AGENTCTL_BINARY Output path of `make agentctl` (default build/grafana-agentctl) ## OPERATOR_BINARY Output path of `make operator` (default build/grafana-agent-operator) -## CROW_BINARY Output path of `make crow` (default build/grafana-agent-crow) -## SMOKE_BINARY Output path of `make smoke` (default build/grafana-agent-smoke) ## GOOS Override OS to build binaries for ## GOARCH Override target architecture to build binaries for ## GOARM Override ARM version (6 or 7) when GOARCH=arm @@ -105,16 +96,12 @@ AGENT_IMAGE ?= grafana/agent:latest AGENT_BORINGCRYPTO_IMAGE ?= grafana/agent-boringcrypto:latest AGENTCTL_IMAGE ?= grafana/agentctl:latest OPERATOR_IMAGE ?= grafana/agent-operator:latest -CROW_IMAGE ?= us.gcr.io/kubernetes-dev/grafana/agent-crow:latest 
-SMOKE_IMAGE ?= us.gcr.io/kubernetes-dev/grafana/agent-smoke:latest AGENT_BINARY ?= build/grafana-agent AGENT_BORINGCRYPTO_BINARY ?= build/grafana-agent-boringcrypto FLOW_BINARY ?= build/grafana-agent-flow SERVICE_BINARY ?= build/grafana-agent-service AGENTCTL_BINARY ?= build/grafana-agentctl OPERATOR_BINARY ?= build/grafana-agent-operator -CROW_BINARY ?= build/agent-crow -SMOKE_BINARY ?= build/agent-smoke AGENTLINT_BINARY ?= build/agentlint GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) @@ -126,10 +113,10 @@ GOEXPERIMENT ?= $(shell go env GOEXPERIMENT) # List of all environment variables which will propagate to the build # container. USE_CONTAINER must _not_ be included to avoid infinite recursion. PROPAGATE_VARS := \ - AGENT_IMAGE AGENTCTL_IMAGE OPERATOR_IMAGE CROW_IMAGE SMOKE_IMAGE \ + AGENT_IMAGE AGENTCTL_IMAGE OPERATOR_IMAGE \ BUILD_IMAGE GOOS GOARCH GOARM CGO_ENABLED RELEASE_BUILD \ AGENT_BINARY AGENT_BORINGCRYPTO_BINARY FLOW_BINARY AGENTCTL_BINARY OPERATOR_BINARY \ - CROW_BINARY SMOKE_BINARY VERSION GO_TAGS GOEXPERIMENT + VERSION GO_TAGS GOEXPERIMENT # # Constants for targets @@ -174,7 +161,7 @@ lint: agentlint # more without -race for packages that have known race detection issues. test: $(GO_ENV) go test $(GO_FLAGS) -race $(shell go list ./... 
| grep -v /integration-tests/) - $(GO_ENV) go test $(GO_FLAGS) ./pkg/integrations/node_exporter ./pkg/logs ./pkg/operator ./pkg/util/k8s ./component/otelcol/processor/tail_sampling ./component/loki/source/file + $(GO_ENV) go test $(GO_FLAGS) ./pkg/integrations/node_exporter ./pkg/logs ./pkg/operator ./pkg/util/k8s ./component/otelcol/processor/tail_sampling ./component/loki/source/file ./component/loki/source/docker test-packages: docker pull $(BUILD_IMAGE) @@ -188,8 +175,8 @@ integration-test: # Targets for building binaries # -.PHONY: binaries agent agent-boringcrypto agent-flow agentctl operator crow smoke -binaries: agent agent-boringcrypto agent-flow agentctl operator crow smoke +.PHONY: binaries agent agent-boringcrypto agent-flow agentctl operator +binaries: agent agent-boringcrypto agent-flow agentctl operator agent: ifeq ($(USE_CONTAINER),1) @@ -235,20 +222,6 @@ else $(GO_ENV) go build $(GO_FLAGS) -o $(OPERATOR_BINARY) ./cmd/grafana-agent-operator endif -crow: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - $(GO_ENV) go build $(GO_FLAGS) -o $(CROW_BINARY) ./tools/crow -endif - -smoke: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - $(GO_ENV) go build $(GO_FLAGS) -o $(SMOKE_BINARY) ./tools/smoke -endif - agentlint: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) @@ -266,8 +239,8 @@ ifneq ($(DOCKER_PLATFORM),) DOCKER_FLAGS += --platform=$(DOCKER_PLATFORM) endif -.PHONY: images agent-image agentctl-image operator-image crow-image smoke-image -images: agent-image agentctl-image operator-image crow-image smoke-image +.PHONY: images agent-image agentctl-image operator-image +images: agent-image agentctl-image operator-image agent-image: DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(AGENT_IMAGE) -f cmd/grafana-agent/Dockerfile . @@ -277,17 +250,13 @@ agent-boringcrypto-image: DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) --build-arg GOEXPERIMENT=boringcrypto -t $(AGENT_BORINGCRYPTO_IMAGE) -f cmd/grafana-agent/Dockerfile . 
operator-image: DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(OPERATOR_IMAGE) -f cmd/grafana-agent-operator/Dockerfile . -crow-image: - DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(CROW_IMAGE) -f tools/crow/Dockerfile . -smoke-image: - DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(SMOKE_IMAGE) -f tools/smoke/Dockerfile . # # Targets for generating assets # -.PHONY: generate generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files -generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-manifests generate-dashboards generate-protos generate-ui generate-versioned-files generate-docs +.PHONY: generate generate-crds generate-drone generate-helm-docs generate-helm-tests generate-dashboards generate-protos generate-ui generate-versioned-files +generate: generate-crds generate-drone generate-helm-docs generate-helm-tests generate-dashboards generate-protos generate-ui generate-versioned-files generate-docs generate-crds: ifeq ($(USE_CONTAINER),1) @@ -314,13 +283,6 @@ else bash ./operations/helm/scripts/rebuild-tests.sh endif -generate-manifests: -ifeq ($(USE_CONTAINER),1) - $(RERUN_IN_CONTAINER) -else - cd production/kubernetes/build && bash build.sh -endif - generate-dashboards: ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) @@ -381,14 +343,10 @@ info: @printf "AGENT_IMAGE = $(AGENT_IMAGE)\n" @printf "AGENTCTL_IMAGE = $(AGENTCTL_IMAGE)\n" @printf "OPERATOR_IMAGE = $(OPERATOR_IMAGE)\n" - @printf "CROW_IMAGE = $(CROW_IMAGE)\n" - @printf "SMOKE_IMAGE = $(SMOKE_IMAGE)\n" @printf "BUILD_IMAGE = $(BUILD_IMAGE)\n" @printf "AGENT_BINARY = $(AGENT_BINARY)\n" @printf "AGENTCTL_BINARY = $(AGENTCTL_BINARY)\n" @printf "OPERATOR_BINARY = $(OPERATOR_BINARY)\n" - @printf "CROW_BINARY = $(CROW_BINARY)\n" - @printf "SMOKE_BINARY = $(SMOKE_BINARY)\n" @printf "GOOS = $(GOOS)\n" @printf "GOARCH = $(GOARCH)\n" @printf "GOARM = $(GOARM)\n" 
diff --git a/component/all/all.go b/component/all/all.go index 3822deee7c9c..b404f27ad4eb 100644 --- a/component/all/all.go +++ b/component/all/all.go @@ -24,6 +24,7 @@ import ( _ "github.com/grafana/agent/component/discovery/nerve" // Import discovery.nerve _ "github.com/grafana/agent/component/discovery/nomad" // Import discovery.nomad _ "github.com/grafana/agent/component/discovery/openstack" // Import discovery.openstack + _ "github.com/grafana/agent/component/discovery/ovhcloud" // Import discovery.ovhcloud _ "github.com/grafana/agent/component/discovery/puppetdb" // Import discovery.puppetdb _ "github.com/grafana/agent/component/discovery/relabel" // Import discovery.relabel _ "github.com/grafana/agent/component/discovery/scaleway" // Import discovery.scaleway diff --git a/component/common/loki/wal/watcher_test.go b/component/common/loki/wal/watcher_test.go index 15644d740a28..959dad3a5ff5 100644 --- a/component/common/loki/wal/watcher_test.go +++ b/component/common/loki/wal/watcher_test.go @@ -685,9 +685,9 @@ func TestWatcher_StopAndDrainWAL(t *testing.T) { watcher.Drain() watcher.Stop() - // expecting 15s (missing 15 entries * 1 sec delay in AppendEntries) +/- 1.1s (taking into account the drain timeout + // expecting 15s (missing 15 entries * 1 sec delay in AppendEntries) +/- 2.0s (taking into account the drain timeout // has one extra second. 
- require.InDelta(t, time.Second*15, time.Since(now), float64(time.Millisecond*1100), "expected the drain procedure to take around 15s") + require.InDelta(t, time.Second*15, time.Since(now), float64(time.Millisecond*2000), "expected the drain procedure to take around 15s") require.Equal(t, int(writeTo.entriesReceived.Load()), 20, "expected the watcher to fully drain the WAL") }) @@ -737,9 +737,9 @@ func TestWatcher_StopAndDrainWAL(t *testing.T) { watcher.Drain() watcher.Stop() - // expecting 15s (missing 15 entries * 1 sec delay in AppendEntries) +/- 1.1s (taking into account the drain timeout + // expecting 15s (missing 15 entries * 1 sec delay in AppendEntries) +/- 2.0s (taking into account the drain timeout // has one extra second. - require.InDelta(t, time.Second*15, time.Since(now), float64(time.Millisecond*1100), "expected the drain procedure to take around 15s") + require.InDelta(t, time.Second*15, time.Since(now), float64(time.Millisecond*2000), "expected the drain procedure to take around 15s") require.Equal(t, int(writeTo.entriesReceived.Load()), 20, "expected the watcher to fully drain the WAL") }) @@ -790,7 +790,7 @@ func TestWatcher_StopAndDrainWAL(t *testing.T) { watcher.Drain() watcher.Stop() - require.InDelta(t, time.Second*10, time.Since(now), float64(time.Millisecond*1100), "expected the drain procedure to take around 15s") + require.InDelta(t, time.Second*10, time.Since(now), float64(time.Millisecond*2000), "expected the drain procedure to take around 15s") require.Less(t, int(writeTo.entriesReceived.Load()), 20, "expected watcher to have not consumed WAL fully") require.InDelta(t, 15, int(writeTo.entriesReceived.Load()), 1.0, "expected Watcher to consume at most +/- 1 entry from the WAL") }) diff --git a/component/discovery/aws/ec2.go b/component/discovery/aws/ec2.go index 7672165e05e0..dfc6d00f5d53 100644 --- a/component/discovery/aws/ec2.go +++ b/component/discovery/aws/ec2.go @@ -69,8 +69,9 @@ func (args EC2Arguments) Convert() 
*promaws.EC2SDConfig { } var DefaultEC2SDConfig = EC2Arguments{ - Port: 80, - RefreshInterval: 60 * time.Second, + Port: 80, + RefreshInterval: 60 * time.Second, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/aws/lightsail.go b/component/discovery/aws/lightsail.go index 3f47366cc8b7..2b414a54faff 100644 --- a/component/discovery/aws/lightsail.go +++ b/component/discovery/aws/lightsail.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/grafana/agent/component" + "github.com/grafana/agent/component/common/config" "github.com/grafana/agent/component/discovery" "github.com/grafana/river/rivertypes" promcfg "github.com/prometheus/common/config" @@ -27,34 +28,37 @@ func init() { // LightsailArguments is the configuration for AWS Lightsail based service discovery. type LightsailArguments struct { - Endpoint string `river:"endpoint,attr,optional"` - Region string `river:"region,attr,optional"` - AccessKey string `river:"access_key,attr,optional"` - SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` - Profile string `river:"profile,attr,optional"` - RoleARN string `river:"role_arn,attr,optional"` - RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` - Port int `river:"port,attr,optional"` + Endpoint string `river:"endpoint,attr,optional"` + Region string `river:"region,attr,optional"` + AccessKey string `river:"access_key,attr,optional"` + SecretKey rivertypes.Secret `river:"secret_key,attr,optional"` + Profile string `river:"profile,attr,optional"` + RoleARN string `river:"role_arn,attr,optional"` + RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` + Port int `river:"port,attr,optional"` + HTTPClientConfig config.HTTPClientConfig `river:",squash"` } func (args LightsailArguments) Convert() *promaws.LightsailSDConfig { cfg := &promaws.LightsailSDConfig{ - Endpoint: 
args.Endpoint, - Region: args.Region, - AccessKey: args.AccessKey, - SecretKey: promcfg.Secret(args.SecretKey), - Profile: args.Profile, - RoleARN: args.RoleARN, - RefreshInterval: model.Duration(args.RefreshInterval), - Port: args.Port, + Endpoint: args.Endpoint, + Region: args.Region, + AccessKey: args.AccessKey, + SecretKey: promcfg.Secret(args.SecretKey), + Profile: args.Profile, + RoleARN: args.RoleARN, + RefreshInterval: model.Duration(args.RefreshInterval), + Port: args.Port, + HTTPClientConfig: *args.HTTPClientConfig.Convert(), } return cfg } // DefaultLightsailSDConfig is the default Lightsail SD configuration. var DefaultLightsailSDConfig = LightsailArguments{ - Port: 80, - RefreshInterval: 60 * time.Second, + Port: 80, + RefreshInterval: 60 * time.Second, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/azure/azure.go b/component/discovery/azure/azure.go index 3e1ef563625c..9ed1363f5250 100644 --- a/component/discovery/azure/azure.go +++ b/component/discovery/azure/azure.go @@ -55,6 +55,8 @@ var DefaultArguments = Arguments{ Environment: azure.PublicCloud.Name, Port: 80, RefreshInterval: 5 * time.Minute, + FollowRedirects: true, + EnableHTTP2: true, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/consul/consul.go b/component/discovery/consul/consul.go index 1192bae6c6d2..de6aae2d4510 100644 --- a/component/discovery/consul/consul.go +++ b/component/discovery/consul/consul.go @@ -45,11 +45,12 @@ type Arguments struct { } var DefaultArguments = Arguments{ - Server: "localhost:8500", - TagSeparator: ",", - Scheme: "http", - AllowStale: true, - RefreshInterval: 30 * time.Second, + Server: "localhost:8500", + TagSeparator: ",", + Scheme: "http", + AllowStale: true, + RefreshInterval: 30 * time.Second, + HTTPClientConfig: config.DefaultHTTPClientConfig, } // SetToDefault implements river.Defaulter. 
diff --git a/component/discovery/digitalocean/digitalocean.go b/component/discovery/digitalocean/digitalocean.go index 360ef70ce818..bde15337da88 100644 --- a/component/discovery/digitalocean/digitalocean.go +++ b/component/discovery/digitalocean/digitalocean.go @@ -39,6 +39,8 @@ type Arguments struct { var DefaultArguments = Arguments{ Port: 80, RefreshInterval: time.Minute, + FollowRedirects: true, + EnableHTTP2: true, } // SetToDefault implements river.Defaulter. diff --git a/component/discovery/kubelet/kubelet.go b/component/discovery/kubelet/kubelet.go index ff952e2ada6e..1fecc1e88f8e 100644 --- a/component/discovery/kubelet/kubelet.go +++ b/component/discovery/kubelet/kubelet.go @@ -129,11 +129,10 @@ func NewKubeletDiscovery(args Arguments) (*Discovery, error) { Transport: transport, Timeout: 30 * time.Second, } - // ensure the path is the kubelet pods endpoint - args.URL.Path = "/pods" + // Append the path to the kubelet pods endpoint return &Discovery{ client: client, - url: args.URL.String(), + url: args.URL.String() + "/pods", targetNamespaces: args.Namespaces, }, nil } diff --git a/component/discovery/kubelet/kubelet_test.go b/component/discovery/kubelet/kubelet_test.go index b7d2c750d9d2..183f789aef70 100644 --- a/component/discovery/kubelet/kubelet_test.go +++ b/component/discovery/kubelet/kubelet_test.go @@ -1,12 +1,14 @@ package kubelet import ( + "net/url" "testing" "github.com/prometheus/prometheus/discovery/targetgroup" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/grafana/agent/component/common/config" "github.com/grafana/river" "github.com/stretchr/testify/require" ) @@ -105,3 +107,20 @@ func TestDiscoveryPodWithoutPod(t *testing.T) { require.NoError(t, err) require.Len(t, kubeletDiscovery.discoveredPodSources, 2) } + +func TestWithDefaultKubeletHost(t *testing.T) { + kubeletDiscovery, err := NewKubeletDiscovery(DefaultConfig) + require.NoError(t, err) + require.Equal(t, "https://localhost:10250/pods", 
kubeletDiscovery.url) +} + +func TestWithCustomPath(t *testing.T) { + kubeletProxyUrl, _ := url.Parse("https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy") + kubeletDiscovery, err := NewKubeletDiscovery(Arguments{ + URL: config.URL{ + URL: kubeletProxyUrl, + }, + }) + require.NoError(t, err) + require.Equal(t, "https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy/pods", kubeletDiscovery.url) +} diff --git a/component/discovery/ovhcloud/ovhcloud.go b/component/discovery/ovhcloud/ovhcloud.go new file mode 100644 index 000000000000..e3479f45a5f7 --- /dev/null +++ b/component/discovery/ovhcloud/ovhcloud.go @@ -0,0 +1,94 @@ +package ovhcloud + +import ( + "fmt" + "time" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/river/rivertypes" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" +) + +func init() { + component.Register(component.Registration{ + Name: "discovery.ovhcloud", + Args: Arguments{}, + Exports: discovery.Exports{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + return New(opts, args.(Arguments)) + }, + }) +} + +// Arguments configure the discovery.ovhcloud component. +type Arguments struct { + Endpoint string `river:"endpoint,attr,optional"` + ApplicationKey string `river:"application_key,attr"` + ApplicationSecret rivertypes.Secret `river:"application_secret,attr"` + ConsumerKey rivertypes.Secret `river:"consumer_key,attr"` + RefreshInterval time.Duration `river:"refresh_interval,attr,optional"` + Service string `river:"service,attr"` +} + +// DefaultArguments is used to initialize default values for Arguments. +var DefaultArguments = Arguments{ + Endpoint: "ovh-eu", + RefreshInterval: 60 * time.Second, +} + +// SetToDefault implements river.Defaulter. 
+func (args *Arguments) SetToDefault() { + *args = DefaultArguments +} + +// Validate implements river.Validator. +func (args *Arguments) Validate() error { + if args.Endpoint == "" { + return fmt.Errorf("endpoint cannot be empty") + } + + if args.ApplicationKey == "" { + return fmt.Errorf("application_key cannot be empty") + } + + if args.ApplicationSecret == "" { + return fmt.Errorf("application_secret cannot be empty") + } + + if args.ConsumerKey == "" { + return fmt.Errorf("consumer_key cannot be empty") + } + + switch args.Service { + case "dedicated_server", "vps": + // Valid value - do nothing. + default: + return fmt.Errorf("unknown service: %v", args.Service) + } + + return nil +} + +// Convert returns the upstream configuration struct. +func (args *Arguments) Convert() *prom_discovery.SDConfig { + return &prom_discovery.SDConfig{ + Endpoint: args.Endpoint, + ApplicationKey: args.ApplicationKey, + ApplicationSecret: config.Secret(args.ApplicationSecret), + ConsumerKey: config.Secret(args.ConsumerKey), + RefreshInterval: model.Duration(args.RefreshInterval), + Service: args.Service, + } +} + +// New returns a new instance of a discovery.ovhcloud component. 
+func New(opts component.Options, args Arguments) (*discovery.Component, error) { + return discovery.New(opts, args, func(args component.Arguments) (discovery.Discoverer, error) { + newArgs := args.(Arguments) + return prom_discovery.NewDiscovery(newArgs.Convert(), opts.Logger) + }) +} diff --git a/component/discovery/ovhcloud/ovhcloud_test.go b/component/discovery/ovhcloud/ovhcloud_test.go new file mode 100644 index 000000000000..8e579574fc67 --- /dev/null +++ b/component/discovery/ovhcloud/ovhcloud_test.go @@ -0,0 +1,135 @@ +package ovhcloud_test + +import ( + "testing" + "time" + + "github.com/grafana/agent/component/discovery/ovhcloud" + "github.com/grafana/river" + "github.com/prometheus/common/model" + prom_ovh "github.com/prometheus/prometheus/discovery/ovhcloud" + "github.com/stretchr/testify/require" +) + +func TestUnmarshal(t *testing.T) { + tests := []struct { + testName string + cfg string + expected *prom_ovh.SDConfig + errorMsg string + }{ + { + testName: "defaults", + cfg: ` + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "dedicated_server" + `, + expected: &prom_ovh.SDConfig{ + Endpoint: ovhcloud.DefaultArguments.Endpoint, + ApplicationKey: "appkey", + ApplicationSecret: "appsecret", + ConsumerKey: "consumerkey", + RefreshInterval: model.Duration(ovhcloud.DefaultArguments.RefreshInterval), + Service: "dedicated_server", + }, + }, + { + testName: "explicit", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "vps" + `, + expected: &prom_ovh.SDConfig{ + Endpoint: "custom-endpoint", + ApplicationKey: "appkey", + ApplicationSecret: "appsecret", + ConsumerKey: "consumerkey", + RefreshInterval: model.Duration(11 * time.Minute), + Service: "vps", + }, + }, + { + testName: "empty application key", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + 
application_key = "" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "vps" + `, + errorMsg: "application_key cannot be empty", + }, + { + testName: "empty application secret", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "" + consumer_key = "consumerkey" + service = "vps" + `, + errorMsg: "application_secret cannot be empty", + }, + { + testName: "empty consumer key", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "" + service = "vps" + `, + errorMsg: "consumer_key cannot be empty", + }, + { + testName: "empty endpoint", + cfg: ` + endpoint = "" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "vps" + `, + errorMsg: "endpoint cannot be empty", + }, + { + testName: "unknown service", + cfg: ` + endpoint = "custom-endpoint" + refresh_interval = "11m" + application_key = "appkey" + application_secret = "appsecret" + consumer_key = "consumerkey" + service = "asdf" + `, + errorMsg: "unknown service: asdf", + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args ovhcloud.Arguments + err := river.Unmarshal([]byte(tc.cfg), &args) + if tc.errorMsg != "" { + require.ErrorContains(t, err, tc.errorMsg) + return + } + + require.NoError(t, err) + + promArgs := args.Convert() + + require.Equal(t, tc.expected, promArgs) + }) + } +} diff --git a/component/loki/source/aws_firehose/component.go b/component/loki/source/aws_firehose/component.go index 639f8e6c7c16..12552b2f604e 100644 --- a/component/loki/source/aws_firehose/component.go +++ b/component/loki/source/aws_firehose/component.go @@ -17,6 +17,7 @@ import ( flow_relabel "github.com/grafana/agent/component/common/relabel" "github.com/grafana/agent/component/loki/source/aws_firehose/internal" 
"github.com/grafana/agent/pkg/util" + "github.com/grafana/river/rivertypes" ) func init() { @@ -32,6 +33,7 @@ func init() { type Arguments struct { Server *fnet.ServerConfig `river:",squash"` + AccessKey rivertypes.Secret `river:"access_key,attr,optional"` UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` RelabelRules flow_relabel.Rules `river:"relabel_rules,attr,optional"` @@ -131,6 +133,10 @@ func (c *Component) Update(args component.Arguments) error { handlerNeedsUpdate = true } + if c.args.AccessKey != newArgs.AccessKey { + handlerNeedsUpdate = true + } + // Since the handler is created ad-hoc for the server, and the handler depends on the relabels // consider this as a cause for server restart as well. Much simpler than adding a lock on the // handler and doing the relabel rules change on the fly @@ -159,7 +165,7 @@ func (c *Component) Update(args component.Arguments) error { if err = c.server.MountAndRun(func(router *mux.Router) { // re-create handler when server is re-computed - handler := internal.NewHandler(c, c.logger, c.handlerMetrics, c.rbs, newArgs.UseIncomingTimestamp) + handler := internal.NewHandler(c, c.logger, c.handlerMetrics, c.rbs, newArgs.UseIncomingTimestamp, string(newArgs.AccessKey)) router.Path("/awsfirehose/api/v1/push").Methods("POST").Handler(handler) }); err != nil { return err diff --git a/component/loki/source/aws_firehose/internal/handler.go b/component/loki/source/aws_firehose/internal/handler.go index 8313f1a63e7a..6a59ec0ee73b 100644 --- a/component/loki/source/aws_firehose/internal/handler.go +++ b/component/loki/source/aws_firehose/internal/handler.go @@ -5,6 +5,7 @@ import ( "bytes" "compress/gzip" "context" + "crypto/subtle" "encoding/base64" "encoding/json" "fmt" @@ -57,16 +58,18 @@ type Handler struct { sender Sender relabelRules []*relabel.Config useIncomingTs bool + accessKey string } // NewHandler creates a new handler. 
-func NewHandler(sender Sender, logger log.Logger, metrics *Metrics, rbs []*relabel.Config, useIncomingTs bool) *Handler { +func NewHandler(sender Sender, logger log.Logger, metrics *Metrics, rbs []*relabel.Config, useIncomingTs bool, accessKey string) *Handler { return &Handler{ metrics: metrics, logger: logger, sender: sender, relabelRules: rbs, useIncomingTs: useIncomingTs, + accessKey: accessKey, } } @@ -76,6 +79,16 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { defer req.Body.Close() level.Info(h.logger).Log("msg", "handling request") + // authenticate request if the component has an access key configured + if len(h.accessKey) > 0 { + apiHeader := req.Header.Get("X-Amz-Firehose-Access-Key") + + if subtle.ConstantTimeCompare([]byte(apiHeader), []byte(h.accessKey)) != 1 { + http.Error(w, "access key not provided or incorrect", http.StatusUnauthorized) + return + } + } + var bodyReader io.Reader = req.Body // firehose allows the user to configure gzip content-encoding, in that case // decompress in the reader during unmarshalling diff --git a/component/loki/source/aws_firehose/internal/handler_test.go b/component/loki/source/aws_firehose/internal/handler_test.go index 89a4ebecc0e2..c926dbc34bd6 100644 --- a/component/loki/source/aws_firehose/internal/handler_test.go +++ b/component/loki/source/aws_firehose/internal/handler_test.go @@ -313,7 +313,8 @@ func TestHandler(t *testing.T) { testReceiver := &receiver{entries: make([]loki.Entry, 0)} registry := prometheus.NewRegistry() - handler := NewHandler(testReceiver, logger, NewMetrics(registry), tc.Relabels, tc.UseIncomingTs) + accessKey := "" + handler := NewHandler(testReceiver, logger, NewMetrics(registry), tc.Relabels, tc.UseIncomingTs, accessKey) bs := bytes.NewBuffer(nil) var bodyReader io.Reader = strings.NewReader(tc.Body) @@ -360,6 +361,71 @@ func TestHandler(t *testing.T) { } } +func TestHandlerAuth(t *testing.T) { + type testcase struct { + // AccessKey configures the key 
required by the handler to accept requests + AccessKey string + + // ReqAccessKey configures the key sent in the request + ReqAccessKey string + + // ExpectedCode is the expected HTTP status code + ExpectedCode int + } + + tests := map[string]testcase{ + "auth disabled": { + AccessKey: "", + ReqAccessKey: "", + ExpectedCode: 200, + }, + "auth enabled, valid key": { + AccessKey: "fakekey", + ReqAccessKey: "fakekey", + ExpectedCode: 200, + }, + "auth enabled, invalid key": { + AccessKey: "fakekey", + ReqAccessKey: "badkey", + ExpectedCode: 401, + }, + "auth enabled, no key": { + AccessKey: "fakekey", + ReqAccessKey: "", + ExpectedCode: 401, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + w := log.NewSyncWriter(os.Stderr) + logger := log.NewLogfmtLogger(w) + + testReceiver := &receiver{entries: make([]loki.Entry, 0)} + registry := prometheus.NewRegistry() + relabeling := []*relabel.Config{} + incommingTs := false + handler := NewHandler(testReceiver, logger, NewMetrics(registry), relabeling, incommingTs, tc.AccessKey) + + body := strings.NewReader(readTestData(t, "testdata/direct_put.json")) + req, err := http.NewRequest("POST", "http://test", body) + req.Header.Set("X-Amz-Firehose-Request-Id", testRequestID) + req.Header.Set("X-Amz-Firehose-Source-Arn", testSourceARN) + req.Header.Set("X-Amz-Firehose-Protocol-Version", "1.0") + req.Header.Set("User-Agent", "Amazon Kinesis Data Firehose Agent/1.0") + if tc.ReqAccessKey != "" { + req.Header.Set("X-Amz-Firehose-Access-Key", tc.ReqAccessKey) + } + require.NoError(t, err) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + require.Equal(t, tc.ExpectedCode, recorder.Code) + }) + } +} + const cwLambdaControlMessage = `CWL CONTROL MESSAGE: Checking health of destination Firehose.` var cwLambdaLogMessages = []string{ diff --git a/component/loki/source/docker/docker.go b/component/loki/source/docker/docker.go index 400b1d30b5e6..193f27f1d7a2 100644 --- 
a/component/loki/source/docker/docker.go +++ b/component/loki/source/docker/docker.go @@ -215,6 +215,7 @@ func (c *Component) Update(args component.Arguments) error { // Convert input targets into targets to give to tailer. targets := make([]*dt.Target, 0, len(newArgs.Targets)) + seenTargets := make(map[string]struct{}, len(newArgs.Targets)) for _, target := range newArgs.Targets { containerID, ok := target[dockerLabelContainerID] @@ -222,6 +223,10 @@ func (c *Component) Update(args component.Arguments) error { level.Debug(c.opts.Logger).Log("msg", "docker target did not include container ID label:"+dockerLabelContainerID) continue } + if _, seen := seenTargets[containerID]; seen { + continue + } + seenTargets[containerID] = struct{}{} var labels = make(model.LabelSet) for k, v := range target { diff --git a/component/loki/source/docker/docker_test.go b/component/loki/source/docker/docker_test.go index 51c2a4568cff..c4b99c47388c 100644 --- a/component/loki/source/docker/docker_test.go +++ b/component/loki/source/docker/docker_test.go @@ -1,3 +1,5 @@ +//go:build !race + package docker import ( @@ -5,9 +7,11 @@ import ( "testing" "time" + "github.com/grafana/agent/component" "github.com/grafana/agent/pkg/flow/componenttest" "github.com/grafana/agent/pkg/util" "github.com/grafana/river" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -33,3 +37,39 @@ func Test(t *testing.T) { require.NoError(t, ctrl.WaitRunning(time.Minute)) } + +func TestDuplicateTargets(t *testing.T) { + // Use host that works on all platforms (including Windows). 
+ var cfg = ` + host = "tcp://127.0.0.1:9376" + targets = [ + {__meta_docker_container_id = "foo", __meta_docker_port_private = "8080"}, + {__meta_docker_container_id = "foo", __meta_docker_port_private = "8081"}, + ] + forward_to = [] + ` + + var args Arguments + err := river.Unmarshal([]byte(cfg), &args) + require.NoError(t, err) + + ctrl, err := componenttest.NewControllerFromID(util.TestLogger(t), "loki.source.docker") + require.NoError(t, err) + + go func() { + err := ctrl.Run(context.Background(), args) + require.NoError(t, err) + }() + + require.NoError(t, ctrl.WaitRunning(time.Minute)) + + cmp, err := New(component.Options{ + ID: "loki.source.docker.test", + Logger: util.TestFlowLogger(t), + Registerer: prometheus.NewRegistry(), + DataPath: t.TempDir(), + }, args) + require.NoError(t, err) + + require.Len(t, cmp.manager.tasks, 1) +} diff --git a/component/otelcol/connector/spanmetrics/spanmetrics_test.go b/component/otelcol/connector/spanmetrics/spanmetrics_test.go index 62f65966a36c..7a3a3f891504 100644 --- a/component/otelcol/connector/spanmetrics/spanmetrics_test.go +++ b/component/otelcol/connector/spanmetrics/spanmetrics_test.go @@ -1,10 +1,14 @@ package spanmetrics_test import ( + "context" "testing" "time" "github.com/grafana/agent/component/otelcol/connector/spanmetrics" + "github.com/grafana/agent/component/otelcol/processor/processortest" + "github.com/grafana/agent/pkg/flow/componenttest" + "github.com/grafana/agent/pkg/util" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" "github.com/stretchr/testify/require" @@ -331,3 +335,417 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }) } } + +func testRunProcessor(t *testing.T, processorConfig string, testSignal processortest.Signal) { + ctx := componenttest.TestContext(t) + testRunProcessorWithContext(ctx, t, processorConfig, testSignal) +} + +func testRunProcessorWithContext(ctx context.Context, t *testing.T, processorConfig 
string, testSignal processortest.Signal) { + l := util.TestLogger(t) + + ctrl, err := componenttest.NewControllerFromID(l, "otelcol.connector.spanmetrics") + require.NoError(t, err) + + var args spanmetrics.Arguments + require.NoError(t, river.Unmarshal([]byte(processorConfig), &args)) + + // Override the arguments so signals get forwarded to the test channel. + args.Output = testSignal.MakeOutput() + + prc := processortest.ProcessorRunConfig{ + Ctx: ctx, + T: t, + Args: args, + TestSignal: testSignal, + Ctrl: ctrl, + L: l, + } + processortest.TestRunProcessor(prc) +} + +func Test_ComponentIO(t *testing.T) { + const defaultInputTrace = `{ + "resourceSpans": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "res_attribute1", + "value": { "intValue": "11" } + }] + }, + "scopeSpans": [{ + "spans": [{ + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381e", + "name": "TestSpan", + "attributes": [{ + "key": "attribute1", + "value": { "intValue": "78" } + }] + }] + }] + },{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "res_attribute1", + "value": { "intValue": "11" } + }] + }, + "scopeSpans": [{ + "spans": [{ + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381b", + "name": "TestSpan", + "attributes": [{ + "key": "attribute1", + "value": { "intValue": "78" } + }] + }] + }] + }] + }` + + tests := []struct { + testName string + cfg string + inputTraceJson string + expectedOutputLogJson string + }{ + { + testName: "Sum metric only", + cfg: ` + metrics_flush_interval = "1s" + histogram { + disable = true + explicit {} + } + + output { + // no-op: will be overridden by test code. 
+ } + `, + inputTraceJson: defaultInputTrace, + expectedOutputLogJson: `{ + "resourceMetrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "res_attribute1", + "value": { "intValue": "11" } + }] + }, + "scopeMetrics": [{ + "scope": { + "name": "spanmetricsconnector" + }, + "metrics": [{ + "name": "calls", + "sum": { + "dataPoints": [{ + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + }], + "startTimeUnixNano": "0", + "timeUnixNano": "0", + "asInt": "2" + }], + "aggregationTemporality": 2, + "isMonotonic": true + } + }] + }] + }] + }`, + }, + { + testName: "Sum metric only for two spans", + cfg: ` + metrics_flush_interval = "1s" + histogram { + disable = true + explicit {} + } + + output { + // no-op: will be overridden by test code. 
+ } + `, + inputTraceJson: `{ + "resourceSpans": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + }] + }, + "scopeSpans": [{ + "spans": [{ + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381e", + "name": "TestSpan", + "attributes": [{ + "key": "attribute1", + "value": { "intValue": "78" } + }] + }] + }] + },{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "second" } + }] + }, + "scopeSpans": [{ + "spans": [{ + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381b", + "name": "TestSpan", + "attributes": [{ + "key": "attribute1", + "value": { "intValue": "78" } + }] + }] + }] + }] + }`, + expectedOutputLogJson: `{ + "resourceMetrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + }] + }, + "scopeMetrics": [{ + "scope": { + "name": "spanmetricsconnector" + }, + "metrics": [{ + "name": "calls", + "sum": { + "dataPoints": [{ + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + }], + "startTimeUnixNano": "0", + "timeUnixNano": "0", + "asInt": "1" + }], + "aggregationTemporality": 2, + "isMonotonic": true + } + }] + }] + }, + { + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "second" } + }] + }, + "scopeMetrics": [{ + "scope": { + "name": 
"spanmetricsconnector" + }, + "metrics": [{ + "name": "calls", + "sum": { + "dataPoints": [{ + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + }], + "startTimeUnixNano": "0", + "timeUnixNano": "0", + "asInt": "1" + }], + "aggregationTemporality": 2, + "isMonotonic": true + } + }] + }] + }] + }`, + }, + { + testName: "Sum and histogram", + cfg: ` + metrics_flush_interval = "1s" + histogram { + explicit { + buckets = ["5m", "10m", "30m"] + } + } + + output { + // no-op: will be overridden by test code. + } + `, + inputTraceJson: defaultInputTrace, + expectedOutputLogJson: `{ + "resourceMetrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "res_attribute1", + "value": { "intValue": "11" } + }] + }, + "scopeMetrics": [{ + "scope": { + "name": "spanmetricsconnector" + }, + "metrics": [{ + "name": "calls", + "sum": { + "dataPoints": [{ + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + }], + "startTimeUnixNano": "0", + "timeUnixNano": "0", + "asInt": "2" + }], + "aggregationTemporality": 2, + "isMonotonic": true + } + }, + { + "name": "duration", + "unit": "ms", + "histogram": { + "dataPoints": [ + { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "TestSvcName" + } + }, + { + "key": "span.name", + "value": { + "stringValue": "TestSpan" + } + }, + { + "key": "span.kind", + "value": { + "stringValue": 
"SPAN_KIND_UNSPECIFIED" + } + }, + { + "key": "status.code", + "value": { + "stringValue": "STATUS_CODE_UNSET" + } + } + ], + "count": "2", + "sum": 0, + "bucketCounts": [ "2", "0", "0", "0" ], + "explicitBounds": [ 300000, 600000, 1800000 ] + } + ], + "aggregationTemporality": 2 + } + }] + }] + }] + }`, + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + var args spanmetrics.Arguments + require.NoError(t, river.Unmarshal([]byte(tt.cfg), &args)) + + testRunProcessor(t, tt.cfg, processortest.NewTraceToMetricSignal(tt.inputTraceJson, tt.expectedOutputLogJson)) + }) + } +} diff --git a/component/otelcol/processor/processortest/processortest.go b/component/otelcol/processor/processortest/processortest.go index 3153e662c656..0298f8e9250b 100644 --- a/component/otelcol/processor/processortest/processortest.go +++ b/component/otelcol/processor/processortest/processortest.go @@ -108,6 +108,105 @@ func (s traceToLogSignal) CheckOutput(t *testing.T) { } } +// +// Trace to Metrics +// + +type traceToMetricSignal struct { + metricCh chan pmetric.Metrics + inputTrace ptrace.Traces + expectedOuutputMetric pmetric.Metrics +} + +// Any timestamps inside expectedOutputJson should be set to 0. +func NewTraceToMetricSignal(inputJson string, expectedOutputJson string) Signal { + return &traceToMetricSignal{ + metricCh: make(chan pmetric.Metrics), + inputTrace: CreateTestTraces(inputJson), + expectedOuutputMetric: CreateTestMetrics(expectedOutputJson), + } +} + +func (s traceToMetricSignal) MakeOutput() *otelcol.ConsumerArguments { + return makeMetricsOutput(s.metricCh) +} + +func (s traceToMetricSignal) ConsumeInput(ctx context.Context, consumer otelcol.Consumer) error { + return consumer.ConsumeTraces(ctx, s.inputTrace) +} + +// Set the timestamp of all data points to 0. +// This helps avoid flaky tests due to timestamps. 
+func setMetricTimestampToZero(metrics pmetric.Metrics) { + // Loop over all resource metrics + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + // Loop over all metric scopes. + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + sm := rm.ScopeMetrics().At(j) + // Loop over all metrics. + for k := 0; k < sm.Metrics().Len(); k++ { + m := sm.Metrics().At(k) + switch m.Type() { + case pmetric.MetricTypeSum: + // Loop over all data points. + for l := 0; l < m.Sum().DataPoints().Len(); l++ { + // Set the timestamp to 0 to avoid flaky tests. + dp := m.Sum().DataPoints().At(l) + dp.SetTimestamp(0) + dp.SetStartTimestamp(0) + } + case pmetric.MetricTypeGauge: + // Loop over all data points. + for l := 0; l < m.Gauge().DataPoints().Len(); l++ { + // Set the timestamp to 0 to avoid flaky tests. + dp := m.Gauge().DataPoints().At(l) + dp.SetTimestamp(0) + dp.SetStartTimestamp(0) + } + case pmetric.MetricTypeHistogram: + // Loop over all data points. + for l := 0; l < m.Histogram().DataPoints().Len(); l++ { + // Set the timestamp to 0 to avoid flaky tests. + dp := m.Histogram().DataPoints().At(l) + dp.SetTimestamp(0) + dp.SetStartTimestamp(0) + } + case pmetric.MetricTypeSummary: + // Loop over all data points. + for l := 0; l < m.Summary().DataPoints().Len(); l++ { + // Set the timestamp to 0 to avoid flaky tests. + dp := m.Summary().DataPoints().At(l) + dp.SetTimestamp(0) + dp.SetStartTimestamp(0) + } + } + } + } + } +} + +// Wait for the component to finish and check its output. +func (s traceToMetricSignal) CheckOutput(t *testing.T) { + // Set the timeout to a few seconds so that all components have finished. + // Components such as otelcol.connector.spanmetrics may need a few + // seconds before they output metrics. 
+ timeout := time.Second * 5 + + select { + case <-time.After(timeout): + require.FailNow(t, "failed waiting for metrics") + case tr := <-s.metricCh: + setMetricTimestampToZero(tr) + trStr := marshalMetrics(tr) + + expStr := marshalMetrics(s.expectedOuutputMetric) + // Set a field from the json to an empty string to avoid flaky tests containing timestamps. + + require.JSONEq(t, expStr, trStr) + } +} + // // Traces // diff --git a/component/prometheus/exporter/kafka/kafka.go b/component/prometheus/exporter/kafka/kafka.go index f146c40bae3c..e57bb69cd5a1 100644 --- a/component/prometheus/exporter/kafka/kafka.go +++ b/component/prometheus/exporter/kafka/kafka.go @@ -9,7 +9,8 @@ import ( "github.com/grafana/agent/component/prometheus/exporter" "github.com/grafana/agent/pkg/integrations" "github.com/grafana/agent/pkg/integrations/kafka_exporter" - config_util "github.com/prometheus/common/config" + "github.com/grafana/river/rivertypes" + "github.com/prometheus/common/config" ) var DefaultArguments = Arguments{ @@ -24,28 +25,28 @@ var DefaultArguments = Arguments{ } type Arguments struct { - Instance string `river:"instance,attr,optional"` - KafkaURIs []string `river:"kafka_uris,attr,optional"` - UseSASL bool `river:"use_sasl,attr,optional"` - UseSASLHandshake bool `river:"use_sasl_handshake,attr,optional"` - SASLUsername string `river:"sasl_username,attr,optional"` - SASLPassword config_util.Secret `river:"sasl_password,attr,optional"` - SASLMechanism string `river:"sasl_mechanism,attr,optional"` - UseTLS bool `river:"use_tls,attr,optional"` - CAFile string `river:"ca_file,attr,optional"` - CertFile string `river:"cert_file,attr,optional"` - KeyFile string `river:"key_file,attr,optional"` - InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` - KafkaVersion string `river:"kafka_version,attr,optional"` - UseZooKeeperLag bool `river:"use_zookeeper_lag,attr,optional"` - ZookeeperURIs []string `river:"zookeeper_uris,attr,optional"` - ClusterName string 
`river:"kafka_cluster_name,attr,optional"` - MetadataRefreshInterval string `river:"metadata_refresh_interval,attr,optional"` - AllowConcurrent bool `river:"allow_concurrency,attr,optional"` - MaxOffsets int `river:"max_offsets,attr,optional"` - PruneIntervalSeconds int `river:"prune_interval_seconds,attr,optional"` - TopicsFilter string `river:"topics_filter_regex,attr,optional"` - GroupFilter string `river:"groups_filter_regex,attr,optional"` + Instance string `river:"instance,attr,optional"` + KafkaURIs []string `river:"kafka_uris,attr,optional"` + UseSASL bool `river:"use_sasl,attr,optional"` + UseSASLHandshake bool `river:"use_sasl_handshake,attr,optional"` + SASLUsername string `river:"sasl_username,attr,optional"` + SASLPassword rivertypes.Secret `river:"sasl_password,attr,optional"` + SASLMechanism string `river:"sasl_mechanism,attr,optional"` + UseTLS bool `river:"use_tls,attr,optional"` + CAFile string `river:"ca_file,attr,optional"` + CertFile string `river:"cert_file,attr,optional"` + KeyFile string `river:"key_file,attr,optional"` + InsecureSkipVerify bool `river:"insecure_skip_verify,attr,optional"` + KafkaVersion string `river:"kafka_version,attr,optional"` + UseZooKeeperLag bool `river:"use_zookeeper_lag,attr,optional"` + ZookeeperURIs []string `river:"zookeeper_uris,attr,optional"` + ClusterName string `river:"kafka_cluster_name,attr,optional"` + MetadataRefreshInterval string `river:"metadata_refresh_interval,attr,optional"` + AllowConcurrent bool `river:"allow_concurrency,attr,optional"` + MaxOffsets int `river:"max_offsets,attr,optional"` + PruneIntervalSeconds int `river:"prune_interval_seconds,attr,optional"` + TopicsFilter string `river:"topics_filter_regex,attr,optional"` + GroupFilter string `river:"groups_filter_regex,attr,optional"` } func init() { @@ -89,11 +90,12 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns func (a *Arguments) Convert() *kafka_exporter.Config { return &kafka_exporter.Config{ + 
Instance: a.Instance, KafkaURIs: a.KafkaURIs, UseSASL: a.UseSASL, UseSASLHandshake: a.UseSASLHandshake, SASLUsername: a.SASLUsername, - SASLPassword: a.SASLPassword, + SASLPassword: config.Secret(a.SASLPassword), SASLMechanism: a.SASLMechanism, UseTLS: a.UseTLS, CAFile: a.CAFile, diff --git a/component/prometheus/exporter/kafka/kafka_test.go b/component/prometheus/exporter/kafka/kafka_test.go index 26f321dc39fa..7529677dbef6 100644 --- a/component/prometheus/exporter/kafka/kafka_test.go +++ b/component/prometheus/exporter/kafka/kafka_test.go @@ -83,6 +83,7 @@ func TestRiverConvert(t *testing.T) { } converted := orig.Convert() expected := kafka_exporter.Config{ + Instance: "example", KafkaURIs: []string{"localhost:9092", "localhost:19092"}, KafkaVersion: "2.0.0", MetadataRefreshInterval: "1m", @@ -107,3 +108,15 @@ func TestCustomizeTarget(t *testing.T) { require.Equal(t, 1, len(newTargets)) require.Equal(t, "example", newTargets[0]["instance"]) } + +func TestSASLPassword(t *testing.T) { // #6044 + var exampleRiverConfig = ` + kafka_uris = ["broker1"] + use_sasl = true + sasl_password = "foobar" + ` + + var args Arguments + err := river.Unmarshal([]byte(exampleRiverConfig), &args) + require.NoError(t, err) +} diff --git a/component/prometheus/exporter/windows/config.go b/component/prometheus/exporter/windows/config.go index cc4cb20e4b17..44568833c1cd 100644 --- a/component/prometheus/exporter/windows/config.go +++ b/component/prometheus/exporter/windows/config.go @@ -19,6 +19,7 @@ type Arguments struct { MSMQ MSMQConfig `river:"msmq,block,optional"` MSSQL MSSQLConfig `river:"mssql,block,optional"` Network NetworkConfig `river:"network,block,optional"` + PhysicalDisk PhysicalDiskConfig `river:"physical_disk,block,optional"` Process ProcessConfig `river:"process,block,optional"` ScheduledTask ScheduledTaskConfig `river:"scheduled_task,block,optional"` Service ServiceConfig `river:"service,block,optional"` @@ -38,6 +39,7 @@ func (a *Arguments) Convert() 
*windows_integration.Config { MSSQL: a.MSSQL.Convert(), Network: a.Network.Convert(), Process: a.Process.Convert(), + PhysicalDisk: a.PhysicalDisk.Convert(), ScheduledTask: a.ScheduledTask.Convert(), Service: a.Service.Convert(), SMTP: a.SMTP.Convert(), @@ -230,3 +232,17 @@ func (t LogicalDiskConfig) Convert() windows_integration.LogicalDiskConfig { Exclude: t.Exclude, } } + +// PhysicalDiskConfig handles settings for the windows_exporter physical disk collector +type PhysicalDiskConfig struct { + Include string `river:"include,attr,optional"` + Exclude string `river:"exclude,attr,optional"` +} + +// Convert converts the component's PhysicalDiskConfig to the integration's PhysicalDiskConfig. +func (t PhysicalDiskConfig) Convert() windows_integration.PhysicalDiskConfig { + return windows_integration.PhysicalDiskConfig{ + Include: t.Include, + Exclude: t.Exclude, + } +} diff --git a/component/prometheus/exporter/windows/config_default_windows_test.go b/component/prometheus/exporter/windows/config_default_windows_test.go index 9fddd1d635eb..7242ac42e525 100644 --- a/component/prometheus/exporter/windows/config_default_windows_test.go +++ b/component/prometheus/exporter/windows/config_default_windows_test.go @@ -25,6 +25,8 @@ func TestRiverUnmarshalWithDefaultConfig(t *testing.T) { require.Equal(t, DefaultArguments.MSSQL.EnabledClasses, args.MSSQL.EnabledClasses) require.Equal(t, DefaultArguments.Network.Exclude, args.Network.Exclude) require.Equal(t, DefaultArguments.Network.Include, args.Network.Include) + require.Equal(t, DefaultArguments.PhysicalDisk.Exclude, args.PhysicalDisk.Exclude) + require.Equal(t, DefaultArguments.PhysicalDisk.Include, args.PhysicalDisk.Include) require.Equal(t, DefaultArguments.Process.Exclude, args.Process.Exclude) require.Equal(t, DefaultArguments.Process.Include, args.Process.Include) require.Equal(t, DefaultArguments.ScheduledTask.Exclude, args.ScheduledTask.Exclude) diff --git a/component/prometheus/exporter/windows/config_windows.go 
b/component/prometheus/exporter/windows/config_windows.go index b634788eda8c..42270f9e241e 100644 --- a/component/prometheus/exporter/windows/config_windows.go +++ b/component/prometheus/exporter/windows/config_windows.go @@ -1,9 +1,10 @@ package windows import ( + "strings" + windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" col "github.com/prometheus-community/windows_exporter/pkg/collector" - "strings" ) // DefaultArguments holds non-zero default options for Arguments when it is @@ -44,6 +45,10 @@ var DefaultArguments = Arguments{ Include: col.ConfigDefaults.Net.NicInclude, Exclude: col.ConfigDefaults.Net.NicExclude, }, + PhysicalDisk: PhysicalDiskConfig{ + Exclude: col.ConfigDefaults.PhysicalDisk.DiskExclude, + Include: col.ConfigDefaults.PhysicalDisk.DiskInclude, + }, Process: ProcessConfig{ BlackList: col.ConfigDefaults.Process.ProcessExclude, WhiteList: col.ConfigDefaults.Process.ProcessInclude, diff --git a/component/prometheus/exporter/windows/windows_test.go b/component/prometheus/exporter/windows/windows_test.go index 8b34164f5d7c..6f15ad6e7555 100644 --- a/component/prometheus/exporter/windows/windows_test.go +++ b/component/prometheus/exporter/windows/windows_test.go @@ -34,6 +34,11 @@ var ( service { where_clause = "where" } + + physical_disk { + include = ".+" + exclude = "" + } process { include = ".+" @@ -75,6 +80,8 @@ func TestRiverUnmarshal(t *testing.T) { require.Equal(t, "", args.SMTP.Exclude) require.Equal(t, ".+", args.SMTP.Include) require.Equal(t, "where", args.Service.Where) + require.Equal(t, "", args.PhysicalDisk.Exclude) + require.Equal(t, ".+", args.PhysicalDisk.Include) require.Equal(t, "", args.Process.Exclude) require.Equal(t, ".+", args.Process.Include) require.Equal(t, "", args.Network.Exclude) @@ -102,6 +109,8 @@ func TestConvert(t *testing.T) { require.Equal(t, "", conf.SMTP.Exclude) require.Equal(t, ".+", conf.SMTP.Include) require.Equal(t, "where", conf.Service.Where) + require.Equal(t, "", 
conf.PhysicalDisk.Exclude) + require.Equal(t, ".+", conf.PhysicalDisk.Include) require.Equal(t, "", conf.Process.Exclude) require.Equal(t, ".+", conf.Process.Include) require.Equal(t, "", conf.Network.Exclude) diff --git a/component/prometheus/remotewrite/types.go b/component/prometheus/remotewrite/types.go index 473a2928e246..637059aba416 100644 --- a/component/prometheus/remotewrite/types.go +++ b/component/prometheus/remotewrite/types.go @@ -35,6 +35,7 @@ var ( MinBackoff: 30 * time.Millisecond, MaxBackoff: 5 * time.Second, RetryOnHTTP429: true, + SampleAgeLimit: 0, } DefaultMetadataOptions = MetadataOptions{ @@ -141,6 +142,7 @@ type QueueOptions struct { MinBackoff time.Duration `river:"min_backoff,attr,optional"` MaxBackoff time.Duration `river:"max_backoff,attr,optional"` RetryOnHTTP429 bool `river:"retry_on_http_429,attr,optional"` + SampleAgeLimit time.Duration `river:"sample_age_limit,attr,optional"` } // SetToDefault implements river.Defaulter. @@ -164,6 +166,7 @@ func (r *QueueOptions) toPrometheusType() config.QueueConfig { MinBackoff: model.Duration(r.MinBackoff), MaxBackoff: model.Duration(r.MaxBackoff), RetryOnRateLimit: r.RetryOnHTTP429, + SampleAgeLimit: model.Duration(r.SampleAgeLimit), } } diff --git a/component/pyroscope/write/write.go b/component/pyroscope/write/write.go index 4c20797a611c..23cdbe263e2f 100644 --- a/component/pyroscope/write/write.go +++ b/component/pyroscope/write/write.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/bufbuild/connect-go" + "connectrpc.com/connect" "github.com/grafana/agent/component/pyroscope" "github.com/grafana/agent/internal/agentseed" "github.com/grafana/agent/internal/useragent" diff --git a/component/pyroscope/write/write_test.go b/component/pyroscope/write/write_test.go index ddb26862f54b..d0c91d50e3f6 100644 --- a/component/pyroscope/write/write_test.go +++ b/component/pyroscope/write/write_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/bufbuild/connect-go" + 
"connectrpc.com/connect" "github.com/grafana/agent/component" "github.com/grafana/agent/component/pyroscope" "github.com/grafana/agent/pkg/util" diff --git a/component/remote/s3/watcher.go b/component/remote/s3/watcher.go index 45c4df2ba417..f591b2bb2cfe 100644 --- a/component/remote/s3/watcher.go +++ b/component/remote/s3/watcher.go @@ -1,7 +1,6 @@ package s3 import ( - "errors" "io" "sync" "time" @@ -100,10 +99,15 @@ func (w *watcher) getObject(ctx context.Context) ([]byte, error) { if err != nil { return []byte{}, err } + defer output.Body.Close() + buf := make([]byte, output.ContentLength) - _, err = output.Body.Read(buf) - if !errors.Is(err, io.EOF) { + + _, err = io.ReadFull(output.Body, buf) + + if err != nil { return []byte{}, err } + return buf, nil } diff --git a/converter/internal/prometheusconvert/component/ec2.go b/converter/internal/prometheusconvert/component/ec2.go index acd89755d165..5edf6ec0bac3 100644 --- a/converter/internal/prometheusconvert/component/ec2.go +++ b/converter/internal/prometheusconvert/component/ec2.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/agent/converter/internal/common" "github.com/grafana/agent/converter/internal/prometheusconvert/build" "github.com/grafana/river/rivertypes" - prom_config "github.com/prometheus/common/config" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) @@ -22,27 +21,7 @@ func appendDiscoveryEC2(pb *build.PrometheusBlocks, label string, sdConfig *prom } func ValidateDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) diag.Diagnostics { - var diags diag.Diagnostics - - var nilBasicAuth *prom_config.BasicAuth - var nilAuthorization *prom_config.Authorization - var nilOAuth2 *prom_config.OAuth2 - - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.BasicAuth, nilBasicAuth, "ec2_sd_configs basic_auth", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.Authorization, nilAuthorization, "ec2_sd_configs authorization", "")) - 
diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.OAuth2, nilOAuth2, "ec2_sd_configs oauth2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerToken, prom_config.DefaultHTTPClientConfig.BearerToken, "ec2_sd_configs bearer_token", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerTokenFile, prom_config.DefaultHTTPClientConfig.BearerTokenFile, "ec2_sd_configs bearer_token_file", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.FollowRedirects, prom_config.DefaultHTTPClientConfig.FollowRedirects, "ec2_sd_configs follow_redirects", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.EnableHTTP2, prom_config.DefaultHTTPClientConfig.EnableHTTP2, "ec2_sd_configs enable_http2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.ProxyConfig, prom_config.DefaultHTTPClientConfig.ProxyConfig, "ec2_sd_configs proxy", "")) - - // Do a last check in case any of the specific checks missed anything. 
- if len(diags) == 0 { - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig, prom_config.DefaultHTTPClientConfig, "ec2_sd_configs http_client_config", "")) - } - - return diags + return common.ValidateHttpClientConfig(&sdConfig.HTTPClientConfig) } func toDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) *aws.EC2Arguments { @@ -51,15 +30,16 @@ func toDiscoveryEC2(sdConfig *prom_aws.EC2SDConfig) *aws.EC2Arguments { } return &aws.EC2Arguments{ - Endpoint: sdConfig.Endpoint, - Region: sdConfig.Region, - AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), - Profile: sdConfig.Profile, - RoleARN: sdConfig.RoleARN, - RefreshInterval: time.Duration(sdConfig.RefreshInterval), - Port: sdConfig.Port, - Filters: toEC2Filters(sdConfig.Filters), + Endpoint: sdConfig.Endpoint, + Region: sdConfig.Region, + AccessKey: sdConfig.AccessKey, + SecretKey: rivertypes.Secret(sdConfig.SecretKey), + Profile: sdConfig.Profile, + RoleARN: sdConfig.RoleARN, + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Port: sdConfig.Port, + Filters: toEC2Filters(sdConfig.Filters), + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), } } diff --git a/converter/internal/prometheusconvert/component/lightsail.go b/converter/internal/prometheusconvert/component/lightsail.go index 10480bd082ff..9a97c2f506b4 100644 --- a/converter/internal/prometheusconvert/component/lightsail.go +++ b/converter/internal/prometheusconvert/component/lightsail.go @@ -9,7 +9,6 @@ import ( "github.com/grafana/agent/converter/internal/common" "github.com/grafana/agent/converter/internal/prometheusconvert/build" "github.com/grafana/river/rivertypes" - prom_config "github.com/prometheus/common/config" prom_aws "github.com/prometheus/prometheus/discovery/aws" ) @@ -22,27 +21,7 @@ func appendDiscoveryLightsail(pb *build.PrometheusBlocks, label string, sdConfig } func ValidateDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) 
diag.Diagnostics { - var diags diag.Diagnostics - - var nilBasicAuth *prom_config.BasicAuth - var nilAuthorization *prom_config.Authorization - var nilOAuth2 *prom_config.OAuth2 - - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.BasicAuth, nilBasicAuth, "lightsail_sd_configs basic_auth", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.Authorization, nilAuthorization, "lightsail_sd_configs authorization", "")) - diags.AddAll(common.ValidateSupported(common.NotEquals, sdConfig.HTTPClientConfig.OAuth2, nilOAuth2, "lightsail_sd_configs oauth2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerToken, prom_config.DefaultHTTPClientConfig.BearerToken, "lightsail_sd_configs bearer_token", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.BearerTokenFile, prom_config.DefaultHTTPClientConfig.BearerTokenFile, "lightsail_sd_configs bearer_token_file", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.FollowRedirects, prom_config.DefaultHTTPClientConfig.FollowRedirects, "lightsail_sd_configs follow_redirects", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.EnableHTTP2, prom_config.DefaultHTTPClientConfig.EnableHTTP2, "lightsail_sd_configs enable_http2", "")) - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig.ProxyConfig, prom_config.DefaultHTTPClientConfig.ProxyConfig, "lightsail_sd_configs proxy", "")) - - // Do a last check in case any of the specific checks missed anything. 
- if len(diags) == 0 { - diags.AddAll(common.ValidateSupported(common.NotDeepEquals, sdConfig.HTTPClientConfig, prom_config.DefaultHTTPClientConfig, "lightsail_sd_configs http_client_config", "")) - } - - return diags + return common.ValidateHttpClientConfig(&sdConfig.HTTPClientConfig) } func toDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) *aws.LightsailArguments { @@ -51,13 +30,14 @@ func toDiscoveryLightsail(sdConfig *prom_aws.LightsailSDConfig) *aws.LightsailAr } return &aws.LightsailArguments{ - Endpoint: sdConfig.Endpoint, - Region: sdConfig.Region, - AccessKey: sdConfig.AccessKey, - SecretKey: rivertypes.Secret(sdConfig.SecretKey), - Profile: sdConfig.Profile, - RoleARN: sdConfig.RoleARN, - RefreshInterval: time.Duration(sdConfig.RefreshInterval), - Port: sdConfig.Port, + Endpoint: sdConfig.Endpoint, + Region: sdConfig.Region, + AccessKey: sdConfig.AccessKey, + SecretKey: rivertypes.Secret(sdConfig.SecretKey), + Profile: sdConfig.Profile, + RoleARN: sdConfig.RoleARN, + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Port: sdConfig.Port, + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), } } diff --git a/converter/internal/prometheusconvert/component/ovhcloud.go b/converter/internal/prometheusconvert/component/ovhcloud.go new file mode 100644 index 000000000000..f4a59fd525cf --- /dev/null +++ b/converter/internal/prometheusconvert/component/ovhcloud.go @@ -0,0 +1,40 @@ +package component + +import ( + "time" + + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/discovery/ovhcloud" + "github.com/grafana/agent/converter/diag" + "github.com/grafana/agent/converter/internal/common" + "github.com/grafana/agent/converter/internal/prometheusconvert/build" + "github.com/grafana/river/rivertypes" + prom_discovery "github.com/prometheus/prometheus/discovery/ovhcloud" +) + +func appendDiscoveryOvhcloud(pb *build.PrometheusBlocks, label string, sdConfig *prom_discovery.SDConfig) 
discovery.Exports { + discoveryOvhcloudArgs := toDiscoveryOvhcloud(sdConfig) + name := []string{"discovery", "ovhcloud"} + block := common.NewBlockWithOverride(name, label, discoveryOvhcloudArgs) + pb.DiscoveryBlocks = append(pb.DiscoveryBlocks, build.NewPrometheusBlock(block, name, label, "", "")) + return common.NewDiscoveryExports("discovery.ovhcloud." + label + ".targets") +} + +func ValidateDiscoveryOvhcloud(sdConfig *prom_discovery.SDConfig) diag.Diagnostics { + return nil +} + +func toDiscoveryOvhcloud(sdConfig *prom_discovery.SDConfig) *ovhcloud.Arguments { + if sdConfig == nil { + return nil + } + + return &ovhcloud.Arguments{ + Endpoint: sdConfig.Endpoint, + ApplicationKey: sdConfig.ApplicationKey, + ApplicationSecret: rivertypes.Secret(sdConfig.ApplicationSecret), + ConsumerKey: rivertypes.Secret(sdConfig.ConsumerKey), + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Service: sdConfig.Service, + } +} diff --git a/converter/internal/prometheusconvert/component/remote_write.go b/converter/internal/prometheusconvert/component/remote_write.go index 4756f84d6674..37c4c6814a04 100644 --- a/converter/internal/prometheusconvert/component/remote_write.go +++ b/converter/internal/prometheusconvert/component/remote_write.go @@ -96,6 +96,7 @@ func toQueueOptions(queueConfig *prom_config.QueueConfig) *remotewrite.QueueOpti MinBackoff: time.Duration(queueConfig.MinBackoff), MaxBackoff: time.Duration(queueConfig.MaxBackoff), RetryOnHTTP429: queueConfig.RetryOnRateLimit, + SampleAgeLimit: time.Duration(queueConfig.SampleAgeLimit), } } diff --git a/converter/internal/prometheusconvert/component/scrape.go b/converter/internal/prometheusconvert/component/scrape.go index 651e4e2c5bc1..a2005cf85fbb 100644 --- a/converter/internal/prometheusconvert/component/scrape.go +++ b/converter/internal/prometheusconvert/component/scrape.go @@ -30,7 +30,12 @@ func AppendPrometheusScrape(pb *build.PrometheusBlocks, scrapeConfig *prom_confi func 
ValidatePrometheusScrape(scrapeConfig *prom_config.ScrapeConfig) diag.Diagnostics { var diags diag.Diagnostics + // https://github.com/grafana/agent/pull/5972#discussion_r1441980155 + diags.AddAll(common.ValidateSupported(common.NotEquals, scrapeConfig.TrackTimestampsStaleness, false, "scrape_configs track_timestamps_staleness", "")) + // https://github.com/prometheus/prometheus/commit/40240c9c1cb290fe95f1e61886b23fab860aeacd diags.AddAll(common.ValidateSupported(common.NotEquals, scrapeConfig.NativeHistogramBucketLimit, uint(0), "scrape_configs native_histogram_bucket_limit", "")) + // https://github.com/prometheus/prometheus/pull/12647 + diags.AddAll(common.ValidateSupported(common.NotEquals, scrapeConfig.KeepDroppedTargets, uint(0), "scrape_configs keep_dropped_targets", "")) diags.AddAll(common.ValidateHttpClientConfig(&scrapeConfig.HTTPClientConfig)) return diags diff --git a/converter/internal/prometheusconvert/component/service_discovery.go b/converter/internal/prometheusconvert/component/service_discovery.go index 52780475e63a..69c179f1ef9a 100644 --- a/converter/internal/prometheusconvert/component/service_discovery.go +++ b/converter/internal/prometheusconvert/component/service_discovery.go @@ -25,6 +25,7 @@ import ( prom_marathon "github.com/prometheus/prometheus/discovery/marathon" prom_docker "github.com/prometheus/prometheus/discovery/moby" prom_openstack "github.com/prometheus/prometheus/discovery/openstack" + prom_ovhcloud "github.com/prometheus/prometheus/discovery/ovhcloud" prom_scaleway "github.com/prometheus/prometheus/discovery/scaleway" prom_triton "github.com/prometheus/prometheus/discovery/triton" prom_xds "github.com/prometheus/prometheus/discovery/xds" @@ -100,6 +101,9 @@ func AppendServiceDiscoveryConfig(pb *build.PrometheusBlocks, serviceDiscoveryCo case *prom_docker.DockerSwarmSDConfig: labelCounts["dockerswarm"]++ return appendDiscoveryDockerswarm(pb, common.LabelWithIndex(labelCounts["dockerswarm"]-1, label), sdc) + case 
*prom_ovhcloud.SDConfig: + labelCounts["ovhcloud"]++ + return appendDiscoveryOvhcloud(pb, common.LabelWithIndex(labelCounts["ovhcloud"]-1, label), sdc) default: return discovery.Exports{} } @@ -151,6 +155,8 @@ func ValidateServiceDiscoveryConfig(serviceDiscoveryConfig prom_discover.Config) return ValidateDiscoveryOpenstack(sdc) case *prom_docker.DockerSwarmSDConfig: return ValidateDiscoveryDockerswarm(sdc) + case *prom_ovhcloud.SDConfig: + return ValidateDiscoveryOvhcloud(sdc) default: var diags diag.Diagnostics diags.Add(diag.SeverityLevelError, fmt.Sprintf("The converter does not support converting the provided %s service discovery.", serviceDiscoveryConfig.Name())) diff --git a/converter/internal/prometheusconvert/testdata/azure.river b/converter/internal/prometheusconvert/testdata/azure.river index 368673474b22..e1bc751bf05d 100644 --- a/converter/internal/prometheusconvert/testdata/azure.river +++ b/converter/internal/prometheusconvert/testdata/azure.river @@ -10,8 +10,6 @@ discovery.azure "prometheus1" { managed_identity { client_id = "client" } - follow_redirects = true - enable_http2 = true } discovery.azure "prometheus2" { @@ -26,8 +24,8 @@ discovery.azure "prometheus2" { managed_identity { client_id = "client" } - proxy_url = "proxy" - enable_http2 = true + proxy_url = "proxy" + follow_redirects = false } prometheus.scrape "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/consul.river b/converter/internal/prometheusconvert/testdata/consul.river index b3d7879ed47e..ccf9e8c189c3 100644 --- a/converter/internal/prometheusconvert/testdata/consul.river +++ b/converter/internal/prometheusconvert/testdata/consul.river @@ -1,13 +1,9 @@ discovery.consul "prometheus1" { - services = ["myapp"] - follow_redirects = true - enable_http2 = true + services = ["myapp"] } discovery.consul "prometheus2" { - services = ["otherapp"] - follow_redirects = true - enable_http2 = true + services = ["otherapp"] } prometheus.scrape "prometheus1" { diff --git 
a/converter/internal/prometheusconvert/testdata/digitalocean.river b/converter/internal/prometheusconvert/testdata/digitalocean.river index 4e39bf9be2c6..27b0629afc15 100644 --- a/converter/internal/prometheusconvert/testdata/digitalocean.river +++ b/converter/internal/prometheusconvert/testdata/digitalocean.river @@ -1,12 +1,6 @@ -discovery.digitalocean "prometheus1" { - follow_redirects = true - enable_http2 = true -} +discovery.digitalocean "prometheus1" { } -discovery.digitalocean "prometheus2" { - follow_redirects = true - enable_http2 = true -} +discovery.digitalocean "prometheus2" { } prometheus.scrape "prometheus1" { targets = concat( diff --git a/converter/internal/prometheusconvert/testdata/discovery.river b/converter/internal/prometheusconvert/testdata/discovery.river index 4ff3cc509ce3..f2c59fa7a4f6 100644 --- a/converter/internal/prometheusconvert/testdata/discovery.river +++ b/converter/internal/prometheusconvert/testdata/discovery.river @@ -10,8 +10,6 @@ discovery.azure "prometheus1" { managed_identity { client_id = "client1" } - follow_redirects = true - enable_http2 = true } discovery.azure "prometheus1_2" { @@ -26,8 +24,6 @@ discovery.azure "prometheus1_2" { managed_identity { client_id = "client2" } - follow_redirects = true - enable_http2 = true } discovery.relabel "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/discovery_relabel.river b/converter/internal/prometheusconvert/testdata/discovery_relabel.river index 8d37e5c3a9ad..4b1009886810 100644 --- a/converter/internal/prometheusconvert/testdata/discovery_relabel.river +++ b/converter/internal/prometheusconvert/testdata/discovery_relabel.river @@ -10,8 +10,6 @@ discovery.azure "prometheus2" { managed_identity { client_id = "client" } - follow_redirects = true - enable_http2 = true } discovery.relabel "prometheus1" { diff --git a/converter/internal/prometheusconvert/testdata/ec2.diags b/converter/internal/prometheusconvert/testdata/ec2.diags deleted file mode 100644 
index 3301a9ad2213..000000000000 --- a/converter/internal/prometheusconvert/testdata/ec2.diags +++ /dev/null @@ -1 +0,0 @@ -(Error) The converter does not support converting the provided ec2_sd_configs bearer_token_file config. \ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/ec2.river b/converter/internal/prometheusconvert/testdata/ec2.river index 22775efe8ed2..d07d133a659b 100644 --- a/converter/internal/prometheusconvert/testdata/ec2.river +++ b/converter/internal/prometheusconvert/testdata/ec2.river @@ -3,6 +3,11 @@ discovery.ec2 "prometheus1" { access_key = "YOUR_ACCESS_KEY" secret_key = "YOUR_SECRET_KEY" port = 8080 + + authorization { + type = "Bearer" + credentials_file = "/tmp/token.file" + } } discovery.ec2 "prometheus2" { diff --git a/converter/internal/prometheusconvert/testdata/lightsail.diags b/converter/internal/prometheusconvert/testdata/lightsail.diags deleted file mode 100644 index 0a96d20e3985..000000000000 --- a/converter/internal/prometheusconvert/testdata/lightsail.diags +++ /dev/null @@ -1 +0,0 @@ -(Error) The converter does not support converting the provided lightsail_sd_configs bearer_token_file config. 
\ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/lightsail.river b/converter/internal/prometheusconvert/testdata/lightsail.river index 754d9c5d39ea..4e1966490532 100644 --- a/converter/internal/prometheusconvert/testdata/lightsail.river +++ b/converter/internal/prometheusconvert/testdata/lightsail.river @@ -3,6 +3,11 @@ discovery.lightsail "prometheus1" { access_key = "YOUR_ACCESS_KEY" secret_key = "YOUR_SECRET_KEY" port = 8080 + + authorization { + type = "Bearer" + credentials_file = "/tmp/token.file" + } } discovery.lightsail "prometheus2" { diff --git a/converter/internal/prometheusconvert/testdata/ovhcloud.river b/converter/internal/prometheusconvert/testdata/ovhcloud.river new file mode 100644 index 000000000000..dff1e85bcee3 --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/ovhcloud.river @@ -0,0 +1,43 @@ +discovery.ovhcloud "prometheus1" { + application_key = "app_key" + application_secret = "app_secret" + consumer_key = "cons_key" + service = "vps" +} + +discovery.ovhcloud "prometheus2" { + endpoint = "ovh-us" + application_key = "app_key_2" + application_secret = "app_secret_2" + consumer_key = "cons_key_2" + refresh_interval = "14m0s" + service = "dedicated_server" +} + +prometheus.scrape "prometheus1" { + targets = concat( + discovery.ovhcloud.prometheus1.targets, + [{ + __address__ = "localhost:9090", + }], + ) + forward_to = [prometheus.remote_write.default.receiver] + job_name = "prometheus1" +} + +prometheus.scrape "prometheus2" { + targets = discovery.ovhcloud.prometheus2.targets + forward_to = [prometheus.remote_write.default.receiver] + job_name = "prometheus2" +} + +prometheus.remote_write "default" { + endpoint { + name = "remote1" + url = "http://remote-write-url1" + + queue_config { } + + metadata_config { } + } +} diff --git a/converter/internal/prometheusconvert/testdata/ovhcloud.yaml b/converter/internal/prometheusconvert/testdata/ovhcloud.yaml new file mode 100644 index 
000000000000..2201686989fc --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/ovhcloud.yaml @@ -0,0 +1,21 @@ +scrape_configs: + - job_name: "prometheus1" + static_configs: + - targets: ["localhost:9090"] + ovhcloud_sd_configs: + - application_key: "app_key" + application_secret: "app_secret" + consumer_key: "cons_key" + service: "vps" + - job_name: "prometheus2" + ovhcloud_sd_configs: + - application_key: "app_key_2" + application_secret: "app_secret_2" + consumer_key: "cons_key_2" + service: "dedicated_server" + endpoint: "ovh-us" + refresh_interval: "14m" + +remote_write: + - name: "remote1" + url: "http://remote-write-url1" \ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/scrape.diags b/converter/internal/prometheusconvert/testdata/scrape.diags new file mode 100644 index 000000000000..de85de6536cf --- /dev/null +++ b/converter/internal/prometheusconvert/testdata/scrape.diags @@ -0,0 +1 @@ +(Error) The converter does not support converting the provided scrape_configs track_timestamps_staleness config. 
\ No newline at end of file diff --git a/converter/internal/prometheusconvert/testdata/scrape.yaml b/converter/internal/prometheusconvert/testdata/scrape.yaml index d4b1e7e203c7..54496f296005 100644 --- a/converter/internal/prometheusconvert/testdata/scrape.yaml +++ b/converter/internal/prometheusconvert/testdata/scrape.yaml @@ -6,6 +6,7 @@ global: scrape_configs: - job_name: "prometheus-1" honor_timestamps: false + track_timestamps_staleness: true scrape_interval: 10s scrape_timeout: 5s static_configs: @@ -16,6 +17,7 @@ scrape_configs: username: 'user' password: 'pass' - job_name: "prometheus2" + track_timestamps_staleness: false static_configs: - targets: ["localhost:9091"] - targets: ["localhost:9092"] diff --git a/converter/internal/prometheusconvert/testdata/unsupported.diags b/converter/internal/prometheusconvert/testdata/unsupported.diags index ccdf9bd3da88..966bd0d1e5bf 100644 --- a/converter/internal/prometheusconvert/testdata/unsupported.diags +++ b/converter/internal/prometheusconvert/testdata/unsupported.diags @@ -5,6 +5,7 @@ (Error) The converter does not support converting the provided HTTP Client no_proxy config. (Error) The converter does not support converting the provided nomad service discovery. (Error) The converter does not support converting the provided scrape_configs native_histogram_bucket_limit config. +(Error) The converter does not support converting the provided scrape_configs keep_dropped_targets config. (Error) The converter does not support converting the provided storage config. (Error) The converter does not support converting the provided tracing config. (Error) The converter does not support converting the provided HTTP Client proxy_from_environment config. 
diff --git a/converter/internal/prometheusconvert/testdata/unsupported.yaml b/converter/internal/prometheusconvert/testdata/unsupported.yaml index bf677c030a39..5d174c36cb8e 100644 --- a/converter/internal/prometheusconvert/testdata/unsupported.yaml +++ b/converter/internal/prometheusconvert/testdata/unsupported.yaml @@ -44,6 +44,7 @@ scrape_configs: - targets: ["localhost:9091"] scrape_classic_histograms: true native_histogram_bucket_limit: 2 + keep_dropped_targets: 1000 remote_write: - name: "remote1" diff --git a/converter/internal/promtailconvert/testdata/azure.river b/converter/internal/promtailconvert/testdata/azure.river index bfbe087b6de4..90a652e05dab 100644 --- a/converter/internal/promtailconvert/testdata/azure.river +++ b/converter/internal/promtailconvert/testdata/azure.river @@ -10,8 +10,6 @@ discovery.azure "fun" { managed_identity { client_id = "client" } - follow_redirects = true - enable_http2 = true } local.file_match "fun" { diff --git a/converter/internal/promtailconvert/testdata/consul.river b/converter/internal/promtailconvert/testdata/consul.river index 20b07e0900b3..72563a502d95 100644 --- a/converter/internal/promtailconvert/testdata/consul.river +++ b/converter/internal/promtailconvert/testdata/consul.river @@ -17,8 +17,6 @@ discovery.consul "fun" { username = "toby" password = "this_password_is_safe_innit?" 
} - follow_redirects = true - enable_http2 = true } discovery.relabel "fun" { diff --git a/converter/internal/promtailconvert/testdata/digitalocean.river b/converter/internal/promtailconvert/testdata/digitalocean.river index 7308cfa33489..fb71e471c56f 100644 --- a/converter/internal/promtailconvert/testdata/digitalocean.river +++ b/converter/internal/promtailconvert/testdata/digitalocean.river @@ -1,8 +1,6 @@ discovery.digitalocean "fun" { refresh_interval = "10m0s" port = 1234 - follow_redirects = true - enable_http2 = true } local.file_match "fun" { diff --git a/converter/internal/staticconvert/internal/build/builder.go b/converter/internal/staticconvert/internal/build/builder.go index 0f92fa327363..58fedf6225c2 100644 --- a/converter/internal/staticconvert/internal/build/builder.go +++ b/converter/internal/staticconvert/internal/build/builder.go @@ -42,7 +42,8 @@ import ( app_agent_receiver_v2 "github.com/grafana/agent/pkg/integrations/v2/app_agent_receiver" blackbox_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/blackbox_exporter" common_v2 "github.com/grafana/agent/pkg/integrations/v2/common" - "github.com/grafana/agent/pkg/integrations/v2/metricsutils" + eventhandler_v2 "github.com/grafana/agent/pkg/integrations/v2/eventhandler" + metricsutils_v2 "github.com/grafana/agent/pkg/integrations/v2/metricsutils" snmp_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/snmp_exporter" vmware_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/vmware_exporter" "github.com/grafana/agent/pkg/integrations/windows_exporter" @@ -229,13 +230,15 @@ func (b *IntegrationsConfigBuilder) appendV2Integrations() { case *blackbox_exporter_v2.Config: exports = b.appendBlackboxExporterV2(itg) commonConfig = itg.Common + case *eventhandler_v2.Config: + b.appendEventHandlerV2(itg) case *snmp_exporter_v2.Config: exports = b.appendSnmpExporterV2(itg) commonConfig = itg.Common case *vmware_exporter_v2.Config: exports = b.appendVmwareExporterV2(itg) commonConfig = 
itg.Common - case *metricsutils.ConfigShim: + case *metricsutils_v2.ConfigShim: commonConfig = itg.Common switch v1_itg := itg.Orig.(type) { case *azure_exporter.Config: diff --git a/converter/internal/staticconvert/internal/build/eventhandler.go b/converter/internal/staticconvert/internal/build/eventhandler.go new file mode 100644 index 000000000000..bf816d6d451a --- /dev/null +++ b/converter/internal/staticconvert/internal/build/eventhandler.go @@ -0,0 +1,98 @@ +package build + +import ( + "fmt" + + "github.com/grafana/agent/component/common/loki" + flow_relabel "github.com/grafana/agent/component/common/relabel" + "github.com/grafana/agent/component/loki/relabel" + "github.com/grafana/agent/component/loki/source/kubernetes_events" + "github.com/grafana/agent/converter/diag" + "github.com/grafana/agent/converter/internal/common" + eventhandler_v2 "github.com/grafana/agent/pkg/integrations/v2/eventhandler" + "github.com/grafana/river/scanner" +) + +func (b *IntegrationsConfigBuilder) appendEventHandlerV2(config *eventhandler_v2.Config) { + compLabel, err := scanner.SanitizeIdentifier(b.formatJobName(config.Name(), nil)) + if err != nil { + b.diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to sanitize job name: %s", err)) + } + + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.SendTimeout, eventhandler_v2.DefaultConfig.SendTimeout, "eventhandler send_timeout", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.CachePath, eventhandler_v2.DefaultConfig.CachePath, "eventhandler cache_path", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.InformerResync, eventhandler_v2.DefaultConfig.InformerResync, "eventhandler informer_resync", "this field is not configurable in flow mode")) + b.diags.AddAll(common.ValidateSupported(common.NotDeepEquals, config.FlushInterval, 
eventhandler_v2.DefaultConfig.FlushInterval, "eventhandler flush_interval", "this field is not configurable in flow mode")) + + receiver := getLogsReceiver(config) + if len(config.ExtraLabels) > 0 { + receiver = b.injectExtraLabels(config, receiver, compLabel) + } + + args := toEventHandlerV2(config, receiver) + + b.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"loki", "source", "kubernetes_events"}, + compLabel, + args, + )) +} + +func (b *IntegrationsConfigBuilder) injectExtraLabels(config *eventhandler_v2.Config, receiver common.ConvertLogsReceiver, compLabel string) common.ConvertLogsReceiver { + var relabelConfigs []*flow_relabel.Config + for _, extraLabel := range config.ExtraLabels { + defaultConfig := flow_relabel.DefaultRelabelConfig + relabelConfig := &defaultConfig + relabelConfig.SourceLabels = []string{"__address__"} + relabelConfig.TargetLabel = extraLabel.Name + relabelConfig.Replacement = extraLabel.Value + + relabelConfigs = append(relabelConfigs, relabelConfig) + } + + relabelArgs := relabel.Arguments{ + ForwardTo: []loki.LogsReceiver{receiver}, + RelabelConfigs: relabelConfigs, + MaxCacheSize: relabel.DefaultArguments.MaxCacheSize, + } + + b.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"loki", "relabel"}, + compLabel, + relabelArgs, + )) + + return common.ConvertLogsReceiver{ + Expr: fmt.Sprintf("loki.relabel.%s.receiver", compLabel), + } +} + +func getLogsReceiver(config *eventhandler_v2.Config) common.ConvertLogsReceiver { + logsReceiver := common.ConvertLogsReceiver{} + if config.LogsInstance != "" { + compLabel, err := scanner.SanitizeIdentifier("logs_" + config.LogsInstance) + if err != nil { + panic(fmt.Errorf("failed to sanitize job name: %s", err)) + } + + logsReceiver.Expr = fmt.Sprintf("loki.write.%s.receiver", compLabel) + } + + return logsReceiver +} + +func toEventHandlerV2(config *eventhandler_v2.Config, receiver common.ConvertLogsReceiver) *kubernetes_events.Arguments { + defaultOverrides := 
kubernetes_events.DefaultArguments + defaultOverrides.Client.KubeConfig = config.KubeconfigPath + if config.Namespace != "" { + defaultOverrides.Namespaces = []string{config.Namespace} + } + + return &kubernetes_events.Arguments{ + ForwardTo: []loki.LogsReceiver{receiver}, + JobName: kubernetes_events.DefaultArguments.JobName, + Namespaces: defaultOverrides.Namespaces, + LogFormat: config.LogFormat, + Client: defaultOverrides.Client, + } +} diff --git a/converter/internal/staticconvert/internal/build/kafka_exporter.go b/converter/internal/staticconvert/internal/build/kafka_exporter.go index 25310e35a5f4..16be4275ddce 100644 --- a/converter/internal/staticconvert/internal/build/kafka_exporter.go +++ b/converter/internal/staticconvert/internal/build/kafka_exporter.go @@ -4,6 +4,7 @@ import ( "github.com/grafana/agent/component/discovery" "github.com/grafana/agent/component/prometheus/exporter/kafka" "github.com/grafana/agent/pkg/integrations/kafka_exporter" + "github.com/grafana/river/rivertypes" ) func (b *IntegrationsConfigBuilder) appendKafkaExporter(config *kafka_exporter.Config, instanceKey *string) discovery.Exports { @@ -17,7 +18,7 @@ func toKafkaExporter(config *kafka_exporter.Config) *kafka.Arguments { UseSASL: config.UseSASL, UseSASLHandshake: config.UseSASLHandshake, SASLUsername: config.SASLUsername, - SASLPassword: config.SASLPassword, + SASLPassword: rivertypes.Secret(config.SASLPassword), SASLMechanism: config.SASLMechanism, UseTLS: config.UseTLS, CAFile: config.CAFile, diff --git a/converter/internal/staticconvert/internal/build/windows_exporter.go b/converter/internal/staticconvert/internal/build/windows_exporter.go index 73aa706e8235..2f0b110a68f0 100644 --- a/converter/internal/staticconvert/internal/build/windows_exporter.go +++ b/converter/internal/staticconvert/internal/build/windows_exporter.go @@ -50,6 +50,10 @@ func toWindowsExporter(config *windows_exporter.Config) *windows.Arguments { Exclude: config.Network.Exclude, Include: 
config.Network.Include, }, + PhysicalDisk: windows.PhysicalDiskConfig{ + Exclude: config.PhysicalDisk.Exclude, + Include: config.PhysicalDisk.Include, + }, Process: windows.ProcessConfig{ BlackList: config.Process.BlackList, WhiteList: config.Process.WhiteList, diff --git a/converter/internal/staticconvert/testdata-v2/integrations_v2.river b/converter/internal/staticconvert/testdata-v2/integrations_v2.river index 5908cd4f0f5b..919af1b47286 100644 --- a/converter/internal/staticconvert/testdata-v2/integrations_v2.river +++ b/converter/internal/staticconvert/testdata-v2/integrations_v2.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_default" { endpoint { - name = "default-8be96f" + name = "default-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -21,6 +21,26 @@ logging { format = "json" } +loki.relabel "integrations_eventhandler" { + forward_to = [loki.write.logs_log_config.receiver] + + rule { + source_labels = ["__address__"] + target_label = "test_label" + replacement = "test_label_value" + } + + rule { + source_labels = ["__address__"] + target_label = "test_label_2" + replacement = "test_label_value_2" + } +} + +loki.source.kubernetes_events "integrations_eventhandler" { + forward_to = [loki.relabel.integrations_eventhandler.receiver] +} + prometheus.exporter.azure "integrations_azure1" { subscriptions = ["subId"] resource_type = "Microsoft.Dashboard/grafana" diff --git a/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml b/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml index a335c87de163..cd0c497d15cc 100644 --- a/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml +++ b/converter/internal/staticconvert/testdata-v2/integrations_v2.yaml @@ -117,6 +117,12 @@ integrations: elasticsearch_configs: - autoscrape: metrics_instance: "default" + eventhandler: + cache_path: "./.eventcache/eventhandler.cache" + logs_instance: "log_config" + extra_labels: + test_label: test_label_value + 
test_label_2: test_label_value_2 gcp_configs: - project_ids: - diff --git a/converter/internal/staticconvert/testdata-v2/unsupported.diags b/converter/internal/staticconvert/testdata-v2/unsupported.diags index 6337d505766f..cf356c13c1da 100644 --- a/converter/internal/staticconvert/testdata-v2/unsupported.diags +++ b/converter/internal/staticconvert/testdata-v2/unsupported.diags @@ -1,3 +1,6 @@ +(Error) The converter does not support converting the provided eventhandler send_timeout config: this field is not configurable in flow mode +(Error) The converter does not support converting the provided eventhandler cache_path config: this field is not configurable in flow mode +(Error) The converter does not support converting the provided eventhandler informer_resync config: this field is not configurable in flow mode +(Error) The converter does not support converting the provided eventhandler flush_interval config: this field is not configurable in flow mode (Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. -(Error) The converter does not support converting the provided eventhandler integration. (Error) The converter does not support converting the provided app_agent_receiver traces_instance config. 
\ No newline at end of file diff --git a/converter/internal/staticconvert/testdata-v2/unsupported.river b/converter/internal/staticconvert/testdata-v2/unsupported.river index 4b78abf71b43..c9585a88c5dc 100644 --- a/converter/internal/staticconvert/testdata-v2/unsupported.river +++ b/converter/internal/staticconvert/testdata-v2/unsupported.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_default" { endpoint { - name = "default-8be96f" + name = "default-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -9,6 +9,17 @@ prometheus.remote_write "metrics_default" { } } +loki.write "logs_log_config" { + endpoint { + url = "http://localhost/loki/api/v1/push" + } + external_labels = {} +} + +loki.source.kubernetes_events "integrations_eventhandler" { + forward_to = [loki.write.logs_log_config.receiver] +} + faro.receiver "integrations_app_agent_receiver" { extra_log_labels = {} diff --git a/converter/internal/staticconvert/testdata-v2/unsupported.yaml b/converter/internal/staticconvert/testdata-v2/unsupported.yaml index 13b6c44998ad..dfce6ed22e45 100644 --- a/converter/internal/staticconvert/testdata-v2/unsupported.yaml +++ b/converter/internal/staticconvert/testdata-v2/unsupported.yaml @@ -6,6 +6,13 @@ metrics: configs: - name: default +logs: + positions_directory: /path + configs: + - name: log_config + clients: + - url: http://localhost/loki/api/v1/push + integrations: app_agent_receiver_configs: - instance: "default" @@ -14,4 +21,8 @@ integrations: host: "localhost" port: 55678 eventhandler: - cache_path: "/etc/eventhandler/eventhandler.cache" \ No newline at end of file + cache_path: "/etc/eventhandler/not_default.cache" + logs_instance: "log_config" + send_timeout: 30 + informer_resync: 30 + flush_interval: 30 \ No newline at end of file diff --git a/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river b/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river index 4f9e09a37ed4..8d95a97c8b7b 100644 --- 
a/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river +++ b/converter/internal/staticconvert/testdata-v2_windows/integrations_v2.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_default" { endpoint { - name = "default-8be96f" + name = "default-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } diff --git a/converter/internal/staticconvert/testdata/prom_remote_write.river b/converter/internal/staticconvert/testdata/prom_remote_write.river index df5a9848a234..2d341fed6a5b 100644 --- a/converter/internal/staticconvert/testdata/prom_remote_write.river +++ b/converter/internal/staticconvert/testdata/prom_remote_write.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_test1" { endpoint { - name = "test1-8be96f" + name = "test1-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -11,7 +11,7 @@ prometheus.remote_write "metrics_test1" { prometheus.remote_write "metrics_test2" { endpoint { - name = "test2-533083" + name = "test2-c6d55a" url = "http://localhost:9010/api/prom/push" send_exemplars = false @@ -23,7 +23,7 @@ prometheus.remote_write "metrics_test2" { prometheus.remote_write "metrics_test3" { endpoint { - name = "test3-a3c419" + name = "test3-aa96fd" url = "http://localhost:9011/api/prom/push" queue_config { } @@ -32,7 +32,7 @@ prometheus.remote_write "metrics_test3" { } endpoint { - name = "test3-41df1c" + name = "test3-a93240" url = "http://localhost:9012/api/prom/push" queue_config { @@ -45,7 +45,7 @@ prometheus.remote_write "metrics_test3" { prometheus.remote_write "metrics_test4_sigv4_defaults" { endpoint { - name = "test4_sigv4_defaults-c42e88" + name = "test4_sigv4_defaults-f815bf" url = "http://localhost:9012/api/prom/push" queue_config { } @@ -58,7 +58,7 @@ prometheus.remote_write "metrics_test4_sigv4_defaults" { prometheus.remote_write "metrics_test5_sigv4_explicit" { endpoint { - name = "test5_sigv4_explicit-050ad5" + name = "test5_sigv4_explicit-bc8fca" url = 
"http://localhost:9012/api/prom/push" queue_config { } @@ -77,7 +77,7 @@ prometheus.remote_write "metrics_test5_sigv4_explicit" { prometheus.remote_write "metrics_test6_azuread_defaults" { endpoint { - name = "test6_azuread_defaults-50e17f" + name = "test6_azuread_defaults-cc4e7e" url = "http://localhost:9012/api/prom/push" queue_config { } @@ -94,7 +94,7 @@ prometheus.remote_write "metrics_test6_azuread_defaults" { prometheus.remote_write "metrics_test7_azuread_explicit" { endpoint { - name = "test7_azuread_explicit-0f55f1" + name = "test7_azuread_explicit-9e1a3e" url = "http://localhost:9012/api/prom/push" queue_config { } diff --git a/converter/internal/staticconvert/testdata/prom_scrape.river b/converter/internal/staticconvert/testdata/prom_scrape.river index c7db1090e90f..f0afe395531e 100644 --- a/converter/internal/staticconvert/testdata/prom_scrape.river +++ b/converter/internal/staticconvert/testdata/prom_scrape.river @@ -10,9 +10,7 @@ discovery.azure "metrics_agent_promobee" { managed_identity { client_id = "client" } - proxy_url = "proxy" - follow_redirects = true - enable_http2 = true + proxy_url = "proxy" } discovery.azure "metrics_agent_promobee_2" { @@ -27,9 +25,7 @@ discovery.azure "metrics_agent_promobee_2" { managed_identity { client_id = "client" } - proxy_url = "proxy" - follow_redirects = true - enable_http2 = true + proxy_url = "proxy" } discovery.relabel "metrics_agent_promobee" { @@ -95,7 +91,7 @@ prometheus.relabel "metrics_agent_promobee" { prometheus.remote_write "metrics_agent" { endpoint { - name = "agent-6ea089" + name = "agent-36127e" url = "https://prometheus-us-central1.grafana.net/api/prom/push" basic_auth { @@ -107,6 +103,7 @@ prometheus.remote_write "metrics_agent" { max_shards = 10 batch_send_deadline = "3m0s" max_backoff = "10s" + sample_age_limit = "50s" } metadata_config { } diff --git a/converter/internal/staticconvert/testdata/prom_scrape.yaml b/converter/internal/staticconvert/testdata/prom_scrape.yaml index 
b81e865ef5d0..afffa13a2054 100644 --- a/converter/internal/staticconvert/testdata/prom_scrape.yaml +++ b/converter/internal/staticconvert/testdata/prom_scrape.yaml @@ -19,6 +19,7 @@ metrics: batch_send_deadline: 3m max_shards: 10 max_backoff: 10s + sample_age_limit: 50s basic_auth: username: 11111 password: my-secret-password-here diff --git a/converter/internal/staticconvert/testdata/promtail_prom.river b/converter/internal/staticconvert/testdata/promtail_prom.river index f3b810dbe704..1744d37aee5c 100644 --- a/converter/internal/staticconvert/testdata/promtail_prom.river +++ b/converter/internal/staticconvert/testdata/promtail_prom.river @@ -1,7 +1,5 @@ discovery.consul "metrics_name_jobName" { - services = ["myapp"] - follow_redirects = true - enable_http2 = true + services = ["myapp"] } prometheus.scrape "metrics_name_jobName" { @@ -20,7 +18,7 @@ prometheus.scrape "metrics_name_jobName" { prometheus.remote_write "metrics_name" { endpoint { - name = "name-8be96f" + name = "name-149bbd" url = "http://localhost:9009/api/prom/push" queue_config { } @@ -48,8 +46,6 @@ discovery.consul "logs_name_jobName" { username = "toby" password = "this_password_is_safe_innit?" } - follow_redirects = true - enable_http2 = true } discovery.relabel "logs_name_jobName" { @@ -101,8 +97,6 @@ discovery.consul "logs_name2_jobName" { username = "toby" password = "this_password_is_safe_innit?" 
} - follow_redirects = true - enable_http2 = true } discovery.relabel "logs_name2_jobName" { diff --git a/converter/internal/staticconvert/testdata/sanitize.river b/converter/internal/staticconvert/testdata/sanitize.river index 1bf214eda874..eaacf45291b6 100644 --- a/converter/internal/staticconvert/testdata/sanitize.river +++ b/converter/internal/staticconvert/testdata/sanitize.river @@ -1,6 +1,6 @@ prometheus.remote_write "metrics_integrations" { endpoint { - name = "integrations-717d0f" + name = "integrations-ce3432" url = "https://region.grafana.net/api/prom/push" basic_auth { diff --git a/converter/internal/staticconvert/testdata/unsupported.river b/converter/internal/staticconvert/testdata/unsupported.river index 8c0909bb6c7f..76923a6c7f06 100644 --- a/converter/internal/staticconvert/testdata/unsupported.river +++ b/converter/internal/staticconvert/testdata/unsupported.river @@ -8,7 +8,7 @@ prometheus.scrape "metrics_agent_prometheus" { prometheus.remote_write "metrics_agent" { endpoint { - name = "agent-d885f6" + name = "agent-eea444" url = "https://prometheus-us-central1.grafana.net/api/prom/push" queue_config { } @@ -41,7 +41,7 @@ prometheus.scrape "integrations_statsd_exporter" { prometheus.remote_write "integrations" { endpoint { - name = "agent-d885f6" + name = "agent-eea444" url = "https://prometheus-us-central1.grafana.net/api/prom/push" queue_config { } diff --git a/converter/internal/staticconvert/validate.go b/converter/internal/staticconvert/validate.go index fb714af9d555..2c5aeb87c1d0 100644 --- a/converter/internal/staticconvert/validate.go +++ b/converter/internal/staticconvert/validate.go @@ -37,7 +37,8 @@ import ( apache_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/apache_http" app_agent_receiver_v2 "github.com/grafana/agent/pkg/integrations/v2/app_agent_receiver" blackbox_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/blackbox_exporter" - "github.com/grafana/agent/pkg/integrations/v2/metricsutils" + eventhandler_v2 
"github.com/grafana/agent/pkg/integrations/v2/eventhandler" + metricsutils_v2 "github.com/grafana/agent/pkg/integrations/v2/metricsutils" snmp_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/snmp_exporter" vmware_exporter_v2 "github.com/grafana/agent/pkg/integrations/v2/vmware_exporter" "github.com/grafana/agent/pkg/integrations/windows_exporter" @@ -171,9 +172,10 @@ func validateIntegrationsV2(integrationsConfig *v2.SubsystemOptions) diag.Diagno case *app_agent_receiver_v2.Config: diags.AddAll(common.ValidateSupported(common.NotEquals, itg.TracesInstance, "", "app_agent_receiver traces_instance", "")) case *blackbox_exporter_v2.Config: + case *eventhandler_v2.Config: case *snmp_exporter_v2.Config: case *vmware_exporter_v2.Config: - case *metricsutils.ConfigShim: + case *metricsutils_v2.ConfigShim: switch v1_itg := itg.Orig.(type) { case *azure_exporter.Config: case *cadvisor.Config: diff --git a/docs/developer/release/5-update-release-branch.md b/docs/developer/release/5-update-release-branch.md deleted file mode 100644 index 2cb91166850f..000000000000 --- a/docs/developer/release/5-update-release-branch.md +++ /dev/null @@ -1,20 +0,0 @@ -# Update Release Branch - -The `release` branch is a special branch that is used for grafana cloud to point at our install scripts and example kubernetes manifests. This is not to be confused with `release-VERSION_PREFIX` created in [Create Release Branch](./1-create-release-branch.md) - -## Before you begin - -1. The release tag should exist from completing [Tag Release](./4-tag-release.md) - -## Steps - -1. Force push the release tag to the `release` branch - - ``` - git fetch - git checkout main - git branch -f release VERSION - git push -f origin refs/heads/release - ``` - - > **NOTE**: This requires force push permissions on this branch. If this fails, reach out to one of the project maintainers for help. 
diff --git a/docs/developer/release/README.md b/docs/developer/release/README.md index 2c87654908f3..27bba296303b 100644 --- a/docs/developer/release/README.md +++ b/docs/developer/release/README.md @@ -8,13 +8,13 @@ The processes described here are for v0.24.0 and above. # Release Cycle -A typical release cycle is to have a Release Candidate published for at least 48 +A typical release cycle is to have a Release Candidate published for at least 48 hours followed by a Stable Release. 0 or more Patch Releases may occur between the Stable Release and the creation of the next Release Candidate. # Workflows -Once a release is scheduled, a release shepherd is determined. This person will be +Once a release is scheduled, a release shepherd is determined. This person will be responsible for ownership of the following workflows: ## Release Candidate Publish @@ -39,20 +39,18 @@ responsible for ownership of the following workflows: 2. [Update Version in Code](./3-update-version-in-code.md) 3. [Tag Release](./4-tag-release.md) 4. [Publish Release](./6-publish-release.md) -5. [Update Release Branch](./5-update-release-branch.md) -6. [Test Release](./7-test-release.md) -7. [Update Helm Charts](./8-update-helm-charts.md) -8. [Announce Release](./9-announce-release.md) -9. [Update OTEL Contrib](./10-update-otel.md) +5. [Test Release](./7-test-release.md) +6. [Update Helm Charts](./8-update-helm-charts.md) +7. [Announce Release](./9-announce-release.md) +8. [Update OTEL Contrib](./10-update-otel.md) ## Patch Release Publish (latest version) 1. [Cherry Pick Commits](./2-cherry-pick-commits.md) 2. [Update Version in Code](./3-update-version-in-code.md) 3. [Tag Release](./4-tag-release.md) 4. [Publish Release](./6-publish-release.md) -5. [Update Release Branch](./5-update-release-branch.md) -6. [Update Helm Charts](./8-update-helm-charts.md) -7. [Announce Release](./9-announce-release.md) +5. [Update Helm Charts](./8-update-helm-charts.md) +6. 
[Announce Release](./9-announce-release.md) ## Patch Release Publish (older version) - Not documented yet (but here are some hints) @@ -61,6 +59,5 @@ responsible for ownership of the following workflows: - cherry-pick commit[s] into it - don't update the version in the project on main - changes go into the changelog under the patch release version plus stay in unreleased - - don't update the `release` branch - don't publish in github as latest release - don't update deployment tools or helm charts diff --git a/docs/developer/writing-docs.md b/docs/developer/writing-docs.md index 7cd9be07eed3..b4c5be583024 100644 --- a/docs/developer/writing-docs.md +++ b/docs/developer/writing-docs.md @@ -66,6 +66,9 @@ The Reference section is a collection of pages that describe the Agent components and their configuration options exhaustively. This is a more narrow definition than the one found in the [writer's toolkit][]. +We have a dedicated page with the best practices for writing Reference +docs: [writing flow components documentation][writing-flow-docs]. + This is our most detailed documentation, and it should be used as a source of truth. The contents of the Reference pages should not be repeated in other parts of the documentation. @@ -76,3 +79,4 @@ Release notes contain all the notable changes in the Agent. They are updated as part of the release process. 
[writer's toolkit]: https://grafana.com/docs/writers-toolkit/structure/topic-types/ +[writing-flow-docs]: writing-flow-component-documentation.md diff --git a/docs/sources/_index.md b/docs/sources/_index.md index e12c414c491e..780a3800da31 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -9,7 +9,7 @@ title: Grafana Agent description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector weight: 350 cascade: - AGENT_RELEASE: v0.38.1 + AGENT_RELEASE: v0.39.0 OTEL_VERSION: v0.87.0 --- @@ -77,7 +77,7 @@ Grafana Agent can collect, transform, and send data to: * Windows - * Minimum version: Windows Server 2012 or later, or Windows 10 or later. + * Minimum version: Windows Server 2016 or later, or Windows 10 or later. * Architectures: AMD64 * macOS @@ -115,6 +115,6 @@ Patch and security releases may be created at any time. [Flow mode]: "/docs/agent/ -> /docs/agent//flow" [Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index d769d5cfffc5..549ba33ef8db 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -77,7 +77,7 @@ Grafana Agent can collect, transform, and send data to: * Windows - * Minimum version: Windows Server 2012 or later, or Windows 10 or later. + * Minimum version: Windows Server 2016 or later, or Windows 10 or later. * Architectures: AMD64 * macOS @@ -115,6 +115,6 @@ Patch and security releases may be created at any time. 
[Flow mode]: "/docs/agent/ -> /docs/agent//flow" [Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/about.md b/docs/sources/about.md index 0737815b61d6..57468c7f3e24 100644 --- a/docs/sources/about.md +++ b/docs/sources/about.md @@ -29,12 +29,12 @@ Grafana Agent is available in three different variants: [Static mode Kubernetes operator]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/operator" [Flow mode]: "/docs/agent/ -> /docs/agent//flow" [Flow mode]: "/docs/grafana-cloud/ -> /docs/agent//flow" -[Prometheus]: "/docs/agent/ -> /docs/agent//flow/getting-started/collect-prometheus-metrics.md" -[Prometheus]: "/docs/grafana-cloud/ -> /docs/agent//flow/getting-started/collect-prometheus-metrics.md" -[OTel]: "/docs/agent/ -> /docs/agent//flow/getting-started/collect-opentelemetry-data.md" -[OTel]: "/docs/grafana-cloud/ -> /docs/agent//flow/getting-started/collect-opentelemetry-data.md" -[Loki]: "/docs/agent/ -> /docs/agent//flow/getting-started/migrating-from-promtail.md" -[Loki]: "/docs/grafana-cloud/ -> /docs/agent//flow/getting-started/migrating-from-promtail.md" +[Prometheus]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" +[Prometheus]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" +[OTel]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" +[OTel]: "/docs/grafana-cloud/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" +[Loki]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md" +[Loki]: "/docs/grafana-cloud/ -> 
/docs/agent//flow/tasks/migrate/from-promtail.md" [clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering/_index.md" [clustering]: "/docs/grafana-cloud/ -> /docs/agent//flow/concepts/clustering/_index.md" [rules]: "/docs/agent/ -> /docs/agent/latest/flow/reference/components/mimir.rules.kubernetes.md" diff --git a/docs/sources/assets/getting-started/loki-config.png b/docs/sources/assets/tasks/loki-config.png similarity index 100% rename from docs/sources/assets/getting-started/loki-config.png rename to docs/sources/assets/tasks/loki-config.png diff --git a/docs/sources/assets/getting-started/otlp-lgtm-graph.png b/docs/sources/assets/tasks/otlp-lgtm-graph.png similarity index 100% rename from docs/sources/assets/getting-started/otlp-lgtm-graph.png rename to docs/sources/assets/tasks/otlp-lgtm-graph.png diff --git a/docs/sources/assets/getting-started/prometheus-config.png b/docs/sources/assets/tasks/prometheus-config.png similarity index 100% rename from docs/sources/assets/getting-started/prometheus-config.png rename to docs/sources/assets/tasks/prometheus-config.png diff --git a/docs/sources/assets/getting-started/tempo-config.png b/docs/sources/assets/tasks/tempo-config.png similarity index 100% rename from docs/sources/assets/getting-started/tempo-config.png rename to docs/sources/assets/tasks/tempo-config.png diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md index 7458fd016b03..1b95fbe29ae8 100644 --- a/docs/sources/flow/_index.md +++ b/docs/sources/flow/_index.md @@ -80,14 +80,14 @@ This feature is experimental, and it doesn't support all River components. * [Install][] {{< param "PRODUCT_NAME" >}}. * Learn about the core [Concepts][] of {{< param "PRODUCT_NAME" >}}. -* Follow our [Getting started][] guides for {{< param "PRODUCT_NAME" >}}. -* Follow our [Tutorials][] to get started with {{< param "PRODUCT_NAME" >}}. +* Follow our [Tutorials][] for hands-on learning of {{< param "PRODUCT_NAME" >}}. 
+* Consult our [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}. * Check out our [Reference][] documentation to find specific information you might be looking for. -[Install]: {{< relref "./setup/install/" >}} +[Install]: {{< relref "./get-started/install/" >}} [Concepts]: {{< relref "./concepts/" >}} -[Getting started]: {{< relref "./getting-started/" >}} +[Tasks]: {{< relref "./tasks/" >}} [Tutorials]: {{< relref "./tutorials/ ">}} [Reference]: {{< relref "./reference" >}} diff --git a/docs/sources/flow/concepts/clustering.md b/docs/sources/flow/concepts/clustering.md index 3f6f5b093639..e02a6131d4a5 100644 --- a/docs/sources/flow/concepts/clustering.md +++ b/docs/sources/flow/concepts/clustering.md @@ -76,8 +76,8 @@ Refer to [Debugging clustering issues][debugging] for additional troubleshooting [prometheus.operator.podmonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.podmonitors.md#clustering-beta" [prometheus.operator.servicemonitors]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" [prometheus.operator.servicemonitors]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/prometheus.operator.servicemonitors.md#clustering-beta" -[clustering page]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#clustering-page" -[clustering page]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#clustering-page" -[debugging]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#debugging-clustering-issues" -[debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#debugging-clustering-issues" +[clustering page]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#clustering-page" +[clustering page]: "/docs/grafana-cloud/ -> 
/docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#clustering-page" +[debugging]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#debugging-clustering-issues" +[debugging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#debugging-clustering-issues" {{% /docs/reference %}} \ No newline at end of file diff --git a/docs/sources/flow/get-started/_index.md b/docs/sources/flow/get-started/_index.md new file mode 100644 index 000000000000..80b48bfdaece --- /dev/null +++ b/docs/sources/flow/get-started/_index.md @@ -0,0 +1,25 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/get-started/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/ +# Previous docs aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/setup/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/ +- /docs/grafana-cloud/send-data/agent/flow/setup/ +- ./setup/ # /docs/agent/latest/flow/setup/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/ +description: Learn how to install and use Grafana Agent Flow +menuTitle: Get started +title: Get started with Grafana Agent Flow +weight: 50 +--- + +# Get started with {{% param "PRODUCT_NAME" %}} + +This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}}, +including installation, running the agent, overview of deployment topologies, and more. 
+ +{{< section >}} diff --git a/docs/sources/flow/get-started/deploy-agent.md b/docs/sources/flow/get-started/deploy-agent.md new file mode 100644 index 000000000000..0a76e62c42df --- /dev/null +++ b/docs/sources/flow/get-started/deploy-agent.md @@ -0,0 +1,78 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/get-started/deploy-agent/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/deploy-agent/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/deploy-agent/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/deploy-agent/ +# Previous docs aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/setup/deploy-agent/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/deploy-agent/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/deploy-agent/ +- /docs/grafana-cloud/send-data/agent/flow/setup/deploy-agent/ +- ../setup/deploy-agent/ # /docs/agent/latest/flow/setup/deploy-agent/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/deploy-agent/ +description: Learn about possible deployment topologies for Grafana Agent Flow +menuTitle: Deploy +title: Grafana Agent Flow deployment topologies +weight: 900 +--- + +{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} + +## Processing different types of telemetry in different {{< param "PRODUCT_ROOT_NAME" >}} instances + +If the load on {{< param "PRODUCT_ROOT_NAME" >}} is small, it is recommended to process all necessary telemetry signals in the same {{< param "PRODUCT_ROOT_NAME" >}} process. +For example, a single {{< param "PRODUCT_ROOT_NAME" >}} can process all of the incoming metrics, logs, traces, and profiles. + +However, if the load on the {{< param "PRODUCT_ROOT_NAME" >}}s is big, it may be beneficial to process different telemetry signals in different deployments of {{< param "PRODUCT_ROOT_NAME" >}}s. + +This provides better stability due to the isolation between processes. 
+For example, an overloaded {{< param "PRODUCT_ROOT_NAME" >}} processing traces won't impact an {{< param "PRODUCT_ROOT_NAME" >}} processing metrics. +Different types of signal collection require different methods for scaling: + +* "Pull" components such as `prometheus.scrape` and `pyroscope.scrape` are scaled using hashmod sharing or clustering. +* "Push" components such as `otelcol.receiver.otlp` are scaled by placing a load balancer in front of them. + +### Traces + +Scaling {{< param "PRODUCT_ROOT_NAME" >}} instances for tracing is very similar to [scaling OpenTelemetry Collector][scaling-collector] instances. +This similarity is because most {{< param "PRODUCT_NAME" >}} components used for tracing are based on components from the OTel Collector. + +[scaling-collector]: https://opentelemetry.io/docs/collector/scaling/ + +#### When to scale + +To decide whether scaling is necessary, check metrics such as: +* `receiver_refused_spans_ratio_total` from receivers such as `otelcol.receiver.otlp`. +* `processor_refused_spans_ratio_total` from processors such as `otelcol.processor.batch`. +* `exporter_send_failed_spans_ratio_total` from exporters such as `otelcol.exporter.otlp` and `otelcol.exporter.loadbalancing`. + +#### Stateful and stateless components + +In the context of tracing, a "stateful component" is a component +that needs to aggregate certain spans to work correctly. +A "stateless {{< param "PRODUCT_ROOT_NAME" >}}" is a {{< param "PRODUCT_ROOT_NAME" >}} which does not contain stateful components. + +Scaling stateful {{< param "PRODUCT_ROOT_NAME" >}}s is more difficult, because spans must be forwarded to a +specific {{< param "PRODUCT_ROOT_NAME" >}} according to a span property such as trace ID or a `service.name` attribute. +You can forward spans with `otelcol.exporter.loadbalancing`. 
+ +Examples of stateful components: + +* `otelcol.processor.tail_sampling` +* `otelcol.connector.spanmetrics` +* `otelcol.connector.servicegraph` + + + +A "stateless component" does not need to aggregate specific spans to work correctly - +it can work correctly even if it only has some of the spans of a trace. + +A stateless {{< param "PRODUCT_ROOT_NAME" >}} can be scaled without using `otelcol.exporter.loadbalancing`. +For example, you could use an off-the-shelf load balancer to do a round-robin load balancing. + +Examples of stateless components: +* `otelcol.processor.probabilistic_sampler` +* `otelcol.processor.transform` +* `otelcol.processor.attributes` +* `otelcol.processor.span` diff --git a/docs/sources/flow/setup/install/_index.md b/docs/sources/flow/get-started/install/_index.md similarity index 69% rename from docs/sources/flow/setup/install/_index.md rename to docs/sources/flow/get-started/install/_index.md index 8305e7bf9a39..dabb07857d74 100644 --- a/docs/sources/flow/setup/install/_index.md +++ b/docs/sources/flow/get-started/install/_index.md @@ -1,13 +1,19 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/get-started/install/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/ +# Previous docs aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/setup/install/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/ - /docs/sources/flow/install/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/ -description: Learn how to install Grafana Agent Flow -menuTitle: Install Grafana Agent Flow +- ../setup/install/ # /docs/agent/latest/flow/setup/install/ +canonical: 
https://grafana.com/docs/agent/latest/flow/get-started/install/ +description: Learn how to install Grafana Agent Flow +menuTitle: Install title: Install Grafana Agent Flow weight: 50 --- diff --git a/docs/sources/flow/setup/install/binary.md b/docs/sources/flow/get-started/install/binary.md similarity index 62% rename from docs/sources/flow/setup/install/binary.md rename to docs/sources/flow/get-started/install/binary.md index d491ad86cfe5..fa304df0acb2 100644 --- a/docs/sources/flow/setup/install/binary.md +++ b/docs/sources/flow/get-started/install/binary.md @@ -1,14 +1,20 @@ --- aliases: -- ../../install/binary/ +- /docs/grafana-cloud/agent/flow/get-started/install/binary/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/binary/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/binary/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/binary/ +# Previous docs aliases for backwards compatibility: +- ../../install/binary/ # /docs/agent/latest/flow/install/binary/ - /docs/grafana-cloud/agent/flow/setup/install/binary/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/binary/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/binary/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/binary/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/binary/ +- ../../setup/install/binary/ # /docs/agent/latest/flow/setup/install/binary/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/binary/ description: Learn how to install Grafana Agent Flow as a standalone binary menuTitle: Standalone -title: Install Grafana Agent Flow as a standalone binary +title: Install Grafana Agent Flow as a standalone binary weight: 600 --- @@ -45,12 +51,9 @@ To download {{< param "PRODUCT_NAME" >}} as a standalone binary, perform the fol ## Next steps -- [Start {{< param "PRODUCT_NAME" >}}[Start] -- [Configure {{< 
param "PRODUCT_NAME" >}}[Configure] +- [Run {{< param "PRODUCT_NAME" >}}][Run] {{% docs/reference %}} -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#standalone-binary" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#standalone-binary" -[Configure]: "/docs/agent/ -> /docs/agent//flow/setup/configure" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure/" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/binary.md" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/binary.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/docker.md b/docs/sources/flow/get-started/install/docker.md similarity index 82% rename from docs/sources/flow/setup/install/docker.md rename to docs/sources/flow/get-started/install/docker.md index 15f9e391e08e..c7884a6dc21b 100644 --- a/docs/sources/flow/setup/install/docker.md +++ b/docs/sources/flow/get-started/install/docker.md @@ -1,11 +1,17 @@ --- aliases: -- ../../install/docker/ +- /docs/grafana-cloud/agent/flow/get-started/install/docker/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/docker/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/docker/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/docker/ +# Previous docs aliases for backwards compatibility: +- ../../install/docker/ # /docs/agent/latest/flow/install/docker/ - /docs/grafana-cloud/agent/flow/setup/install/docker/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/docker/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/docker/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/docker/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/docker/ +- ../../setup/install/docker/ # /docs/agent/latest/flow/setup/install/docker/ +canonical: 
https://grafana.com/docs/agent/latest/flow/get-started/install/docker/ description: Learn how to install Grafana Agent Flow on Docker menuTitle: Docker title: Run Grafana Agent Flow in a Docker container @@ -92,6 +98,6 @@ To verify that {{< param "PRODUCT_NAME" >}} is running successfully, navigate to {{% docs/reference %}} [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" [run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/kubernetes.md b/docs/sources/flow/get-started/install/kubernetes.md similarity index 74% rename from docs/sources/flow/setup/install/kubernetes.md rename to docs/sources/flow/get-started/install/kubernetes.md index 51ab96260ca3..9326fce4bf03 100644 --- a/docs/sources/flow/setup/install/kubernetes.md +++ b/docs/sources/flow/get-started/install/kubernetes.md @@ -1,11 +1,17 @@ --- aliases: -- ../../install/kubernetes/ +- /docs/grafana-cloud/agent/flow/get-started/install/kubernetes/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/kubernetes/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/kubernetes/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/kubernetes/ +# Previous docs aliases for backwards compatibility: +- ../../install/kubernetes/ # /docs/agent/latest/flow/install/kubernetes/ - /docs/grafana-cloud/agent/flow/setup/install/kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/kubernetes/ - 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/kubernetes/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/kubernetes/ +- ../../setup/install/kubernetes/ # /docs/agent/latest/flow/setup/install/kubernetes/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/kubernetes/ description: Learn how to deploy Grafana Agent Flow on Kubernetes menuTitle: Kubernetes title: Deploy Grafana Agent Flow on Kubernetes @@ -64,6 +70,6 @@ For more information on the {{< param "PRODUCT_ROOT_NAME" >}} Helm chart, refer [Helm]: https://helm.sh {{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-kubernetes.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-kubernetes.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-kubernetes.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/linux.md b/docs/sources/flow/get-started/install/linux.md similarity index 76% rename from docs/sources/flow/setup/install/linux.md rename to docs/sources/flow/get-started/install/linux.md index 56aae580299a..2241aeb78d0a 100644 --- a/docs/sources/flow/setup/install/linux.md +++ b/docs/sources/flow/get-started/install/linux.md @@ -1,11 +1,17 @@ --- aliases: -- ../../install/linux/ +- /docs/grafana-cloud/agent/flow/get-started/install/linux/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/linux/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/linux/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/linux/ +# Previous docs aliases for backwards compatibility: +- ../../install/linux/ # 
/docs/agent/latest/flow/install/linux/ - /docs/grafana-cloud/agent/flow/setup/install/linux/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/linux/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/linux/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/linux/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/linux/ +- ../../setup/install/linux/ # /docs/agent/latest/flow/setup/install/linux/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/linux/ description: Learn how to install Grafana Agent Flow on Linux menuTitle: Linux title: Install Grafana Agent Flow on Linux @@ -119,12 +125,12 @@ To uninstall {{< param "PRODUCT_NAME" >}} on Linux, run the following commands i ## Next steps -- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] {{% docs/reference %}} -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#linux" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#linux" -[Configure]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-linux.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-linux.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/linux.md" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/linux.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/macos.md b/docs/sources/flow/get-started/install/macos.md similarity index 66% rename from docs/sources/flow/setup/install/macos.md rename to docs/sources/flow/get-started/install/macos.md index 
ad5174577aa3..9903e13ff632 100644 --- a/docs/sources/flow/setup/install/macos.md +++ b/docs/sources/flow/get-started/install/macos.md @@ -1,11 +1,17 @@ --- aliases: -- ../../install/macos/ +- /docs/grafana-cloud/agent/flow/get-started/install/macos/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/macos/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/macos/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/macos/ +# Previous docs aliases for backwards compatibility: +- ../../install/macos/ # /docs/agent/latest/flow/install/macos/ - /docs/grafana-cloud/agent/flow/setup/install/macos/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/macos/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/macos/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/macos/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/macos/ +- ../../setup/install/macos/ # /docs/agent/latest/flow/setup/install/macos/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/macos/ description: Learn how to install Grafana AgentFlow on macOS menuTitle: macOS title: Install Grafana Agent Flow on macOS @@ -66,14 +72,14 @@ brew uninstall grafana-agent-flow ## Next steps -- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [Homebrew]: https://brew.sh {{% docs/reference %}} -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#macos" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#macos" -[Configure]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-macos.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-macos.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/macos.md" +[Run]: 
"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/macos.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/install/windows.md b/docs/sources/flow/get-started/install/windows.md similarity index 76% rename from docs/sources/flow/setup/install/windows.md rename to docs/sources/flow/get-started/install/windows.md index 765e9fdd3ac4..2be2fabc6019 100644 --- a/docs/sources/flow/setup/install/windows.md +++ b/docs/sources/flow/get-started/install/windows.md @@ -1,11 +1,17 @@ --- aliases: -- ../../install/windows/ +- /docs/grafana-cloud/agent/flow/get-started/install/windows/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/windows/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/windows/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/install/windows/ +# Previous docs aliases for backwards compatibility: +- ../../install/windows/ # /docs/agent/latest/flow/install/windows/ - /docs/grafana-cloud/agent/flow/setup/install/windows/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/install/windows/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/install/windows/ - /docs/grafana-cloud/send-data/agent/flow/setup/install/windows/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/install/windows/ +- ../../setup/install/windows/ # /docs/agent/latest/flow/setup/install/windows/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/windows/ description: Learn how to install Grafana Agent Flow on Windows menuTitle: Windows title: Install Grafana Agent Flow on Windows @@ -78,16 +84,16 @@ This includes any configuration files in the installation directory. 
## Next steps -- [Start {{< param "PRODUCT_NAME" >}}][Start] +- [Run {{< param "PRODUCT_NAME" >}}][Start] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [latest]: https://github.com/grafana/agent/releases/latest {{% docs/reference %}} -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md#windows" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md#windows" -[Configure]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-windows.md" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-windows.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/windows.md" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/windows.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" [data collection]: "/docs/agent/ -> /docs/agent//data-collection.md" [data collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/data-collection.md" {{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/_index.md b/docs/sources/flow/get-started/run/_index.md new file mode 100644 index 000000000000..f98f8707354f --- /dev/null +++ b/docs/sources/flow/get-started/run/_index.md @@ -0,0 +1,31 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/get-started/run/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/ +- /docs/grafana-cloud/send-data/agent/flow/get-started/run/ +- /docs/sources/flow/run/ +# Previous pages aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/setup/start-agent/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent/ +- 
/docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/start-agent/ +- /docs/grafana-cloud/send-data/agent/flow/setup/start-agent/ +- ../setup/start-agent/ # /docs/agent/latest/flow/setup/start-agent/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/ +description: Learn how to run Grafana Agent Flow +menuTitle: Run +title: Run Grafana Agent Flow +weight: 50 +--- + +# Run {{% param "PRODUCT_NAME" %}} + +Use the following pages to learn how to start, restart, and stop {{< param "PRODUCT_NAME" >}} after it is installed. +For installation instructions, refer to [Install {{< param "PRODUCT_NAME" >}}][Install]. + +{{< section >}} + +{{% docs/reference %}} +[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" +[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" +{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/binary.md b/docs/sources/flow/get-started/run/binary.md new file mode 100644 index 000000000000..7f9fda22ff77 --- /dev/null +++ b/docs/sources/flow/get-started/run/binary.md @@ -0,0 +1,126 @@ +--- +aliases: + - /docs/grafana-cloud/agent/flow/get-started/run/binary/ + - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/binary/ + - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/binary/ + - /docs/grafana-cloud/send-data/agent/flow/get-started/run/binary/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/binary/ +description: Learn how to run Grafana Agent Flow as a standalone binary +menuTitle: Standalone +title: Run Grafana Agent Flow as a standalone binary +weight: 600 +--- + +# Run {{% param "PRODUCT_NAME" %}} as a standalone binary + +If you [downloaded][InstallBinary] the standalone binary, you must run {{< param "PRODUCT_NAME" >}} from a terminal or command window. 
+ +## Start {{% param "PRODUCT_NAME" %}} on Linux, macOS, or FreeBSD + +To start {{< param "PRODUCT_NAME" >}} on Linux, macOS, or FreeBSD, run the following command in a terminal window: + +```shell +AGENT_MODE=flow run +``` + +Replace the following: + +* _``_: The path to the {{< param "PRODUCT_NAME" >}} binary file. +* _``_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. + +## Start {{% param "PRODUCT_NAME" %}} on Windows + +To start {{< param "PRODUCT_NAME" >}} on Windows, run the following commands in a command prompt: + +```cmd +set AGENT_MODE=flow + run +``` + +Replace the following: + +* _``_: The path to the {{< param "PRODUCT_NAME" >}} binary file. +* _``_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. + +## Set up {{% param "PRODUCT_NAME" %}} as a Linux systemd service + +You can set up and manage the standalone binary for {{< param "PRODUCT_NAME" >}} as a Linux systemd service. + +{{% admonition type="note" %}} +These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} configuration. +{{% /admonition %}} + +1. To create a new user called `grafana-agent-flow` run the following command in a terminal window: + + ```shell + sudo useradd --no-create-home --shell /bin/false grafana-agent-flow + ``` + +1. Create a service file in `/etc/systemd/system` called `grafana-agent-flow.service` with the following contents: + + ```systemd + [Unit] + Description=Vendor-neutral programmable observability pipelines. 
+ Documentation=https://grafana.com/docs/agent/latest/flow/ + Wants=network-online.target + After=network-online.target + + [Service] + Restart=always + User=grafana-agent-flow + Environment=HOSTNAME=%H + EnvironmentFile=/etc/default/grafana-agent-flow + WorkingDirectory= + ExecStart= run $CUSTOM_ARGS --storage.path= $CONFIG_FILE + ExecReload=/usr/bin/env kill -HUP $MAINPID + TimeoutStopSec=20s + SendSIGKILL=no + + [Install] + WantedBy=multi-user.target + ``` + + Replace the following: + + * _``_: The path to the {{< param "PRODUCT_NAME" >}} binary file. + * _``_: The path to a working directory, for example `/var/lib/grafana-agent-flow`. + +1. Create an environment file in `/etc/default/` called `grafana-agent-flow` with the following contents: + + ```shell + ## Path: + ## Description: Grafana Agent Flow settings + ## Type: string + ## Default: "" + ## ServiceRestart: grafana-agent-flow + # + # Command line options for grafana-agent + # + # The configuration file holding the Grafana Agent Flow configuration. + CONFIG_FILE="" + + # User-defined arguments to pass to the run command. + CUSTOM_ARGS="" + + # Restart on system upgrade. Defaults to true. + RESTART_ON_UPGRADE=true + ``` + + Replace the following: + + * _``_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. + +1. To reload the service files, run the following command in a terminal window: + + ```shell + sudo systemctl daemon-reload + ``` + +1. Use the [Linux][StartLinux] systemd commands to manage your standalone Linux installation of {{< param "PRODUCT_NAME" >}}. 
+ +{{% docs/reference %}} +[InstallBinary]: "/docs/agent/ -> /docs/agent//flow/get-started/install/binary.md" +[InstallBinary]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/binary.md" +[StartLinux]: "/docs/agent/ -> /docs/agent//flow/get-started/run/linux.md" +[StartLinux]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/run/linux.md" +{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/linux.md b/docs/sources/flow/get-started/run/linux.md new file mode 100644 index 000000000000..1085aaabdf46 --- /dev/null +++ b/docs/sources/flow/get-started/run/linux.md @@ -0,0 +1,75 @@ +--- +aliases: + - /docs/grafana-cloud/agent/flow/get-started/run/linux/ + - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/linux/ + - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/linux/ + - /docs/grafana-cloud/send-data/agent/flow/get-started/run/linux/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/linux/ +description: Learn how to run Grafana Agent Flow on Linux +menuTitle: Linux +title: Run Grafana Agent Flow on Linux +weight: 300 +--- + +# Run {{% param "PRODUCT_NAME" %}} on Linux + +{{< param "PRODUCT_NAME" >}} is [installed][InstallLinux] as a [systemd][] service on Linux. 
+ +[systemd]: https://systemd.io/ + +## Start {{% param "PRODUCT_NAME" %}} + +To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +sudo systemctl start grafana-agent-flow +``` + +(Optional) To verify that the service is running, run the following command in a terminal window: + +```shell +sudo systemctl status grafana-agent-flow +``` + +## Configure {{% param "PRODUCT_NAME" %}} to start at boot + +To automatically run {{< param "PRODUCT_NAME" >}} when the system starts, run the following command in a terminal window: + +```shell +sudo systemctl enable grafana-agent-flow.service +``` + +## Restart {{% param "PRODUCT_NAME" %}} + +To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +sudo systemctl restart grafana-agent-flow +``` + +## Stop {{% param "PRODUCT_NAME" %}} + +To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +sudo systemctl stop grafana-agent-flow +``` + +## View {{% param "PRODUCT_NAME" %}} logs on Linux + +To view {{< param "PRODUCT_NAME" >}} log files, run the following command in a terminal window: + +```shell +sudo journalctl -u grafana-agent-flow +``` + +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +{{% docs/reference %}} +[InstallLinux]: "/docs/agent/ -> /docs/agent//flow/get-started/install/linux.md" +[InstallLinux]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/linux.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md" +{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/macos.md b/docs/sources/flow/get-started/run/macos.md new file mode 100644 index 000000000000..8c7a055dd853 --- /dev/null +++ b/docs/sources/flow/get-started/run/macos.md @@ -0,0 +1,69 @@ +--- 
+aliases: + - /docs/grafana-cloud/agent/flow/get-started/run/macos/ + - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/macos/ + - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/macos/ + - /docs/grafana-cloud/send-data/agent/flow/get-started/run/macos/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/macos/ +description: Learn how to run Grafana Agent Flow on macOS +menuTitle: macOS +title: Run Grafana Agent Flow on macOS +weight: 400 +--- + +# Run {{% param "PRODUCT_NAME" %}} on macOS + +{{< param "PRODUCT_NAME" >}} is [installed][InstallMacOS] as a launchd service on macOS. + +## Start {{% param "PRODUCT_NAME" %}} + +To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services start grafana-agent-flow +``` + +{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. + +(Optional) To verify that the service is running, run the following command in a terminal window: + +```shell +brew services info grafana-agent-flow +``` + +## Restart {{% param "PRODUCT_NAME" %}} + +To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services restart grafana-agent-flow +``` + +## Stop {{% param "PRODUCT_NAME" %}} + +To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: + +```shell +brew services stop grafana-agent-flow +``` + +## View {{% param "PRODUCT_NAME" %}} logs on macOS + +By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and +`$(brew --prefix)/var/log/grafana-agent-flow.err.log`. + +If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][ConfigureService] and changed the path where logs are written, +refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. 
+ +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][ConfigureMacOS] + +{{% docs/reference %}} +[InstallMacOS]: "/docs/agent/ -> /docs/agent//flow/get-started/install/macos.md" +[InstallMacOS]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/flow/get-started/install/macos.md" +[ConfigureMacOS]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md" +[ConfigureMacOS]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md" +[ConfigureService]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-flow-service" +[ConfigureService]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos.md#configure-the-grafana-agent-flow-service" +{{% /docs/reference %}} diff --git a/docs/sources/flow/get-started/run/windows.md b/docs/sources/flow/get-started/run/windows.md new file mode 100644 index 000000000000..2ee89710b028 --- /dev/null +++ b/docs/sources/flow/get-started/run/windows.md @@ -0,0 +1,54 @@ +--- +aliases: + - /docs/grafana-cloud/agent/flow/get-started/run/windows/ + - /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/run/windows/ + - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/run/windows/ + - /docs/grafana-cloud/send-data/agent/flow/get-started/run/windows/ +canonical: https://grafana.com/docs/agent/latest/flow/get-started/run/windows/ +description: Learn how to run Grafana Agent Flow on Windows +menuTitle: Windows +title: Run Grafana Agent Flow on Windows +weight: 500 +--- + +# Run {{% param "PRODUCT_NAME" %}} on Windows + +{{< param "PRODUCT_NAME" >}} is [installed][InstallWindows] as a Windows Service. The service is configured to automatically run on startup. + +To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: + +1. Open the Windows Services manager (services.msc): + + 1. 
Right click on the Start Menu and select **Run**. + + 1. Type: `services.msc` and click **OK**. + +1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**. + +## View {{% param "PRODUCT_NAME" %}} logs + +When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event +Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. + +To view the logs, perform the following steps: + +1. Open the Event Viewer: + + 1. Right click on the Start Menu and select **Run**. + + 1. Type `eventvwr` and click **OK**. + +1. In the Event Viewer, click on **Windows Logs > Application**. + +1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**. + +## Next steps + +- [Configure {{< param "PRODUCT_NAME" >}}][Configure] + +{{% docs/reference %}} +[InstallWindows]: "/docs/agent/ -> /docs/agent//flow/get-started/install/windows.md" +[InstallWindows]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/windows.md" +[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-windows.md" +[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows.md" +{{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/_index.md b/docs/sources/flow/getting-started/_index.md deleted file mode 100644 index bc989084f42b..000000000000 --- a/docs/sources/flow/getting-started/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/ -- /docs/grafana-cloud/send-data/agent/flow/getting-started/ -- getting_started/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/ -description: Learn how to use Grafana Agent Flow -menuTitle: Get started -title: Get started with
Grafana Agent Flow -weight: 200 ---- - -# Get started with {{% param "PRODUCT_NAME" %}} - -This section details guides for getting started with {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/monitoring/_index.md b/docs/sources/flow/monitoring/_index.md deleted file mode 100644 index 729bbaa8ff89..000000000000 --- a/docs/sources/flow/monitoring/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/ -- /docs/grafana-cloud/send-data/agent/flow/monitoring/ -canonical: https://grafana.com/docs/agent/latest/flow/monitoring/ -description: Learn about monitoring Grafana Agent Flow -title: Monitoring Grafana Agent Flow -menuTitle: Monitoring -weight: 500 ---- - -# Monitoring {{% param "PRODUCT_NAME" %}} - -This section details various ways to monitor and debug {{< param "PRODUCT_NAME" >}}. - -{{< section >}} diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md index 88c9628fde31..a9a3810ec3ee 100644 --- a/docs/sources/flow/reference/cli/convert.md +++ b/docs/sources/flow/reference/cli/convert.md @@ -82,7 +82,7 @@ This includes Prometheus features such as and many supported *_sd_configs. Unsupported features in a source configuration result in [errors]. -Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}]({{< relref "../../getting-started/migrating-from-prometheus/" >}}) for a detailed migration guide. +Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-prometheus/" >}}) for a detailed migration guide. 
### Promtail @@ -96,7 +96,7 @@ are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration If you have unsupported features in a source configuration, you will receive [errors] when you convert to a flow configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}]({{< relref "../../getting-started/migrating-from-promtail/" >}}) for a detailed migration guide. +Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-promtail/" >}}) for a detailed migration guide. ### Static @@ -113,4 +113,4 @@ flags with a space between each flag, for example `--extra-args="-enable-feature If you have unsupported features in a Static mode source configuration, you will receive [errors][] when you convert to a Flow mode configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../getting-started/migrating-from-static/" >}}) for a detailed migration guide. \ No newline at end of file +Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide. \ No newline at end of file diff --git a/docs/sources/flow/reference/cli/run.md b/docs/sources/flow/reference/cli/run.md index 4a7adc21b7b2..4da0df47a473 100644 --- a/docs/sources/flow/reference/cli/run.md +++ b/docs/sources/flow/reference/cli/run.md @@ -171,7 +171,7 @@ transitions to the terminating state when shutting down. The current state of a clustered {{< param "PRODUCT_ROOT_NAME" >}} is shown on the clustering page in the [UI][]. 
-[UI]: {{< relref "../../monitoring/debugging.md#clustering-page" >}} +[UI]: {{< relref "../../tasks/debug.md#clustering-page" >}} ## Configuration conversion (beta) diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/flow/reference/compatibility/_index.md index 9e225aa74b71..88121838f571 100644 --- a/docs/sources/flow/reference/compatibility/_index.md +++ b/docs/sources/flow/reference/compatibility/_index.md @@ -20,13 +20,13 @@ that can export or consume it. {{% admonition type="note" %}} -> The type of export may not be the only requirement for chaining components together. -> The value of an attribute may matter as well as its type. -> Please refer to each component's documentation for more details on what values are acceptable. -> -> For example: -> * A Prometheus component may always expect an `"__address__"` label inside a list of targets. -> * A `string` argument may only accept certain values like "traceID" or "spanID". +The type of export may not be the only requirement for chaining components together. +The value of an attribute may matter as well as its type. +Please refer to each component's documentation for more details on what values are acceptable. + +For example: +* A Prometheus component may always expect an `"__address__"` label inside a list of targets. +* A `string` argument may only accept certain values like "traceID" or "spanID". {{% /admonition %}} @@ -68,6 +68,7 @@ The following components, grouped by namespace, _export_ Targets. 
- [discovery.nerve]({{< relref "../components/discovery.nerve.md" >}}) - [discovery.nomad]({{< relref "../components/discovery.nomad.md" >}}) - [discovery.openstack]({{< relref "../components/discovery.openstack.md" >}}) +- [discovery.ovhcloud]({{< relref "../components/discovery.ovhcloud.md" >}}) - [discovery.puppetdb]({{< relref "../components/discovery.puppetdb.md" >}}) - [discovery.relabel]({{< relref "../components/discovery.relabel.md" >}}) - [discovery.scaleway]({{< relref "../components/discovery.scaleway.md" >}}) diff --git a/docs/sources/flow/reference/components/discovery.consul.md b/docs/sources/flow/reference/components/discovery.consul.md index 7737131a6aa8..c63f94b8017c 100644 --- a/docs/sources/flow/reference/components/discovery.consul.md +++ b/docs/sources/flow/reference/components/discovery.consul.md @@ -70,6 +70,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.docker.md b/docs/sources/flow/reference/components/discovery.docker.md index a9143373e12b..4d6ce94d557f 100644 --- a/docs/sources/flow/reference/components/discovery.docker.md +++ b/docs/sources/flow/reference/components/discovery.docker.md @@ -60,6 +60,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. 
| no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.dockerswarm.md b/docs/sources/flow/reference/components/discovery.dockerswarm.md index f6db10bd440c..58c065fb06eb 100644 --- a/docs/sources/flow/reference/components/discovery.dockerswarm.md +++ b/docs/sources/flow/reference/components/discovery.dockerswarm.md @@ -48,6 +48,7 @@ The following blocks are supported inside the definition of | authorization | [authorization][] | Configure generic authorization to the endpoint. | no | | oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no | | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | +| tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.ec2.md b/docs/sources/flow/reference/components/discovery.ec2.md index e2964d2df0a3..7f01ae48c6e0 100644 --- a/docs/sources/flow/reference/components/discovery.ec2.md +++ b/docs/sources/flow/reference/components/discovery.ec2.md @@ -39,6 +39,15 @@ Name | Type | Description | Default | Required `proxy_url` | `string` | HTTP proxy to proxy requests through. | | no `follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`bearer_token` | `secret` | Bearer token to authenticate with. 
| | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no + + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. ## Blocks @@ -47,9 +56,21 @@ The following blocks are supported inside the definition of Hierarchy | Block | Description | Required --------- | ----- | ----------- | -------- +basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the endpoint. | no +authorization | [authorization][] | Configure generic authorization to the endpoint. | no filter | [filter][] | Filters discoverable resources. | no +oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no +oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. 
| no [filter]: #filter-block +[authorization]: #authorization-block +[oauth2]: #oauth2-block +[tls_config]: #tls_config-block + +### authorization block + +{{< docs/shared lookup="flow/reference/components/authorization-block.md" source="agent" version="" >}} ### filter block @@ -65,6 +86,14 @@ Refer to the [Filter API AWS EC2 documentation][filter api] for the list of supp [filter api]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Filter.html +### oauth2 block + +{{< docs/shared lookup="flow/reference/components/oauth2-block.md" source="agent" version="" >}} + +### tls_config block + +{{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} + ## Exported fields The following fields are exported and can be referenced by other components: diff --git a/docs/sources/flow/reference/components/discovery.eureka.md b/docs/sources/flow/reference/components/discovery.eureka.md index 952e90af1ce4..70ab3f8f666d 100644 --- a/docs/sources/flow/reference/components/discovery.eureka.md +++ b/docs/sources/flow/reference/components/discovery.eureka.md @@ -32,8 +32,20 @@ Name | Type | Description `server` | `string` | Eureka server URL. | | yes `refresh_interval` | `duration` | Interval at which to refresh the list of targets. | `30s` | no `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no `follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. 
+ +[arguments]: #arguments + ## Blocks The following blocks are supported inside the definition of `discovery.eureka`: @@ -44,6 +56,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.hetzner.md b/docs/sources/flow/reference/components/discovery.hetzner.md index ce92bda3cb2d..c6922e685f66 100644 --- a/docs/sources/flow/reference/components/discovery.hetzner.md +++ b/docs/sources/flow/reference/components/discovery.hetzner.md @@ -62,6 +62,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. 
For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.http.md b/docs/sources/flow/reference/components/discovery.http.md index 17be475edbc9..50ecf42dcc06 100644 --- a/docs/sources/flow/reference/components/discovery.http.md +++ b/docs/sources/flow/reference/components/discovery.http.md @@ -94,6 +94,20 @@ Name | Type | Description --------------- | ------------------- | ------------------------------------------------------------------------------------------ |---------| -------- `url` | string | URL to scrape | | yes `refresh_interval` | `duration` | How often to refresh targets. | `"60s"` | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. + +[arguments]: #arguments ## Blocks @@ -106,6 +120,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. 
For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.ionos.md b/docs/sources/flow/reference/components/discovery.ionos.md index e06658b4b5d5..1c619a1641ac 100644 --- a/docs/sources/flow/reference/components/discovery.ionos.md +++ b/docs/sources/flow/reference/components/discovery.ionos.md @@ -31,11 +31,22 @@ The following arguments are supported: | ------------------ | ---------- | ------------------------------------------------------------ | ------- | -------- | | `datacenter_id` | `string` | The unique ID of the data center. | | yes | | `refresh_interval` | `duration` | The time after which the servers are refreshed. | `60s` | no | -| `port` | `int` | The port to scrap metrics from. | 80 | no | +| `port` | `int` | The port to scrape metrics from. | 80 | no | +| `bearer_token` | `secret` | Bearer token to authenticate with. | | no | +| `bearer_token_file`| `string` | File containing a bearer token to authenticate with. | | no | | `proxy_url` | `string` | HTTP proxy to proxy requests through. | | no | | `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no | | `follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no | + At most one of the following can be provided: + - [`bearer_token` argument](#arguments). + - [`bearer_token_file` argument](#arguments). + - [`basic_auth` block][basic_auth]. + - [`authorization` block][authorization]. + - [`oauth2` block][oauth2]. + +[arguments]: #arguments + ## Blocks The following blocks are supported inside the definition of @@ -47,6 +58,7 @@ The following blocks are supported inside the definition of | authorization | [authorization][] | Configure generic authorization to the endpoint. | no | | oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. 
| no | | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | +| tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.kubelet.md b/docs/sources/flow/reference/components/discovery.kubelet.md index 0000a74def4e..7ef29244a01e 100644 --- a/docs/sources/flow/reference/components/discovery.kubelet.md +++ b/docs/sources/flow/reference/components/discovery.kubelet.md @@ -35,7 +35,7 @@ The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`url` | `string` | URL of the Kubelet server. | | no +`url` | `string` | URL of the Kubelet server. | "https://localhost:10250" | no `bearer_token` | `secret` | Bearer token to authenticate with. | | no `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no `refresh_interval` | `duration` | How often the Kubelet should be polled for scrape targets | `5s` | no @@ -49,6 +49,10 @@ One of the following authentication methods must be provided if kubelet authenti The `namespaces` list limits the namespaces to discover resources in. If omitted, all namespaces are searched. +`discovery.kubelet` appends a `/pods` path to `url` to request the available pods. +You can have additional paths in the `url`. 
+For example, if `url` is `https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy`, then `discovery.kubelet` sends a request on `https://kubernetes.default.svc.cluster.local:443/api/v1/nodes/cluster-node-1/proxy/pods` + ## Blocks The following blocks are supported inside the definition of diff --git a/docs/sources/flow/reference/components/discovery.kubernetes.md b/docs/sources/flow/reference/components/discovery.kubernetes.md index 49ecbd09ea12..1d4b2f9210c5 100644 --- a/docs/sources/flow/reference/components/discovery.kubernetes.md +++ b/docs/sources/flow/reference/components/discovery.kubernetes.md @@ -259,6 +259,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.kuma.md b/docs/sources/flow/reference/components/discovery.kuma.md index bef9a8ccee12..c498753f58ab 100644 --- a/docs/sources/flow/reference/components/discovery.kuma.md +++ b/docs/sources/flow/reference/components/discovery.kuma.md @@ -54,6 +54,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. 
| no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.linode.md b/docs/sources/flow/reference/components/discovery.linode.md index f9f4b1e4e19a..77d01dbdf4e2 100644 --- a/docs/sources/flow/reference/components/discovery.linode.md +++ b/docs/sources/flow/reference/components/discovery.linode.md @@ -54,6 +54,7 @@ Hierarchy | Block | Description | Required authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.marathon.md b/docs/sources/flow/reference/components/discovery.marathon.md index 4327dc502fb1..b19ddb321c2c 100644 --- a/docs/sources/flow/reference/components/discovery.marathon.md +++ b/docs/sources/flow/reference/components/discovery.marathon.md @@ -56,6 +56,7 @@ The following blocks are supported inside the definition of | authorization | [authorization][] | Configure generic authorization to the endpoint. | no | | oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no | | oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | +| tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no | The `>` symbol indicates deeper levels of nesting. 
For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.nomad.md b/docs/sources/flow/reference/components/discovery.nomad.md index c8bcdae99699..aebd128bb320 100644 --- a/docs/sources/flow/reference/components/discovery.nomad.md +++ b/docs/sources/flow/reference/components/discovery.nomad.md @@ -58,6 +58,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/discovery.ovhcloud.md b/docs/sources/flow/reference/components/discovery.ovhcloud.md new file mode 100644 index 000000000000..453fcb3c1cfc --- /dev/null +++ b/docs/sources/flow/reference/components/discovery.ovhcloud.md @@ -0,0 +1,165 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/reference/components/discovery.ovhcloud/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/discovery.ovhcloud/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/discovery.ovhcloud/ +- /docs/grafana-cloud/send-data/agent/flow/reference/components/discovery.ovhcloud/ +canonical: https://grafana.com/docs/agent/latest/flow/reference/components/discovery.ovhcloud/ +description: Learn about discovery.ovhcloud +title: discovery.ovhcloud +--- + +# discovery.ovhcloud + +`discovery.ovhcloud` discovers scrape targets from OVHcloud's [dedicated servers][] and [VPS][] using their [API][]. 
+{{< param "PRODUCT_ROOT_NAME" >}} will periodically check the REST endpoint and create a target for every discovered server. +The public IPv4 address will be used by default - if there's none, the IPv6 address will be used. +This may be changed via relabeling with `discovery.relabel`. +For OVHcloud's [public cloud][] instances you can use `discovery.openstack`. + +[API]: https://api.ovh.com/ +[public cloud]: https://www.ovhcloud.com/en/public-cloud/ +[VPS]: https://www.ovhcloud.com/en/vps/ +[Dedicated servers]: https://www.ovhcloud.com/en/bare-metal/ + +## Usage + +```river +discovery.ovhcloud "LABEL" { + application_key = APPLICATION_KEY + application_secret = APPLICATION_SECRET + consumer_key = CONSUMER_KEY + service = SERVICE +} +``` + +## Arguments + +The following arguments are supported: + +Name | Type | Description | Default | Required +------------------ | -------------- | -------------------------------------------------------------- | ------------- | -------- +application_key | `string` | [API][] application key. | | yes +application_secret | `secret` | [API][] application secret. | | yes +consumer_key | `secret` | [API][] consumer key. | | yes +endpoint | `string` | [API][] endpoint. | "ovh-eu" | no +refresh_interval | `duration` | Refresh interval to re-read the resources list. | "60s" | no +service | `string` | Service of the targets to retrieve. | | yes + +`endpoint` must be one of the [supported API endpoints][supported-apis]. + +`service` must be either `vps` or `dedicated_server`. + +[supported-apis]: https://github.com/ovh/go-ovh#supported-apis + +## Exported fields + +The following fields are exported and can be referenced by other components: + +Name | Type | Description +--------- | ------------------- | ----------- +`targets` | `list(map(string))` | The set of targets discovered from the OVHcloud API. + +Multiple meta labels are available on `targets` and can be used by the `discovery.relabel` component. 
+ +[VPS][] meta labels: +* `__meta_ovhcloud_vps_cluster`: the cluster of the server. +* `__meta_ovhcloud_vps_datacenter`: the datacenter of the server. +* `__meta_ovhcloud_vps_disk`: the disk of the server. +* `__meta_ovhcloud_vps_display_name`: the display name of the server. +* `__meta_ovhcloud_vps_ipv4`: the IPv4 of the server. +* `__meta_ovhcloud_vps_ipv6`: the IPv6 of the server. +* `__meta_ovhcloud_vps_keymap`: the KVM keyboard layout of the server. +* `__meta_ovhcloud_vps_maximum_additional_ip`: the maximum additional IPs of the server. +* `__meta_ovhcloud_vps_memory_limit`: the memory limit of the server. +* `__meta_ovhcloud_vps_memory`: the memory of the server. +* `__meta_ovhcloud_vps_monitoring_ip_blocks`: the monitoring IP blocks of the server. +* `__meta_ovhcloud_vps_name`: the name of the server. +* `__meta_ovhcloud_vps_netboot_mode`: the netboot mode of the server. +* `__meta_ovhcloud_vps_offer_type`: the offer type of the server. +* `__meta_ovhcloud_vps_offer`: the offer of the server. +* `__meta_ovhcloud_vps_state`: the state of the server. +* `__meta_ovhcloud_vps_vcore`: the number of virtual cores of the server. +* `__meta_ovhcloud_vps_version`: the version of the server. +* `__meta_ovhcloud_vps_zone`: the zone of the server. + +[Dedicated servers][] meta labels: +* `__meta_ovhcloud_dedicated_server_commercial_range`: the commercial range of the server. +* `__meta_ovhcloud_dedicated_server_datacenter`: the datacenter of the server. +* `__meta_ovhcloud_dedicated_server_ipv4`: the IPv4 of the server. +* `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server. +* `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server. +* `__meta_ovhcloud_dedicated_server_name`: the name of the server. +* `__meta_ovhcloud_dedicated_server_os`: the operating system of the server. +* `__meta_ovhcloud_dedicated_server_rack`: the rack of the server. +* `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server. 
+* `__meta_ovhcloud_dedicated_server_server_id`: the ID of the server. +* `__meta_ovhcloud_dedicated_server_state`: the state of the server. +* `__meta_ovhcloud_dedicated_server_support_level`: the support level of the server. + +## Component health + +`discovery.ovhcloud` is only reported as unhealthy when given an invalid +configuration. In those cases, exported fields retain their last healthy +values. + +## Debug information + +`discovery.ovhcloud` does not expose any component-specific debug information. + +## Debug metrics + +`discovery.ovhcloud` does not expose any component-specific debug metrics. + +## Example + +```river +discovery.ovhcloud "example" { + application_key = APPLICATION_KEY + application_secret = APPLICATION_SECRET + consumer_key = CONSUMER_KEY + service = SERVICE +} + +prometheus.scrape "demo" { + targets = discovery.ovhcloud.example.targets + forward_to = [prometheus.remote_write.demo.receiver] +} + +prometheus.remote_write "demo" { + endpoint { + url = PROMETHEUS_REMOTE_WRITE_URL + basic_auth { + username = USERNAME + password = PASSWORD + } + } +} +``` + +Replace the following: + - `APPLICATION_KEY`: The OVHcloud [API][] application key. + - `APPLICATION_SECRET`: The OVHcloud [API][] application secret. + - `CONSUMER_KEY`: The OVHcloud [API][] consumer key. + - `SERVICE`: The OVHcloud service of the targets to retrieve. + - `PROMETHEUS_REMOTE_WRITE_URL`: The URL of the Prometheus remote_write-compatible server to send metrics to. + - `USERNAME`: The username to use for authentication to the remote_write API. + - `PASSWORD`: The password to use for authentication to the remote_write API. 
+ + + + +## Compatible components + +`discovery.ovhcloud` has exports that can be consumed by the following components: + +- Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) + +{{% admonition type="note" %}} + +Connecting some components may not be sensible or components may require further configuration to make the +connection work correctly. Refer to the linked documentation for more details. + +{{% /admonition %}} + + diff --git a/docs/sources/flow/reference/components/discovery.puppetdb.md b/docs/sources/flow/reference/components/discovery.puppetdb.md index 34e6f14db7c3..a83d8454723c 100644 --- a/docs/sources/flow/reference/components/discovery.puppetdb.md +++ b/docs/sources/flow/reference/components/discovery.puppetdb.md @@ -64,6 +64,7 @@ basic_auth | [basic_auth][] | Configure basic_auth for authenticating to the end authorization | [authorization][] | Configure generic authorization to the endpoint. | no oauth2 | [oauth2][] | Configure OAuth2 for authenticating to the endpoint. | no oauth2 > tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no +tls_config | [tls_config][] | Configure TLS settings for connecting to the endpoint. | no The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index c2793abbfe2e..d1c8ef723bd9 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -382,6 +382,20 @@ following key-value pair to the set of extracted data. username: agent ``` +{{% admonition type="note" %}} +Due to a limitation of the upstream jmespath library, you must wrap any string +that contains a hyphen `-` in quotes so that it's not considered a numerical +expression. 
+ +If you don't use quotes to wrap a string that contains a hyphen, you will get +errors like: `Unexpected token at the end of the expression: tNumber` + +You can use one of two options to circumvent this issue: + +1. An escaped double quote. For example: `http_user_agent = "\"request_User-Agent\""` +1. A backtick quote. For example: ``http_user_agent = `"request_User-Agent"` `` +{{% /admonition %}} + ### stage.label_drop block The `stage.label_drop` inner block configures a processing stage that drops labels diff --git a/docs/sources/flow/reference/components/loki.source.awsfirehose.md b/docs/sources/flow/reference/components/loki.source.awsfirehose.md index 86bf634e395a..9b1d2c6d75c5 100644 --- a/docs/sources/flow/reference/components/loki.source.awsfirehose.md +++ b/docs/sources/flow/reference/components/loki.source.awsfirehose.md @@ -75,11 +75,12 @@ The component will start an HTTP server on the configured port and address with `loki.source.awsfirehose` supports the following arguments: -| Name | Type | Description | Default | Required | - |--------------------------|----------------------|------------------------------------------------------------|---------|----------| -| `forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes | +| Name | Type | Description | Default | Required | +| ------------------------ | -------------------- | -------------------------------------------------------------- | ------- | -------- | +| `forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes | | `use_incoming_timestamp` | `bool` | Whether or not to use the timestamp received from the request. | `false` | no | -| `relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no | +| `relabel_rules` | `RelabelRules` | Relabeling rules to apply on log entries. | `{}` | no | +| `access_key` | `secret` | If set, require AWS Firehose to provide a matching key. 
| `""` | no | The `relabel_rules` field can make use of the `rules` export value from a [`loki.relabel`][loki.relabel] component to apply one or more relabeling rules to log entries before they're forwarded diff --git a/docs/sources/flow/reference/components/loki.source.docker.md b/docs/sources/flow/reference/components/loki.source.docker.md index cbf77163d646..02bf03175b20 100644 --- a/docs/sources/flow/reference/components/loki.source.docker.md +++ b/docs/sources/flow/reference/components/loki.source.docker.md @@ -33,7 +33,7 @@ loki.source.docker "LABEL" { The component starts a new reader for each of the given `targets` and fans out log entries to the list of receivers passed in `forward_to`. -`loki.source.file` supports the following arguments: +`loki.source.docker` supports the following arguments: Name | Type | Description | Default | Required --------------- | -------------------- | -------------------- | ------- | -------- @@ -131,6 +131,14 @@ fully qualified name) to store its _positions file_. The positions file stores the read offsets so that if there is a component or Agent restart, `loki.source.docker` can pick up tailing from the same spot. +If the target's argument contains multiple entries with the same container +ID (for example as a result of `discovery.docker` picking up multiple exposed +ports or networks), `loki.source.docker` will deduplicate them, and only keep +the first of each container ID instances, based on the +`__meta_docker_container_id` label. As such, the Docker daemon is queried +for each container ID only once, and only one target will be available in the +component's debug info. 
+ ## Example This example collects log entries from the files specified in the `targets` diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/flow/reference/components/loki.source.file.md index 4583018d90a6..edb407593c1f 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/flow/reference/components/loki.source.file.md @@ -56,13 +56,13 @@ The following blocks are supported inside the definition of `loki.source.file`: | Hierarchy | Name | Description | Required | | -------------- | ------------------ | ----------------------------------------------------------------- | -------- | -| decompresssion | [decompresssion][] | Configure reading logs from compressed files. | no | +| decompression | [decompression][] | Configure reading logs from compressed files. | no | | file_watch | [file_watch][] | Configure how often files should be polled from disk for changes. | no | -[decompresssion]: #decompresssion-block +[decompression]: #decompression-block [file_watch]: #file_watch-block -### decompresssion block +### decompression block The `decompression` block contains configuration for reading logs from compressed files. The following arguments are supported: diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md index c1e887b78c9f..23c2eaa0a24d 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md @@ -22,10 +22,25 @@ aggregates Request, Error and Duration (R.E.D) OpenTelemetry metrics from the sp including Errors. Multiple metrics can be aggregated if, for instance, a user wishes to view call counts just on `service.name` and `span.name`. -- **Error** counts are computed from the Request counts which have an `Error` status code metric dimension. 
+ Requests are tracked using a `calls` metric with a `status.code` datapoint attribute set to `Ok`:
+ ```
+ calls { service.name="shipping", span.name="get_shipping/{shippingId}", span.kind="SERVER", status.code="Ok" }
+ ```
+
+- **Error** counts are computed from the number of spans with an `Error` status code.
+
+ Errors are tracked using a `calls` metric with a `status.code` datapoint attribute set to `Error`:
+ ```
+ calls { service.name="shipping", span.name="get_shipping/{shippingId}", span.kind="SERVER", status.code="Error" }
+ ```
- **Duration** is computed from the difference between the span start and end times and inserted
- into the relevant duration histogram time bucket for each unique set dimensions.
+ into the relevant duration histogram time bucket for each unique set of dimensions.
+
+ Span durations are tracked using a `duration` histogram metric:
+ ```
+ duration { service.name="shipping", span.name="get_shipping/{shippingId}", span.kind="SERVER", status.code="Ok" }
+ ```
> **NOTE**: `otelcol.connector.spanmetrics` is a wrapper over the upstream
> OpenTelemetry Collector `spanmetrics` connector. Bug reports or feature requests
@@ -52,13 +67,13 @@ otelcol.connector.spanmetrics "LABEL" {
`otelcol.connector.spanmetrics` supports the following arguments:
-| Name | Type | Description | Default | Required |
-| ------------------------- | ---------- | ------------------------------------------------------- | -------------- | -------- |
-| `dimensions_cache_size` | `number` | How many dimensions to cache. | `1000` | no |
-| `aggregation_temporality` | `string` | Configures whether to reset the metrics after flushing. | `"CUMULATIVE"` | no |
-| `metrics_flush_interval` | `duration` | How often to flush generated metrics. | `"15s"` | no |
-| `namespace` | `string` | Metric namespace. | `""` | no |
-| `exclude_dimensions` | `list(string)` | List of dimensions to be excluded from the default set of dimensions. 
| `false` | no | +| Name | Type | Description | Default | Required | +| ------------------------- | -------------- | --------------------------------------------------------------------- | -------------- | -------- | +| `dimensions_cache_size` | `number` | How many dimensions to cache. | `1000` | no | +| `aggregation_temporality` | `string` | Configures whether to reset the metrics after flushing. | `"CUMULATIVE"` | no | +| `metrics_flush_interval` | `duration` | How often to flush generated metrics. | `"15s"` | no | +| `namespace` | `string` | Metric namespace. | `""` | no | +| `exclude_dimensions` | `list(string)` | List of dimensions to be excluded from the default set of dimensions. | `false` | no | Adjusting `dimensions_cache_size` can improve the Agent process' memory usage. @@ -130,10 +145,10 @@ The `histogram` block configures the histogram derived from spans' durations. The following attributes are supported: -| Name | Type | Description | Default | Required | -| ------ | -------- | ------------------------------- | ------- | -------- | -| `unit` | `string` | Configures the histogram units. | `"ms"` | no | -| `disable`| `bool` | Disable all histogram metrics. | `false` | no | +| Name | Type | Description | Default | Required | +| --------- | -------- | ------------------------------- | ------- | -------- | +| `unit` | `string` | Configures the histogram units. | `"ms"` | no | +| `disable` | `bool` | Disable all histogram metrics. | `false` | no | The supported values for `unit` are: @@ -166,9 +181,9 @@ The `exemplars` block configures how to attach exemplars to histograms. The following attributes are supported: -| Name | Type | Description | Default | Required | -| ---------- | -------- | ---------------------------------------------------------------- | ------- | -------- | -| `enabled` | `bool` | Configures whether to add exemplars to histograms. 
| `false` | no | +| Name | Type | Description | Default | Required | +| --------- | ------ | -------------------------------------------------- | ------- | -------- | +| `enabled` | `bool` | Configures whether to add exemplars to histograms. | `false` | no | ### output block @@ -184,6 +199,348 @@ The following fields are exported and can be referenced by other components: `input` accepts `otelcol.Consumer` traces telemetry data. It does not accept metrics and logs. +## Handling of resource attributes + +[Handling of resource attributes]: #handling-of-resource-attributes + +`otelcol.connector.spanmetrics` is an OTLP-native component. As such, it aims to preserve the resource attributes of spans. + +1. For example, let's assume that there are two incoming resources spans with the same `service.name` and `k8s.pod.name` resource attributes. + {{< collapse title="Example JSON of two incoming spans." >}} + + ```json + { + "resourceSpans": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + } + ] + }, + "scopeSpans": [ + { + "spans": [ + { + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381e", + "name": "TestSpan", + "attributes": [ + { + "key": "attribute1", + "value": { "intValue": "78" } + } + ] + } + ] + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + } + ] + }, + "scopeSpans": [ + { + "spans": [ + { + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381b", + "name": "TestSpan", + "attributes": [ + { + "key": "attribute1", + "value": { "intValue": "78" } + } + ] + } + ] + } + ] + } + ] + } + ``` + + {{< /collapse >}} + +1. 
`otelcol.connector.spanmetrics` will preserve the incoming `service.name` and `k8s.pod.name` resource attributes by attaching them to the output metrics resource. + Only one metric resource will be created, because both span resources have identical resource attributes. + {{< collapse title="Example JSON of one outgoing metric resource." >}} + + ```json + { + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + } + ] + }, + "scopeMetrics": [ + { + "scope": { "name": "spanmetricsconnector" }, + "metrics": [ + { + "name": "calls", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + } + ], + "startTimeUnixNano": "1702582936761872000", + "timeUnixNano": "1702582936761872012", + "asInt": "2" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + } + ] + } + ] + } + ] + } + ``` + + {{< /collapse >}} + +1. Now assume that `otelcol.connector.spanmetrics` receives two incoming resource spans, each with a different value for the `k8s.pod.name` recourse attribute. + {{< collapse title="Example JSON of two incoming spans." 
>}} + + ```json + { + "resourceSpans": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + } + ] + }, + "scopeSpans": [ + { + "spans": [ + { + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381e", + "name": "TestSpan", + "attributes": [ + { + "key": "attribute1", + "value": { "intValue": "78" } + } + ] + } + ] + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "second" } + } + ] + }, + "scopeSpans": [ + { + "spans": [ + { + "trace_id": "7bba9f33312b3dbb8b2c2c62bb7abe2d", + "span_id": "086e83747d0e381b", + "name": "TestSpan", + "attributes": [ + { + "key": "attribute1", + "value": { "intValue": "78" } + } + ] + } + ] + } + ] + } + ] + } + ``` + + {{< /collapse >}} + +1. To preserve the values of all resource attributes, `otelcol.connector.spanmetrics` will produce two resource metrics. + Each resource metric will have a different value for the `k8s.pod.name` recourse attribute. + This way none of the resource attributes will be lost during the generation of metrics. + {{< collapse title="Example JSON of two outgoing metric resources." 
>}} + ```json + { + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "first" } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "spanmetricsconnector" + }, + "metrics": [ + { + "name": "calls", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + } + ], + "startTimeUnixNano": "1702582936761872000", + "timeUnixNano": "1702582936761872012", + "asInt": "1" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + } + ] + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "k8s.pod.name", + "value": { "stringValue": "second" } + } + ] + }, + "scopeMetrics": [ + { + "scope": { + "name": "spanmetricsconnector" + }, + "metrics": [ + { + "name": "calls", + "sum": { + "dataPoints": [ + { + "attributes": [ + { + "key": "service.name", + "value": { "stringValue": "TestSvcName" } + }, + { + "key": "span.name", + "value": { "stringValue": "TestSpan" } + }, + { + "key": "span.kind", + "value": { "stringValue": "SPAN_KIND_UNSPECIFIED" } + }, + { + "key": "status.code", + "value": { "stringValue": "STATUS_CODE_UNSET" } + } + ], + "startTimeUnixNano": "1702582936761872000", + "timeUnixNano": "1702582936761872012", + "asInt": "1" + } + ], + "aggregationTemporality": 2, + "isMonotonic": true + } + } + ] + } + ] + } + ] + } + ``` + {{< /collapse >}} + ## Component health `otelcol.connector.spanmetrics` is only reported as unhealthy if given an invalid @@ -259,42 +616,152 @@ otelcol.exporter.otlp "production" { ### Sending 
metrics via a Prometheus remote write -In order for a `target_info` metric to be generated, the incoming spans resource scope -attributes must contain `service.name` and `service.instance.id` attributes. +The generated metrics can be sent to a Prometheus-compatible database such as Grafana Mimir. +However, extra steps are required in order to make sure all metric samples are received. +This is because `otelcol.connector.spanmetrics` aims to [preserve resource attributes][Handling of resource attributes] in the metrics which it outputs. -The `target_info` metric will be generated for each resource scope, while OpenTelemetry -metric names and attributes will be normalized to be compliant with Prometheus naming rules. +Unfortunately, the [Prometheus data model][prom-data-model] has no notion of resource attributes. +This means that if `otelcol.connector.spanmetrics` outputs metrics with identical metric attributes, +but different resource attributes, `otelcol.exporter.prometheus` will convert the metrics into the same metric series. +This problem can be solved by doing **either** of the following: -```river -otelcol.receiver.otlp "default" { - http {} - grpc {} +- **Recommended approach:** Prior to `otelcol.connector.spanmetrics`, remove all resource attributes from the incoming spans which are not needed by `otelcol.connector.spanmetrics`. + {{< collapse title="Example River configuration to remove unnecessary resource attributes." >}} + ```river + otelcol.receiver.otlp "default" { + http {} + grpc {} - output { - traces = [otelcol.connector.spanmetrics.default.input] + output { + traces = [otelcol.processor.transform.default.input] + } } -} -otelcol.connector.spanmetrics "default" { - histogram { - exponential {} + // Remove all resource attributes except the ones which + // the otelcol.connector.spanmetrics needs. + // If this is not done, otelcol.exporter.prometheus may fail to + // write some samples due to an "err-mimir-sample-duplicate-timestamp" error. 
+ // This is because the spanmetricsconnector will create a new + // metrics resource scope for each traces resource scope. + otelcol.processor.transform "default" { + error_mode = "ignore" + + trace_statements { + context = "resource" + statements = [ + // We keep only the "service.name" and "special.attr" resource attributes, + // because they are the only ones which otelcol.connector.spanmetrics needs. + // + // There is no need to list "span.name", "span.kind", and "status.code" + // here because they are properties of the span (and not resource attributes): + // https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/opentelemetry/proto/trace/v1/trace.proto + `keep_keys(attributes, ["service.name", "special.attr"])`, + ] + } + + output { + traces = [otelcol.connector.spanmetrics.default.input] + } } - output { - metrics = [otelcol.exporter.prometheus.default.input] + otelcol.connector.spanmetrics "default" { + histogram { + explicit {} + } + + dimension { + name = "special.attr" + } + output { + metrics = [otelcol.exporter.prometheus.default.input] + } } -} -otelcol.exporter.prometheus "default" { - forward_to = [prometheus.remote_write.mimir.receiver] -} + otelcol.exporter.prometheus "default" { + forward_to = [prometheus.remote_write.mimir.receiver] + } -prometheus.remote_write "mimir" { - endpoint { - url = "http://mimir:9009/api/v1/push" + prometheus.remote_write "mimir" { + endpoint { + url = "http://mimir:9009/api/v1/push" + } } -} -``` + ``` + {{< /collapse >}} + +- Or, after `otelcol.connector.spanmetrics`, copy each of the resource attributes as a metric datapoint attribute. +This has the advantage that the resource attributes will be visible as metric labels. +However, the {{< term "cardinality" >}}cardinality{{< /term >}} of the metrics may be much higher, which could increase the cost of storing and querying them. +The example below uses the [merge_maps][] OTTL function. 
+ + {{< collapse title="Example River configuration to add all resource attributes as metric datapoint attributes." >}} + ```river + otelcol.receiver.otlp "default" { + http {} + grpc {} + + output { + traces = [otelcol.connector.spanmetrics.default.input] + } + } + + otelcol.connector.spanmetrics "default" { + histogram { + explicit {} + } + + dimension { + name = "special.attr" + } + output { + metrics = [otelcol.processor.transform.default.input] + } + } + + // Insert resource attributes as metric data point attributes. + otelcol.processor.transform "default" { + error_mode = "ignore" + + metric_statements { + context = "datapoint" + statements = [ + // "insert" means that a metric datapoint attribute will be inserted + // only if an attribute with the same key does not already exist. + `merge_maps(attributes, resource.attributes, "insert")`, + ] + } + + output { + metrics = [otelcol.exporter.prometheus.default.input] + } + } + + otelcol.exporter.prometheus "default" { + forward_to = [prometheus.remote_write.mimir.receiver] + } + + prometheus.remote_write "mimir" { + endpoint { + url = "http://mimir:9009/api/v1/push" + } + } + ``` + {{< /collapse >}} + +If the resource attributes are not treated in either of the ways described above, an error such as this one could be logged by `prometheus.remote_write`: +`the sample has been rejected because another sample with the same timestamp, but a different value, has already been ingested (err-mimir-sample-duplicate-timestamp)`. + +{{% admonition type="note" %}} +In order for a Prometheus `target_info` metric to be generated, the incoming spans resource scope +attributes must contain `service.name` and `service.instance.id` attributes. + +The `target_info` metric will be generated for each resource scope, while OpenTelemetry +metric names and attributes will be normalized to be compliant with Prometheus naming rules. 
+{{% /admonition %}} + +[merge_maps]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/ottlfuncs/README.md#merge_maps +[prom-data-model]: https://prometheus.io/docs/concepts/data_model/ + ## Compatible components diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md index 60480de6677e..4552adce44ce 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md @@ -15,6 +15,8 @@ title: otelcol.exporter.loadbalancing {{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} + + `otelcol.exporter.loadbalancing` accepts logs and traces from other `otelcol` components and writes them over the network using the OpenTelemetry Protocol (OTLP) protocol. @@ -141,8 +143,9 @@ Name | Type | Description | Default | Required ### kubernetes block -You can use the `kubernetes` block to load balance across the pods of a Kubernetes service. The Agent will be notified -by the Kubernetes API whenever a new pod is added or removed from the service. +You can use the `kubernetes` block to load balance across the pods of a Kubernetes service. +The Kubernetes API notifies {{< param "PRODUCT_NAME" >}} whenever a new pod is added or removed from the service. +The `kubernetes` resolver has a much faster response time than the `dns` resolver because it doesn't require polling. The following arguments are supported: @@ -264,6 +267,86 @@ Name | Type | Description * logs * traces +## Choose a load balancing strategy + + + + +Different {{< param "PRODUCT_NAME" >}} components require different load-balancing strategies. +The use of `otelcol.exporter.loadbalancing` is only necessary for [stateful Flow components][stateful-and-stateless-components]. 
+ +[stateful-and-stateless-components]: {{< relref "../../get-started/deploy-agent.md#stateful-and-stateless-components" >}} + +### otelcol.processor.tail_sampling + +All spans for a given trace ID must go to the same tail sampling {{< param "PRODUCT_ROOT_NAME" >}} instance. +* This can be done by configuring `otelcol.exporter.loadbalancing` with `routing_key = "traceID"`. +* If you do not configure `routing_key = "traceID"`, the sampling decision may be incorrect. + The tail sampler must have a full view of the trace when making a sampling decision. + For example, a `rate_limiting` tail sampling strategy may incorrectly pass through + more spans than expected if the spans for the same trace are spread out to more than + one {{< param "PRODUCT_NAME" >}} instance. + + + +### otelcol.connector.spanmetrics +All spans for a given `service.name` must go to the same spanmetrics {{< param "PRODUCT_ROOT_NAME" >}}. +* This can be done by configuring `otelcol.exporter.loadbalancing` with `routing_key = "service"`. +* If you do not configure `routing_key = "service"`, metrics generated from spans might be incorrect. +For example, if similar spans for the same `service.name` end up on different {{< param "PRODUCT_ROOT_NAME" >}} instances, the two {{< param "PRODUCT_ROOT_NAME" >}}s will have identical metric series for calculating span latency, errors, and number of requests. +When both {{< param "PRODUCT_ROOT_NAME" >}} instances attempt to write the metrics to a database such as Mimir, the series may clash with each other. +At best, this will lead to an error in {{< param "PRODUCT_ROOT_NAME" >}} and a rejected write to the metrics database. +At worst, it could lead to inaccurate data due to overlapping samples for the metric series. + +However, there are ways to scale `otelcol.connector.spanmetrics` without the need for a load balancer: +1. Each {{< param "PRODUCT_ROOT_NAME" >}} could add an attribute such as `collector.id` in order to make its series unique. 
+ Then, for example, you could use a `sum by` PromQL query to aggregate the metrics from different {{< param "PRODUCT_ROOT_NAME" >}}s. + Unfortunately, an extra `collector.id` attribute has a downside that the metrics stored in the database will have higher {{< term "cardinality" >}}cardinality{{< /term >}}. +2. Spanmetrics could be generated in the backend database instead of in {{< param "PRODUCT_ROOT_NAME" >}}. + For example, span metrics can be [generated][tempo-spanmetrics] in Grafana Cloud by the Tempo traces database. + +[tempo-spanmetrics]: https://grafana.com/docs/tempo/latest/metrics-generator/span_metrics/ + +### otelcol.connector.servicegraph +It is challenging to scale `otelcol.connector.servicegraph` over multiple {{< param "PRODUCT_ROOT_NAME" >}} instances. +For `otelcol.connector.servicegraph` to work correctly, each "client" span must be paired with a "server" span to calculate metrics such as span duration. +If a "client" span goes to one {{< param "PRODUCT_ROOT_NAME" >}}, but a "server" span goes to another {{< param "PRODUCT_ROOT_NAME" >}}, then no single {{< param "PRODUCT_ROOT_NAME" >}} will be able to pair the spans and a metric won't be generated. + +`otelcol.exporter.loadbalancing` can solve this problem partially if it is configured with `routing_key = "traceID"`. +Each {{< param "PRODUCT_ROOT_NAME" >}} will then be able to calculate a service graph for each "client"/"server" pair in a trace. +It is possible to have a span with similar "server"/"client" values in a different trace, processed by another {{< param "PRODUCT_ROOT_NAME" >}}. +If two different {{< param "PRODUCT_ROOT_NAME" >}} instances process similar "server"/"client" spans, they will generate the same service graph metric series. +If the series from two {{< param "PRODUCT_ROOT_NAME" >}} are the same, this will lead to issues when writing them to the backend database. +You could differentiate the series by adding an attribute such as `"collector.id"`. 
+The series from different {{< param "PRODUCT_ROOT_NAME" >}}s can be aggregated using PromQL queries on the backend metrics database.
+If the metrics are stored in Grafana Mimir, cardinality issues due to `"collector.id"` labels can be solved using [Adaptive Metrics][adaptive-metrics].
+
+A simpler, more scalable alternative to generating service graph metrics in {{< param "PRODUCT_ROOT_NAME" >}} is to generate them entirely in the backend database.
+For example, service graphs can be [generated][tempo-servicegraphs] in Grafana Cloud by the Tempo traces database.
+
+[tempo-servicegraphs]: https://grafana.com/docs/tempo/latest/metrics-generator/service_graphs/
+[adaptive-metrics]: https://grafana.com/docs/grafana-cloud/cost-management-and-billing/reduce-costs/metrics-costs/control-metrics-usage-via-adaptive-metrics/
+
+### Mixing stateful components
+
+Different {{< param "PRODUCT_NAME" >}} components may require a different `routing_key` for `otelcol.exporter.loadbalancing`.
+For example, `otelcol.processor.tail_sampling` requires `routing_key = "traceID"` whereas `otelcol.connector.spanmetrics` requires `routing_key = "service"`.
+To load balance both types of components, two different sets of load balancers have to be set up:
+
+* One set of `otelcol.exporter.loadbalancing` with `routing_key = "traceID"`, sending spans to {{< param "PRODUCT_ROOT_NAME" >}}s doing tail sampling and no span metrics.
+* Another set of `otelcol.exporter.loadbalancing` with `routing_key = "service"`, sending spans to {{< param "PRODUCT_ROOT_NAME" >}}s doing span metrics and no service graphs.
+
+Unfortunately, this can also lead to side effects.
+For example, if `otelcol.connector.spanmetrics` is configured to generate exemplars, the tail sampling {{< param "PRODUCT_ROOT_NAME" >}}s might drop the trace that the exemplar points to. 
+There is no coordination between the tail sampling {{< param "PRODUCT_ROOT_NAME" >}}s and the span metrics {{< param "PRODUCT_ROOT_NAME" >}}s to make sure trace IDs for exemplars are kept. + + + ## Component health `otelcol.exporter.loadbalancing` is only reported as unhealthy if given an invalid @@ -274,7 +357,9 @@ configuration. `otelcol.exporter.loadbalancing` does not expose any component-specific debug information. -## Example +## Examples + +### Static resolver This example accepts OTLP logs and traces over gRPC. It then sends them in a load-balanced way to "localhost:55690" or "localhost:55700". @@ -301,6 +386,573 @@ otelcol.exporter.loadbalancing "default" { } } ``` + +### DNS resolver + +When configured with a `dns` resolver, `otelcol.exporter.loadbalancing` will do a DNS lookup +on regular intervals. Spans are exported to the addresses the DNS lookup returned. + +```river +otelcol.exporter.loadbalancing "default" { + resolver { + dns { + hostname = "grafana-agent-traces-sampling.grafana-cloud-monitoring.svc.cluster.local" + port = "34621" + interval = "5s" + timeout = "1s" + } + } + protocol { + otlp { + client {} + } + } +} +``` + +The following example shows a Kubernetes configuration that configures two sets of {{< param "PRODUCT_ROOT_NAME" >}}s: +* A pool of load-balancer {{< param "PRODUCT_ROOT_NAME" >}}s: + * Spans are received from instrumented applications via `otelcol.receiver.otlp` + * Spans are exported via `otelcol.exporter.loadbalancing`. +* A pool of sampling {{< param "PRODUCT_ROOT_NAME" >}}s: + * The sampling {{< param "PRODUCT_ROOT_NAME" >}}s run behind a headless service to enable the load-balancer {{< param "PRODUCT_ROOT_NAME" >}}s to discover them. + * Spans are received from the load-balancer {{< param "PRODUCT_ROOT_NAME" >}}s via `otelcol.receiver.otlp` + * Traces are sampled via `otelcol.processor.tail_sampling`. + * The traces are exported via `otelcol.exporter.otlp` to an OTLP-compatible database such as Tempo. 
+ +{{< collapse title="Example Kubernetes configuration" >}} + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: grafana-cloud-monitoring +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-trace-generator + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: k6-trace-generator + template: + metadata: + labels: + name: k6-trace-generator + spec: + containers: + - env: + - name: ENDPOINT + value: agent-traces-lb.grafana-cloud-monitoring.svc.cluster.local:9411 + image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 + imagePullPolicy: IfNotPresent + name: k6-trace-generator +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces-lb + namespace: grafana-cloud-monitoring +spec: + clusterIP: None + ports: + - name: agent-traces-otlp-grpc + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + name: agent-traces-lb + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: agent-traces-lb + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: agent-traces-lb + template: + metadata: + labels: + name: agent-traces-lb + spec: + containers: + - args: + - run + - /etc/agent/agent_lb.river + command: + - /bin/grafana-agent + env: + - name: AGENT_MODE + value: flow + image: grafana/agent:v0.38.0 + imagePullPolicy: IfNotPresent + name: agent-traces + ports: + - containerPort: 9411 + name: otlp-grpc + protocol: TCP + - containerPort: 34621 + name: agent-lb + protocol: TCP + volumeMounts: + - mountPath: /etc/agent + name: agent-traces + volumes: + - configMap: + name: agent-traces + name: agent-traces +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces-sampling + namespace: grafana-cloud-monitoring +spec: + clusterIP: None + ports: + - name: agent-lb + port: 34621 + protocol: TCP + targetPort: agent-lb + selector: + name: 
agent-traces-sampling + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: agent-traces-sampling + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: agent-traces-sampling + template: + metadata: + labels: + name: agent-traces-sampling + spec: + containers: + - args: + - run + - /etc/agent/agent_sampling.river + command: + - /bin/grafana-agent + env: + - name: AGENT_MODE + value: flow + image: grafana/agent:v0.38.0 + imagePullPolicy: IfNotPresent + name: agent-traces + ports: + - containerPort: 9411 + name: otlp-grpc + protocol: TCP + - containerPort: 34621 + name: agent-lb + protocol: TCP + volumeMounts: + - mountPath: /etc/agent + name: agent-traces + volumes: + - configMap: + name: agent-traces + name: agent-traces +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +data: + agent_lb.river: | + otelcol.receiver.otlp "default" { + grpc { + endpoint = "0.0.0.0:9411" + } + output { + traces = [otelcol.exporter.loadbalancing.default.input,otelcol.exporter.logging.default.input] + } + } + + otelcol.exporter.logging "default" { + verbosity = "detailed" + } + + otelcol.exporter.loadbalancing "default" { + resolver { + dns { + hostname = "agent-traces-sampling.grafana-cloud-monitoring.svc.cluster.local" + port = "34621" + } + } + protocol { + otlp { + client { + tls { + insecure = true + } + } + } + } + } + + agent_sampling.river: | + otelcol.receiver.otlp "default" { + grpc { + endpoint = "0.0.0.0:34621" + } + output { + traces = [otelcol.exporter.otlp.default.input,otelcol.exporter.logging.default.input] + } + } + + otelcol.exporter.logging "default" { + verbosity = "detailed" + } + + otelcol.exporter.otlp "default" { + client { + endpoint = "tempo-prod-06-prod-gb-south-0.grafana.net:443" + auth = otelcol.auth.basic.creds.handler + } + } + + otelcol.auth.basic "creds" { + username = "111111" + 
password = "pass" + } +``` +{{< /collapse >}} + +You must fill in the correct OTLP credentials prior to running the example. +You can use [k3d][] to start the example: + + +```bash +k3d cluster create grafana-agent-lb-test +kubectl apply -f kubernetes_config.yaml +``` + +To delete the cluster, run: + +```bash +k3d cluster delete grafana-agent-lb-test +``` + +[k3d]: https://k3d.io/v5.6.0/ + +### Kubernetes resolver + +When you configure `otelcol.exporter.loadbalancing` with a `kubernetes` resolver, the Kubernetes API notifies {{< param "PRODUCT_NAME" >}} whenever a new pod is added or removed from the service. +Spans are exported to the addresses from the Kubernetes API, combined with all the possible `ports`. + +```river +otelcol.exporter.loadbalancing "default" { + resolver { + kubernetes { + service = "grafana-agent-traces-headless" + ports = [ 34621 ] + } + } + protocol { + otlp { + client {} + } + } +} +``` + +The following example shows a Kubernetes configuration that sets up two sets of {{< param "PRODUCT_ROOT_NAME" >}}s: +* A pool of load-balancer {{< param "PRODUCT_ROOT_NAME" >}}s: + * Spans are received from instrumented applications via `otelcol.receiver.otlp` + * Spans are exported via `otelcol.exporter.loadbalancing`. + * The load-balancer {{< param "PRODUCT_ROOT_NAME" >}}s will get notified by the Kubernetes API any time a pod + is added or removed from the pool of sampling {{< param "PRODUCT_ROOT_NAME" >}}s. +* A pool of sampling {{< param "PRODUCT_ROOT_NAME" >}}s: + * The sampling {{< param "PRODUCT_ROOT_NAME" >}}s do not need to run behind a headless service. + * Spans are received from the load-balancer {{< param "PRODUCT_ROOT_NAME" >}}s via `otelcol.receiver.otlp` + * Traces are sampled via `otelcol.processor.tail_sampling`. + * The traces are exported via `otelcol.exporter.otlp` to a an OTLP-compatible database such as Tempo. 
+ + +{{< collapse title="Example Kubernetes configuration" >}} + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: grafana-cloud-monitoring +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: agent-traces-role + namespace: grafana-cloud-monitoring +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: agent-traces-rolebinding + namespace: grafana-cloud-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: agent-traces-role +subjects: +- kind: ServiceAccount + name: agent-traces + namespace: grafana-cloud-monitoring +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-trace-generator + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: k6-trace-generator + template: + metadata: + labels: + name: k6-trace-generator + spec: + containers: + - env: + - name: ENDPOINT + value: agent-traces-lb.grafana-cloud-monitoring.svc.cluster.local:9411 + image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 + imagePullPolicy: IfNotPresent + name: k6-trace-generator +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces-lb + namespace: grafana-cloud-monitoring +spec: + clusterIP: None + ports: + - name: agent-traces-otlp-grpc + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + name: agent-traces-lb + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: agent-traces-lb + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: agent-traces-lb + template: + metadata: + labels: + name: agent-traces-lb + spec: + containers: + - args: + - run + - /etc/agent/agent_lb.river 
+ command: + - /bin/grafana-agent + env: + - name: AGENT_MODE + value: flow + image: grafana/agent:v0.38.0 + imagePullPolicy: IfNotPresent + name: agent-traces + ports: + - containerPort: 9411 + name: otlp-grpc + protocol: TCP + volumeMounts: + - mountPath: /etc/agent + name: agent-traces + serviceAccount: agent-traces + volumes: + - configMap: + name: agent-traces + name: agent-traces +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces-sampling + namespace: grafana-cloud-monitoring +spec: + ports: + - name: agent-lb + port: 34621 + protocol: TCP + targetPort: agent-lb + selector: + name: agent-traces-sampling + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: agent-traces-sampling + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: agent-traces-sampling + template: + metadata: + labels: + name: agent-traces-sampling + spec: + containers: + - args: + - run + - /etc/agent/agent_sampling.river + command: + - /bin/grafana-agent + env: + - name: AGENT_MODE + value: flow + image: grafana/agent:v0.38.0 + imagePullPolicy: IfNotPresent + name: agent-traces + ports: + - containerPort: 34621 + name: agent-lb + protocol: TCP + volumeMounts: + - mountPath: /etc/agent + name: agent-traces + volumes: + - configMap: + name: agent-traces + name: agent-traces +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +data: + agent_lb.river: | + otelcol.receiver.otlp "default" { + grpc { + endpoint = "0.0.0.0:9411" + } + output { + traces = [otelcol.exporter.loadbalancing.default.input,otelcol.exporter.logging.default.input] + } + } + + otelcol.exporter.logging "default" { + verbosity = "detailed" + } + + otelcol.exporter.loadbalancing "default" { + resolver { + kubernetes { + service = "agent-traces-sampling" + ports = ["34621"] + } + } + protocol { + otlp { + client { + tls { + insecure = true + } 
+ } + } + } + } + + agent_sampling.river: | + otelcol.receiver.otlp "default" { + grpc { + endpoint = "0.0.0.0:34621" + } + output { + traces = [otelcol.exporter.otlp.default.input,otelcol.exporter.logging.default.input] + } + } + + otelcol.exporter.logging "default" { + verbosity = "detailed" + } + + otelcol.exporter.otlp "default" { + client { + endpoint = "tempo-prod-06-prod-gb-south-0.grafana.net:443" + auth = otelcol.auth.basic.creds.handler + } + } + + otelcol.auth.basic "creds" { + username = "111111" + password = "pass" + } +``` + +{{< /collapse >}} + +You must fill in the correct OTLP credentials prior to running the example. +You can use [k3d][] to start the example: + +```bash +k3d cluster create grafana-agent-lb-test +kubectl apply -f kubernetes_config.yaml +``` + +To delete the cluster, run: + +```bash +k3d cluster delete grafana-agent-lb-test +``` + ## Compatible components @@ -316,4 +968,4 @@ connection work correctly. Refer to the linked documentation for more details. {{% /admonition %}} - \ No newline at end of file + diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md index 3008f22f4353..4285f34cc799 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md @@ -58,6 +58,18 @@ When `include_scope_labels` is `true` the `otel_scope_name` and When `include_target_info` is true, OpenTelemetry Collector resources are converted into `target_info` metrics. +{{% admonition type="note" %}} + +OTLP metrics can have a lot of resource attributes. +Setting `resource_to_telemetry_conversion` to `true` would convert all of them to Prometheus labels, which may not be what you want. 
+Instead of using `resource_to_telemetry_conversion`, most users need to use `otelcol.processor.transform` +to convert OTLP resource attributes to OTLP metric datapoint attributes before using `otelcol.exporter.prometheus`. +See [Creating Prometheus labels from OTLP resource attributes][] for an example. + +[Creating Prometheus labels from OTLP resource attributes]: #creating-prometheus-labels-from-otlp-resource-attributes + +{{% /admonition %}} + ## Exported fields The following fields are exported and can be referenced by other components: @@ -87,6 +99,8 @@ information. ## Example +## Basic usage + This example accepts metrics over OTLP and forwards it using `prometheus.remote_write`: @@ -109,6 +123,54 @@ prometheus.remote_write "mimir" { } } ``` + +## Create Prometheus labels from OTLP resource attributes + +This example uses `otelcol.processor.transform` to add extra `key1` and `key2` OTLP metric datapoint attributes from the +`key1` and `key2` OTLP resource attributes. + +`otelcol.exporter.prometheus` then converts `key1` and `key2` to Prometheus labels along with any other OTLP metric datapoint attributes. + +This avoids the need to set `resource_to_telemetry_conversion` to `true`, +which could have created too many unnecessary metric labels. 
+ +```river +otelcol.receiver.otlp "default" { + grpc {} + + output { + metrics = [otelcol.processor.transform.default.input] + } +} + +otelcol.processor.transform "default" { + error_mode = "ignore" + + metric_statements { + context = "datapoint" + + statements = [ + `set(attributes["key1"], resource.attributes["key1"])`, + `set(attributes["key2"], resource.attributes["key2"])`, + ] + } + + output { + metrics = [otelcol.exporter.prometheus.default.input] + } +} + +otelcol.exporter.prometheus "default" { + forward_to = [prometheus.remote_write.mimir.receiver] +} + +prometheus.remote_write "mimir" { + endpoint { + url = "http://mimir:9009/api/v1/push" + } +} +``` + ## Compatible components diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md index 10313796d1cb..2c1682a5fccc 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md @@ -429,6 +429,7 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `AWS/PrivateLinkEndpoints` or Alias: `vpc-endpoint` - Namespace: `AWS/PrivateLinkServices` or Alias: `vpc-endpoint-service` - Namespace: `AWS/Prometheus` or Alias: `amp` +- Namespace: `AWS/QLDB` or Alias: `qldb` - Namespace: `AWS/RDS` or Alias: `rds` - Namespace: `AWS/Redshift` or Alias: `redshift` - Namespace: `AWS/Route53Resolver` or Alias: `route53-resolver` @@ -442,6 +443,7 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `AWS/TransitGateway` or Alias: `tgw` - Namespace: `AWS/TrustedAdvisor` or Alias: `trustedadvisor` - Namespace: `AWS/VPN` or Alias: `vpn` +- Namespace: `AWS/ClientVPN` or Alias: `clientvpn` - Namespace: `AWS/WAFV2` or Alias: `wafv2` - Namespace: `AWS/WorkSpaces` or Alias: `workspaces` - Namespace: `AWS/AOSS` or Alias: `aoss` diff --git 
a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/flow/reference/components/prometheus.exporter.windows.md index 4ad33effdd4a..8042b5458d1c 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.windows.md @@ -254,6 +254,7 @@ Name | Description | Enabled by default [netframework_clrsecurity](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrsecurity.md) | .NET Framework Security Check metrics | [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) | Network interface I/O | ✓ [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) | OS metrics (memory, processes, users) | ✓ +[physical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.physical_disk.md) | Physical disks | ✓ [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) | Per-process metrics | [remote_fx](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics | [scheduled_task](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.scheduled_task.md) | Scheduled Tasks metrics | diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/flow/reference/components/prometheus.remote_write.md index 6348b7b9c3ff..f869343e0919 100644 --- a/docs/sources/flow/reference/components/prometheus.remote_write.md +++ b/docs/sources/flow/reference/components/prometheus.remote_write.md @@ -165,6 +165,7 @@ Name | Type | Description | Default | Required `min_backoff` | `duration` | Initial retry delay. The backoff time gets doubled for each retry. | `"30ms"` | no `max_backoff` | `duration` | Maximum retry delay. 
| `"5s"` | no `retry_on_http_429` | `bool` | Retry when an HTTP 429 status code is received. | `true` | no +`sample_age_limit` | `duration` | Maximum age of samples to send. | `"0s"` | no Each queue then manages a number of concurrent _shards_ which is responsible for sending a fraction of data to their respective endpoints. The number of @@ -191,6 +192,10 @@ responses should be treated as recoverable errors; other `HTTP 4xx` status code responses are never considered recoverable errors. When `retry_on_http_429` is enabled, `Retry-After` response headers from the servers are honored. +The `sample_age_limit` argument specifies the maximum age of samples to send. Any +samples older than the limit are dropped and won't be sent to the remote storage. +The default value is `0s`, which means that all samples are sent (feature is disabled). + ### metadata_config block Name | Type | Description | Default | Required diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/flow/reference/components/prometheus.scrape.md index 08b009c88711..8adf775687f1 100644 --- a/docs/sources/flow/reference/components/prometheus.scrape.md +++ b/docs/sources/flow/reference/components/prometheus.scrape.md @@ -44,30 +44,30 @@ The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of targets to scrape. | | yes -`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes -`job_name` | `string` | The value to use for the job label if not already set. | component name | no -`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no +`targets` | `list(map(string))` | List of targets to scrape. | | yes +`forward_to` | `list(MetricsReceiver)` | List of receivers to send scraped metrics to. | | yes +`job_name` | `string` | The value to use for the job label if not already set. 
| component name | no +`extra_metrics` | `bool` | Whether extra metrics should be generated for scrape targets. | `false` | no `enable_protobuf_negotiation` | `bool` | Whether to enable protobuf negotiation with the client. | `false` | no -`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no -`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no -`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no -`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no -`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no -`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no -`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no -`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no -`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. | | no -`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no -`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no -`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no -`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no -`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. 
| | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no +`honor_labels` | `bool` | Indicator whether the scraped metrics should remain unmodified. | `false` | no +`honor_timestamps` | `bool` | Indicator whether the scraped timestamps should be respected. | `true` | no +`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no +`scrape_classic_histograms` | `bool` | Whether to scrape a classic histogram that is also exposed as a native histogram. | `false` | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"60s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"10s"` | no +`metrics_path` | `string` | The HTTP resource path on which to fetch metrics from targets. | `/metrics` | no +`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no +`body_size_limit` | `int` | An uncompressed response body larger than this many bytes causes the scrape to fail. 0 means no limit. | | no +`sample_limit` | `uint` | More than this many samples post metric-relabeling causes the scrape to fail | | no +`target_limit` | `uint` | More than this many targets after the target relabeling causes the scrapes to fail. | | no +`label_limit` | `uint` | More than this many labels post metric-relabeling causes the scrape to fail. | | no +`label_name_length_limit` | `uint` | More than this label name length post metric-relabeling causes the scrape to fail. | | no +`label_value_length_limit` | `uint` | More than this label value length post metric-relabeling causes the scrape to fail. 
| | no +`bearer_token` | `secret` | Bearer token to authenticate with. | | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no At most one of the following can be provided: - [`bearer_token` argument](#arguments). diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/flow/reference/components/pyroscope.scrape.md index c2c54a83bfc8..74c1fa30e873 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/flow/reference/components/pyroscope.scrape.md @@ -15,12 +15,35 @@ title: pyroscope.scrape {{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} -`pyroscope.scrape` configures a [pprof] scraping job for a given set of -`targets`. The scraped performance profiles are forwarded to the list of receivers passed in -`forward_to`. +`pyroscope.scrape` collects [pprof] performance profiles for a given set of HTTP `targets`. + +`pyroscope.scrape` mimcks the scraping behavior of `prometheus.scrape`. +Similarly to how Prometheus scrapes metrics via HTTP, `pyroscope.scrape` collects profiles via HTTP requests. + +Unlike Prometheus, which usually only scrapes one `/metrics` endpoint per target, +`pyroscope.scrape` may need to scrape multiple endpoints for the same target. +This is because different types of profiles are scraped on different endpoints. +For example, "mutex" profiles may be scraped on a `/debug/pprof/delta_mutex` HTTP endpoint, whereas +memory consumption may be scraped on a `/debug/pprof/allocs` HTTP endpoint. + +The profile paths, protocol scheme, scrape interval, scrape timeout, +query parameters, as well as any other settings can be configured within `pyroscope.scrape`. 
+ +The `pyroscope.scrape` component regards a scrape as successful if it +responded with an HTTP `200 OK` status code and returned the body of a valid [pprof] profile. + +If a scrape request fails, the [debug UI][] for `pyroscope.scrape` will show: +* Detailed information about the failure. +* The time of the last successful scrape. +* The labels last used for scraping. + +The scraped performance profiles can be forwarded to components such as +`pyroscope.write` via the `forward_to` argument. Multiple `pyroscope.scrape` components can be specified by giving them different labels. +[debug UI]: {{< relref "../../tasks/debug.md" >}} + ## Usage ```river @@ -32,45 +55,106 @@ pyroscope.scrape "LABEL" { ## Arguments -The component configures and starts a new scrape job to scrape all of the -input targets. Multiple scrape jobs can be spawned for a single input target +`pyroscope.scrape` starts a new scrape job to scrape all of the input targets. +Multiple scrape jobs can be started for a single input target when scraping multiple profile types. The list of arguments that can be used to configure the block is presented below. -The scrape job name defaults to the component's unique identifier. - -Any omitted fields take on their default values. If conflicting -attributes are being passed (e.g., defining both a BearerToken and -BearerTokenFile or configuring both Basic Authorization and OAuth2 at the same -time), the component reports an error. +Any omitted arguments take on their default values. If conflicting +arguments are being passed (for example, configuring both `bearer_token` +and `bearer_token_file`), then `pyroscope.scrape` will fail to start and will report an error. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`targets` | `list(map(string))` | List of targets to scrape. | | yes -`forward_to` | `list(ProfilesReceiver)` | List of receivers to send scraped profiles to. 
| | yes -`job_name` | `string` | The job name to override the job label with. | component name | no -`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no -`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"15s"` | no -`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. | `"15s"` | no -`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no - - At most one of the following can be provided: +Name | Type | Description | Default | Required +------------------- | ------------------------ | ------------------------------------------------------------------ | -------------- | -------- +`targets` | `list(map(string))` | List of targets to scrape. | | yes +`forward_to` | `list(ProfilesReceiver)` | List of receivers to send scraped profiles to. | | yes +`job_name` | `string` | The job name to override the job label with. | component name | no +`params` | `map(list(string))` | A set of query parameters with which the target is scraped. | | no +`scrape_interval` | `duration` | How frequently to scrape the targets of this scrape configuration. | `"15s"` | no +`scrape_timeout` | `duration` | The timeout for scraping targets of this configuration. Must be larger than `scrape_interval`. | `"18s"` | no +`scheme` | `string` | The URL scheme with which to fetch metrics from targets. | `"http"` | no +`bearer_token` | `secret` | Bearer token to authenticate with. 
| | no +`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no +`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no +`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no +`enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no + +At most one of the following authentication mechanisms can be provided: - [`bearer_token` argument](#arguments). - [`bearer_token_file` argument](#arguments). - [`basic_auth` block][basic_auth]. - [`authorization` block][authorization]. - [`oauth2` block][oauth2]. - [arguments]: #arguments +[arguments]: #arguments + +#### `job_name` argument + +`job_name` defaults to the component's unique identifier. + +For example, the `job_name` of `pyroscope.scrape "local" { ... }` will be `"pyroscope.scrape.local"`. + +#### `targets` argument + +The list of `targets` can be provided [statically][example_static_targets], [dynamically][example_dynamic_targets], +or a [combination of both][example_static_and_dynamic_targets]. + +The special `__address__` label _must always_ be present and corresponds to the +`:` that is used for the scrape request. + +Labels starting with a double underscore (`__`) are treated as _internal_, and are removed prior to scraping. + +The special label `service_name` is required and must always be present. +If it is not specified, `pyroscope.scrape` will attempt to infer it from +either of the following sources, in this order: +1. `__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. +2. `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` +3. `__meta_docker_container_name` + +If `service_name` is not specified and could not be inferred, then it is set to `unspecified`. 
+The following labels are automatically injected into the scraped profiles
+so that they can be linked to a scrape target:
## Blocks @@ -92,7 +176,7 @@ The following blocks are supported inside the definition of `pyroscope.scrape`: | profiling_config > profile.fgprof | [profile.fgprof][] | Collect [fgprof][] profiles. | no | | profiling_config > profile.godeltaprof_memory | [profile.godeltaprof_memory][] | Collect [godeltaprof][] memory profiles. | no | | profiling_config > profile.godeltaprof_mutex | [profile.godeltaprof_mutex][] | Collect [godeltaprof][] mutex profiles. | no | -| profiling_config > profile.godeltaprof_block | [profile.godeltaprof_block][] | Collect [godeltaprof][] block profiles. | no | +| profiling_config > profile.godeltaprof_block | [profile.godeltaprof_block][] | Collect [godeltaprof][] block profiles. | no | | profiling_config > profile.custom | [profile.custom][] | Collect custom profiles. | no | | clustering | [clustering][] | Configure the component for when {{< param "PRODUCT_NAME" >}} is running in clustered mode. | no | @@ -100,26 +184,32 @@ The `>` symbol indicates deeper levels of nesting. For example, `oauth2 > tls_config` refers to a `tls_config` block defined inside an `oauth2` block. +Any omitted blocks take on their default values. For example, +if `profile.mutex` is not specified in the config, +the defaults documented in [profile.mutex][] will be used. 
+ [basic_auth]: #basic_auth-block [authorization]: #authorization-block [oauth2]: #oauth2-block [tls_config]: #tls_config-block [profiling_config]: #profiling_config-block -[profile.memory]: #profile.memory-block -[profile.block]: #profile.block-block -[profile.goroutine]: #profile.goroutine-block -[profile.mutex]: #profile.mutex-block -[profile.process_cpu]: #profile.process_cpu-block -[profile.fgprof]: #profile.fgprof-block -[profile.godeltaprof_memory]: #profile.godeltaprof_memory-block -[profile.godeltaprof_mutex]: #profile.godeltaprof_mutex-block -[profile.godeltaprof_block]: #profile.godeltaprof_block-block -[profile.custom]: #profile.custom-block +[profile.memory]: #profilememory-block +[profile.block]: #profileblock-block +[profile.goroutine]: #profilegoroutine-block +[profile.mutex]: #profilemutex-block +[profile.process_cpu]: #profileprocess_cpu-block +[profile.fgprof]: #profilefgprof-block +[profile.godeltaprof_memory]: #profilegodeltaprof_memory-block +[profile.godeltaprof_mutex]: #profilegodeltaprof_mutex-block +[profile.godeltaprof_block]: #profilegodeltaprof_block-block +[profile.custom]: #profilecustom-block [pprof]: https://github.com/google/pprof/blob/main/doc/README.md [clustering]: #clustering-beta [fgprof]: https://github.com/felixge/fgprof -[godeltaprof]: https://github.com/grafana/godeltaprof +[godeltaprof]: https://github.com/grafana/pyroscope-go/tree/main/godeltaprof + +[delta argument]: #delta-argument ### basic_auth block @@ -142,7 +232,7 @@ an `oauth2` block. The `profiling_config` block configures the profiling settings when scraping targets. -The block contains the following attributes: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -152,7 +242,7 @@ Name | Type | Description | Default | Required The `profile.memory` block collects profiles on memory consumption. 
-It accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -160,14 +250,13 @@ Name | Type | Description | Default | Required `path` | `string` | The path to the profile type on the target. | `"/debug/pprof/allocs"` | no `delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no -When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. +For more information about the `delta` argument, see the [delta argument][] section. ### profile.block block The `profile.block` block collects profiles on process blocking. -It accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -175,14 +264,13 @@ Name | Type | Description | Default | Required `path` | `string` | The path to the profile type on the target. | `"/debug/pprof/block"` | no `delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no -When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. +For more information about the `delta` argument, see the [delta argument][] section. ### profile.goroutine block The `profile.goroutine` block collects profiles on the number of goroutines. -It accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -190,14 +278,13 @@ Name | Type | Description | Default | Required `path` | `string` | The path to the profile type on the target. | `"/debug/pprof/goroutine"` | no `delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no -When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. 
+For more information about the `delta` argument, see the [delta argument][] section. ### profile.mutex block The `profile.mutex` block collects profiles on mutexes. -It accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -205,15 +292,14 @@ Name | Type | Description | Default | Required `path` | `string` | The path to the profile type on the target. | `"/debug/pprof/mutex"` | no `delta` | `boolean` | Whether to scrape the profile as a delta. | `false` | no -When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. +For more information about the `delta` argument, see the [delta argument][] section. ### profile.process_cpu block The `profile.process_cpu` block collects profiles on CPU consumption for the process. -It accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -221,14 +307,13 @@ Name | Type | Description | Default | Required `path` | `string` | The path to the profile type on the target. | `"/debug/pprof/profile"` | no `delta` | `boolean` | Whether to scrape the profile as a delta. | `true` | no -When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. +For more information about the `delta` argument, see the [delta argument][] section. ### profile.fgprof block The `profile.fgprof` block collects profiles from an [fgprof][] endpoint. -It accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -236,14 +321,13 @@ Name | Type | Description | Default | Required `path` | `string` | The path to the profile type on the target. | `"/debug/fgprof"` | no `delta` | `boolean` | Whether to scrape the profile as a delta. 
| `true` | no -When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. +For more information about the `delta` argument, see the [delta argument][] section. ### profile.godeltaprof_memory block The `profile.godeltaprof_memory` block collects profiles from [godeltaprof][] memory endpoint. The delta is computed on the target. -It accepts the following arguments: +The following arguments are supported: | Name | Type | Description | Default | Required | |-----------|-----------|---------------------------------------------|-----------------------------|----------| @@ -254,7 +338,7 @@ It accepts the following arguments: The `profile.godeltaprof_mutex` block collects profiles from [godeltaprof][] mutex endpoint. The delta is computed on the target. -It accepts the following arguments: +The following arguments are supported: | Name | Type | Description | Default | Required | |-----------|-----------|---------------------------------------------|------------------------------|----------| @@ -265,7 +349,7 @@ It accepts the following arguments: The `profile.godeltaprof_block` block collects profiles from [godeltaprof][] block endpoint. The delta is computed on the target. -It accepts the following arguments: +The following arguments are supported: | Name | Type | Description | Default | Required | |-----------|-----------|---------------------------------------------|------------------------------|----------| @@ -288,7 +372,7 @@ profile.custom "PROFILE_TYPE" { Multiple `profile.custom` blocks can be specified. Labels assigned to `profile.custom` blocks must be unique across the component. -The `profile.custom` block accepts the following arguments: +The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -297,7 +381,7 @@ Name | Type | Description | Default | Required `delta` | `boolean` | Whether to scrape the profile as a delta. 
| `false` | no When the `delta` argument is `true`, a `seconds` query parameter is -automatically added to requests. +automatically added to requests. The `seconds` used will be equal to `scrape_interval - 1`. ### clustering (beta) @@ -322,6 +406,19 @@ If {{< param "PRODUCT_NAME" >}} is _not_ running in clustered mode, this block i [using clustering]: {{< relref "../../concepts/clustering.md" >}} +## Common configuration + +### `delta` argument + +When the `delta` argument is `false`, the [pprof][] HTTP query will be instantaneous. + +When the `delta` argument is `true`: +* The [pprof][] HTTP query will run for a certain amount of time. +* A `seconds` parameter is automatically added to the HTTP request. +* The `seconds` used will be equal to `scrape_interval - 1`. + For example, if `scrape_interval` is `"15s"`, `seconds` will be 14 seconds. + If the HTTP endpoint is `/debug/pprof/profile`, then the HTTP query will become `/debug/pprof/profile?seconds=14` + ## Exported fields `pyroscope.scrape` does not export any fields that can be referenced by other @@ -341,67 +438,115 @@ scrape job on the component's debug endpoint. * `pyroscope_fanout_latency` (histogram): Write latency for sending to direct and indirect components. -## Scraping behavior +## Examples -The `pyroscope.scrape` component borrows the scraping behavior of Prometheus. -Prometheus, and by extension, this component, uses a pull model for scraping -profiles from a given set of _targets_. -Each scrape target is defined as a set of key-value pairs called _labels_. +[example_static_targets]: #default-endpoints-of-static-targets -The set of targets can either be _static_, or dynamically provided periodically -by a service discovery component such as `discovery.kubernetes`. The special -label `__address__` _must always_ be present and corresponds to the -`:` that is used for the scrape request. +### Default endpoints of static targets -The special label `service_name` is required and must always be present. 
If it's not specified, it is -attempted to be inferred from multiple sources: -- `__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. -- `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` -- `__meta_docker_container_name` +The following example sets up a scrape job of a statically configured +list of targets - {{< param "PRODUCT_ROOT_NAME" >}} itself and Pyroscope. +The scraped profiles are sent to `pyroscope.write` which remote writes them to a Pyroscope database. -If `service_name` is not specified and could not be inferred it is set to `unspecified`. +```river +pyroscope.scrape "local" { + targets = [ + {"__address__" = "localhost:4100", "service_name"="pyroscope"}, + {"__address__" = "localhost:12345", "service_name"="agent"}, + ] -By default, the scrape job tries to scrape all available targets' `/debug/pprof` -endpoints using HTTP, with a scrape interval of 15 seconds and scrape timeout of -15 seconds. The profile paths, protocol scheme, scrape interval and timeout, -query parameters, as well as any other settings can be configured using the -component's arguments. + forward_to = [pyroscope.write.local.receiver] +} -The scrape job expects profiles exposed by the endpoint to follow the -[pprof] protobuf format. All profiles are then propagated -to each receiver listed in the component's `forward_to` argument. +pyroscope.write "local" { + endpoint { + url = "http://pyroscope:4100" + } +} +``` -Labels coming from targets, that start with a double underscore `__` are -treated as _internal_, and are removed prior to scraping. +These endpoints will be scraped every 15 seconds: -The `pyroscope.scrape` component regards a scrape as successful if it -responded with an HTTP `200 OK` status code and returned a body of valid [pprof] profile. 
+``` +http://localhost:4100/debug/pprof/allocs +http://localhost:4100/debug/pprof/block +http://localhost:4100/debug/pprof/goroutine +http://localhost:4100/debug/pprof/mutex +http://localhost:4100/debug/pprof/profile?seconds=14 + +http://localhost:12345/debug/pprof/allocs +http://localhost:12345/debug/pprof/block +http://localhost:12345/debug/pprof/goroutine +http://localhost:12345/debug/pprof/mutex +http://localhost:12345/debug/pprof/profile?seconds=14 +``` + +Note that `seconds=14` is added to the `/debug/pprof/profile` endpoint, because: +* The `delta` argument of the `profile.process_cpu` block is `true` by default. +* `scrape_interval` is `"15s"` by default. -If the scrape request fails, the component's debug UI section contains more -detailed information about the failure, the last successful scrape, as well as -the labels last used for scraping. +Also note that the `/debug/fgprof` endpoint will not be scraped, because +the `enabled` argument of the `profile.fgprof` block is `false` by default. -The following labels are automatically injected to the scraped profiles and -can help pin down a scrape target. +[example_dynamic_targets]: #default-endpoints-of-dynamic-targets -| Label | Description | -|--------------|--------------------------------------------------------------------------------------------------| -| job | The configured job name that the target belongs to. Defaults to the fully formed component name. | -| instance | The `__address__` or `:` of the scrape target's URL. 
| -| service_name | The inferred pyroscope service name | +### Default endpoints of dynamic targets + +```river +discovery.http "dynamic_targets" { + url = "https://example.com/scrape_targets" + refresh_interval = "15s" +} + +pyroscope.scrape "local" { + targets = [discovery.http.dynamic_targets.targets] + + forward_to = [pyroscope.write.local.receiver] +} + +pyroscope.write "local" { + endpoint { + url = "http://pyroscope:4100" + } +} +``` -## Example +[example_static_and_dynamic_targets]: #default-endpoints-of-static-and-dynamic-targets -The following example sets up the scrape job with certain attributes (profiling configuration, targets) and lets it scrape two local applications ({{< param "PRODUCT_ROOT_NAME" >}} itself and Pyroscope). -The exposed profiles are sent over to the provided list of receivers, as defined by other components. +### Default endpoints of static and dynamic targets ```river +discovery.http "dynamic_targets" { + url = "https://example.com/scrape_targets" + refresh_interval = "15s" +} + pyroscope.scrape "local" { - targets = [ + targets = [ {"__address__" = "localhost:4100", "service_name"="pyroscope"}, {"__address__" = "localhost:12345", "service_name"="agent"}, + discovery.http.dynamic_targets.targets, ] + forward_to = [pyroscope.write.local.receiver] +} + +pyroscope.write "local" { + endpoint { + url = "http://pyroscope:4100" + } +} +``` + + +### Enabling and disabling profiles + +```river +pyroscope.scrape "local" { + targets = [ + {"__address__" = "localhost:12345", "service_name"="agent"}, + ] + profiling_config { profile.fgprof { enabled = true @@ -413,22 +558,27 @@ pyroscope.scrape "local" { enabled = false } } + + forward_to = [pyroscope.write.local.receiver] } ``` -Here are the endpoints that are being scraped every 15 seconds: +These endpoints will be scraped every 15 seconds: ``` -http://localhost:4100/debug/pprof/allocs -http://localhost:4100/debug/pprof/goroutine -http://localhost:4100/debug/pprof/profile?seconds=14 
-http://localhost:4100/debug/fgprof?seconds=14 http://localhost:12345/debug/pprof/allocs http://localhost:12345/debug/pprof/goroutine http://localhost:12345/debug/pprof/profile?seconds=14 http://localhost:12345/debug/fgprof?seconds=14 ``` +These endpoints will **NOT** be scraped because they are explicitly disabled: + +``` +http://localhost:12345/debug/pprof/block +http://localhost:12345/debug/pprof/mutex +``` + ## Compatible components diff --git a/docs/sources/flow/setup/_index.md b/docs/sources/flow/setup/_index.md deleted file mode 100644 index d639fa3eaea1..000000000000 --- a/docs/sources/flow/setup/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/setup/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/ -- /docs/grafana-cloud/send-data/agent/flow/setup/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/ -description: Learn how to install and configure Grafana Agent Flow -menuTitle: Set up Grafana Agent Flow -title: Set up Grafana Agent Flow -weight: 50 ---- - -# Set up {{% param "PRODUCT_NAME" %}} - -This section includes information that helps you install and configure {{< param "PRODUCT_NAME" >}}. 
- -{{< section >}} diff --git a/docs/sources/flow/setup/configure/_index.md b/docs/sources/flow/setup/configure/_index.md deleted file mode 100644 index b185bdac69a0..000000000000 --- a/docs/sources/flow/setup/configure/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/ -- /docs/grafana-cloud/send-data/agent/flow/setup/configure/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/ -description: Configure Grafana Agent Flow after it is installed -menuTitle: Configure Grafana Agent Flow -title: Configure Grafana Agent Flow -weight: 150 ---- - -# Configure {{% param "PRODUCT_NAME" %}} - -You can configure {{< param "PRODUCT_NAME" >}} after it is installed. The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at: - -* Linux: `/etc/grafana-agent-flow.river` -* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river` -* Windows: `C:\Program Files\Grafana Agent Flow\config.river` - -This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}. 
- -{{< section >}} diff --git a/docs/sources/flow/setup/deploy-agent.md b/docs/sources/flow/setup/deploy-agent.md deleted file mode 100644 index 8328e03b65b6..000000000000 --- a/docs/sources/flow/setup/deploy-agent.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/deploy-agent/ -- /docs/grafana-cloud/send-data/agent/flow/setup/deploy-agent/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/start-agent/ -description: Learn about possible deployment topologies for Grafana Agent Flow -menuTitle: Deploy Grafana Agent Flow -title: Grafana Agent Flow deployment topologies -weight: 900 ---- - -{{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} - diff --git a/docs/sources/flow/setup/start-agent.md b/docs/sources/flow/setup/start-agent.md deleted file mode 100644 index b63386cf1436..000000000000 --- a/docs/sources/flow/setup/start-agent.md +++ /dev/null @@ -1,257 +0,0 @@ ---- -aliases: -- /docs/grafana-cloud/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/start-agent/ -- /docs/grafana-cloud/send-data/agent/flow/setup/start-agent/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/start-agent/ -description: Learn how to start, restart, and stop Grafana Agent after it is installed -menuTitle: Start Grafana Agent Flow -title: Start, restart, and stop Grafana Agent Flow -weight: 800 ---- - -# Start, restart, and stop {{% param "PRODUCT_NAME" %}} - -You can start, restart, and stop {{< param "PRODUCT_NAME" >}} after it is installed. - -## Linux - -{{< param "PRODUCT_NAME" >}} is installed as a [systemd][] service on Linux. 
- -[systemd]: https://systemd.io/ - -### Start {{% param "PRODUCT_NAME" %}} - -To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -sudo systemctl start grafana-agent-flow -``` - -(Optional) To verify that the service is running, run the following command in a terminal window: - -```shell -sudo systemctl status grafana-agent-flow -``` - -### Configure {{% param "PRODUCT_NAME" %}} to start at boot - -To automatically run {{< param "PRODUCT_NAME" >}} when the system starts, run the following command in a terminal window: - -```shell -sudo systemctl enable grafana-agent-flow.service -``` - -### Restart {{% param "PRODUCT_NAME" %}} - -To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -sudo systemctl restart grafana-agent-flow -``` - -### Stop {{% param "PRODUCT_NAME" %}} - -To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -sudo systemctl stop grafana-agent-flow -``` - -### View {{% param "PRODUCT_NAME" %}} logs on Linux - -To view {{< param "PRODUCT_NAME" >}} log files, run the following command in a terminal window: - -```shell -sudo journalctl -u grafana-agent-flow -``` - -## macOS - -{{< param "PRODUCT_NAME" >}} is installed as a launchd service on macOS. - -### Start {{% param "PRODUCT_NAME" %}} - -To start {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services start grafana-agent-flow -``` - -{{< param "PRODUCT_NAME" >}} automatically runs when the system starts. 
- -(Optional) To verify that the service is running, run the following command in a terminal window: - -```shell -brew services info grafana-agent-flow -``` - -### Restart {{% param "PRODUCT_NAME" %}} - -To restart {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services restart grafana-agent-flow -``` - -### Stop {{% param "PRODUCT_NAME" %}} - -To stop {{< param "PRODUCT_NAME" >}}, run the following command in a terminal window: - -```shell -brew services stop grafana-agent-flow -``` - -### View {{% param "PRODUCT_NAME" %}} logs on macOS - -By default, logs are written to `$(brew --prefix)/var/log/grafana-agent-flow.log` and -`$(brew --prefix)/var/log/grafana-agent-flow.err.log`. - -If you followed [Configure the {{< param "PRODUCT_NAME" >}} service][Configure] and changed the path where logs are written, -refer to your current copy of the {{< param "PRODUCT_NAME" >}} formula to locate your log files. - -## Windows - -{{< param "PRODUCT_NAME" >}} is installed as a Windows Service. The service is configured to automatically run on startup. - -To verify that {{< param "PRODUCT_NAME" >}} is running as a Windows Service: - -1. Open the Windows Services manager (services.msc): - - 1. Right click on the Start Menu and select **Run**. - - 1. Type: `services.msc` and click **OK**. - -1. Scroll down to find the **{{< param "PRODUCT_NAME" >}}** service and verify that the **Status** is **Running**. - -### View {{% param "PRODUCT_NAME" %}} logs - -When running on Windows, {{< param "PRODUCT_NAME" >}} writes its logs to Windows Event -Logs with an event source name of **{{< param "PRODUCT_NAME" >}}**. - -To view the logs, perform the following steps: - -1. Open the Event Viewer: - - 1. Right click on the Start Menu and select **Run**. - - 1. Type `eventvwr` and click **OK**. - -1. In the Event Viewer, click on **Windows Logs > Application**. - -1. Search for events with the source **{{< param "PRODUCT_NAME" >}}**. 
- -## Standalone binary - -If you downloaded the standalone binary, you must run {{< param "PRODUCT_NAME" >}} from a terminal or command window. - -### Start {{% param "PRODUCT_NAME" %}} on Linux, macOS, or FreeBSD - -To start {{< param "PRODUCT_NAME" >}} on Linux, macOS, or FreeBSD, run the following command in a terminal window: - -```shell -AGENT_MODE=flow run -``` - -Replace the following: - -* _``_: The path to the {{< param "PRODUCT_NAME" >}} binary file. -* _``_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. - -### Start {{% param "PRODUCT_NAME" %}} on Windows - -To start {{< param "PRODUCT_NAME" >}} on Windows, run the following commands in a command prompt: - -```cmd -set AGENT_MODE=flow - run -``` - -Replace the following: - -* _``_: The path to the {{< param "PRODUCT_NAME" >}} binary file. -* _``_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. - -### Set up {{% param "PRODUCT_NAME" %}} as a Linux systemd service - -You can set up and manage the standalone binary for {{< param "PRODUCT_NAME" >}} as a Linux systemd service. - -{{% admonition type="note" %}} -These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} configuration. -{{% /admonition %}} - -1. To create a new user called `grafana-agent-flow` run the following command in a terminal window: - - ```shell - sudo useradd --no-create-home --shell /bin/false grafana-agent-flow - ``` - -1. Create a service file in `/etc/systemd/system` called `grafana-agent-flow.service` with the following contents: - - ```systemd - [Unit] - Description=Vendor-neutral programmable observability pipelines. 
- Documentation=https://grafana.com/docs/agent/latest/flow/ - Wants=network-online.target - After=network-online.target - - [Service] - Restart=always - User=grafana-agent-flow - Environment=HOSTNAME=%H - EnvironmentFile=/etc/default/grafana-agent-flow - WorkingDirectory= - ExecStart= run $CUSTOM_ARGS --storage.path= $CONFIG_FILE - ExecReload=/usr/bin/env kill -HUP $MAINPID - TimeoutStopSec=20s - SendSIGKILL=no - - [Install] - WantedBy=multi-user.target - ``` - - Replace the following: - - * _``_: The path to the {{< param "PRODUCT_NAME" >}} binary file. - * _``_: The path to a working directory, for example `/var/lib/grafana-agent-flow`. - -1. Create an environment file in `/etc/default/` called `grafana-agent-flow` with the following contents: - - ```shell - ## Path: - ## Description: Grafana Agent Flow settings - ## Type: string - ## Default: "" - ## ServiceRestart: grafana-agent-flow - # - # Command line options for grafana-agent - # - # The configuration file holding the Grafana Agent Flow configuration. - CONFIG_FILE="" - - # User-defined arguments to pass to the run command. - CUSTOM_ARGS="" - - # Restart on system upgrade. Defaults to true. - RESTART_ON_UPGRADE=true - ``` - - Replace the following: - - * _``_: The path to the {{< param "PRODUCT_NAME" >}} configuration file. - -1. To reload the service files, run the following command in a terminal window: - - ```shell - sudo systemctl daemon-reload - ``` - -1. Use the [Linux](#linux) systemd commands to manage your standalone Linux installation of {{< param "PRODUCT_NAME" >}}. 
- -[release]: https://github.com/grafana/agent/releases/latest - -{{% docs/reference %}} -[Configure]: "/docs/agent/ -> /docs/agent//flow/setup/configure/configure-macos.md#configure-the-grafana-agent-service" -[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-macos.md#configure-the-grafana-agent-service" -{{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/_index.md b/docs/sources/flow/tasks/_index.md new file mode 100644 index 000000000000..4ca62e8c1331 --- /dev/null +++ b/docs/sources/flow/tasks/_index.md @@ -0,0 +1,25 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tasks/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/ +# Previous page aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/getting-started/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/ +- /docs/grafana-cloud/send-data/agent/flow/getting-started/ +- getting_started/ # /docs/agent/latest/flow/getting_started/ +- getting-started/ # /docs/agent/latest/flow/getting-started/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/ +description: How to perform common tasks with Grafana Agent Flow +menuTitle: Tasks +title: Tasks with Grafana Agent Flow +weight: 200 +--- + +# Tasks with {{% param "PRODUCT_NAME" %}} + +This section details how to perform common tasks with {{< param "PRODUCT_NAME" >}}. 
+ +{{< section >}} diff --git a/docs/sources/flow/getting-started/collect-opentelemetry-data.md b/docs/sources/flow/tasks/collect-opentelemetry-data.md similarity index 95% rename from docs/sources/flow/getting-started/collect-opentelemetry-data.md rename to docs/sources/flow/tasks/collect-opentelemetry-data.md index 70dee8041d9f..22248f9f70f9 100644 --- a/docs/sources/flow/getting-started/collect-opentelemetry-data.md +++ b/docs/sources/flow/tasks/collect-opentelemetry-data.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/collect-opentelemetry-data/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-opentelemetry-data/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-opentelemetry-data/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/collect-opentelemetry-data/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-opentelemetry-data/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-opentelemetry-data/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-opentelemetry-data/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/collect-opentelemetry-data/ +- ../getting-started/collect-opentelemetry-data/ # /docs/agent/latest/flow/getting-started/collect-opentelemetry-data/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-opentelemetry-data/ description: Learn how to collect OpenTelemetry data title: Collect OpenTelemetry data weight: 300 diff --git a/docs/sources/flow/getting-started/collect-prometheus-metrics.md b/docs/sources/flow/tasks/collect-prometheus-metrics.md similarity index 96% rename from docs/sources/flow/getting-started/collect-prometheus-metrics.md rename to docs/sources/flow/tasks/collect-prometheus-metrics.md index 
be6d2ce71e01..350ce1ccfd4d 100644 --- a/docs/sources/flow/getting-started/collect-prometheus-metrics.md +++ b/docs/sources/flow/tasks/collect-prometheus-metrics.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/collect-prometheus-metrics/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/collect-prometheus-metrics/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/collect-prometheus-metrics/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/collect-prometheus-metrics/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/collect-prometheus-metrics/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/collect-prometheus-metrics/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/collect-prometheus-metrics/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-prometheus-metrics/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/collect-prometheus-metrics/ +- ../getting-started/collect-prometheus-metrics/ # /docs/agent/latest/flow/getting-started/collect-prometheus-metrics/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/collect-prometheus-metrics/ description: Learn how to collect and forward Prometheus metrics title: Collect and forward Prometheus metrics weight: 200 diff --git a/docs/sources/flow/getting-started/configure-agent-clustering.md b/docs/sources/flow/tasks/configure-agent-clustering.md similarity index 76% rename from docs/sources/flow/getting-started/configure-agent-clustering.md rename to docs/sources/flow/tasks/configure-agent-clustering.md index eacc61407986..d8539914fc69 100644 --- a/docs/sources/flow/getting-started/configure-agent-clustering.md +++ b/docs/sources/flow/tasks/configure-agent-clustering.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/configure-agent-clustering/ +- 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure-agent-clustering/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure-agent-clustering/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/configure-agent-clustering/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/configure-agent-clustering/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/configure-agent-clustering/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/configure-agent-clustering/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/configure-agent-clustering/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/configure-agent-clustering/ +- ../getting-started/configure-agent-clustering/ # /docs/agent/latest/flow/getting-started/configure-agent-clustering/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure-agent-clustering/ description: Learn how to configure Grafana Agent clustering in an existing installation menuTitle: Configure Grafana Agent clustering title: Configure Grafana Agent clustering in an existing installation @@ -61,8 +67,8 @@ To configure clustering: [clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" [beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" [beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/stability.md#beta" -[install-helm]: "/docs/agent/ -> /docs/agent//flow/setup/install/kubernetes.md" -[install-helm]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/install/kubernetes.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#component-detail-page" +[install-helm]: "/docs/agent/ -> /docs/agent//flow/get-started/install/kubernetes.md" 
+[install-helm]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/kubernetes.md" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#component-detail-page" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#component-detail-page" {{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/configure/_index.md b/docs/sources/flow/tasks/configure/_index.md new file mode 100644 index 000000000000..c44ea3dc023b --- /dev/null +++ b/docs/sources/flow/tasks/configure/_index.md @@ -0,0 +1,36 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tasks/configure/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/ +# Previous page aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/setup/configure/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/ +- /docs/grafana-cloud/send-data/agent/flow/setup/configure/ +- ../setup/configure/ # /docs/agent/latest/flow/setup/configure/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/ +description: Configure Grafana Agent Flow after it is installed +menuTitle: Configure +title: Configure Grafana Agent Flow +weight: 90 +--- + +# Configure {{% param "PRODUCT_NAME" %}} + +You can configure {{< param "PRODUCT_NAME" >}} after it is [installed][Install]. +The default River configuration file for {{< param "PRODUCT_NAME" >}} is located at: + +* Linux: `/etc/grafana-agent-flow.river` +* macOS: `$(brew --prefix)/etc/grafana-agent-flow/config.river` +* Windows: `C:\Program Files\Grafana Agent Flow\config.river` + +This section includes information that helps you configure {{< param "PRODUCT_NAME" >}}. 
+ +{{< section >}} + +{{% docs/reference %}} +[Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" +[Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" +{{% /docs/reference %}} diff --git a/docs/sources/flow/setup/configure/configure-kubernetes.md b/docs/sources/flow/tasks/configure/configure-kubernetes.md similarity index 80% rename from docs/sources/flow/setup/configure/configure-kubernetes.md rename to docs/sources/flow/tasks/configure/configure-kubernetes.md index 0eceedd5f89d..2941f68a4281 100644 --- a/docs/sources/flow/setup/configure/configure-kubernetes.md +++ b/docs/sources/flow/tasks/configure/configure-kubernetes.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/configure/configure-kubernetes/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-kubernetes/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-kubernetes/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-kubernetes/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/setup/configure/configure-kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-kubernetes/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-kubernetes/ - /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-kubernetes/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/configure-kubernetes/ +- ../../setup/configure/configure-kubernetes/ # /docs/agent/latest/flow/setup/configure/configure-kubernetes/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-kubernetes/ description: Learn how to configure Grafana Agent Flow on Kubernetes menuTitle: Kubernetes title: Configure Grafana Agent Flow on Kubernetes diff --git a/docs/sources/flow/setup/configure/configure-linux.md 
b/docs/sources/flow/tasks/configure/configure-linux.md similarity index 83% rename from docs/sources/flow/setup/configure/configure-linux.md rename to docs/sources/flow/tasks/configure/configure-linux.md index 3964eb416070..4b0bd3344ec6 100644 --- a/docs/sources/flow/setup/configure/configure-linux.md +++ b/docs/sources/flow/tasks/configure/configure-linux.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/configure/configure-linux/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-linux/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-linux/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/setup/configure/configure-linux/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-linux/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-linux/ - /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-linux/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/configure-linux/ +- ../../setup/configure/configure-linux/ # /docs/agent/latest/flow/setup/configure/configure-linux/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-linux/ description: Learn how to configure Grafana Agent Flow on Linux menuTitle: Linux title: Configure Grafana Agent Flow on Linux @@ -93,6 +99,6 @@ To expose the UI to other machines, complete the following steps: {{% docs/reference %}} [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" [run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#grafana-agent-flow-ui" 
+[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/configure/configure-macos.md b/docs/sources/flow/tasks/configure/configure-macos.md similarity index 81% rename from docs/sources/flow/setup/configure/configure-macos.md rename to docs/sources/flow/tasks/configure/configure-macos.md index 5261e75f9877..fc1c6677f579 100644 --- a/docs/sources/flow/setup/configure/configure-macos.md +++ b/docs/sources/flow/tasks/configure/configure-macos.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/configure/configure-macos/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-macos/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-macos/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-macos/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/setup/configure/configure-macos/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-macos/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-macos/ - /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-macos/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/configure-macos/ +- ../../setup/configure/configure-macos/ # /docs/agent/latest/flow/setup/configure/configure-macos/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-macos/ description: Learn how to configure Grafana Agent Flow on macOS menuTitle: macOS title: Configure Grafana Agent Flow on macOS @@ -82,6 +88,6 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `127.0.0.1` with `0.0.0.0`. 
{{% docs/reference %}} -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/setup/configure/configure-windows.md b/docs/sources/flow/tasks/configure/configure-windows.md similarity index 82% rename from docs/sources/flow/setup/configure/configure-windows.md rename to docs/sources/flow/tasks/configure/configure-windows.md index 010e6897b8ea..806579ea1359 100644 --- a/docs/sources/flow/setup/configure/configure-windows.md +++ b/docs/sources/flow/tasks/configure/configure-windows.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/configure/configure-windows/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/configure/configure-windows/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/configure/configure-windows/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-windows/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/setup/configure/configure-windows/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/setup/configure/configure-windows/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/setup/configure/configure-windows/ - /docs/grafana-cloud/send-data/agent/flow/setup/configure/configure-windows/ -canonical: https://grafana.com/docs/agent/latest/flow/setup/configure/configure-windows/ +- ../../setup/configure/configure-windows/ # /docs/agent/latest/flow/setup/configure/configure-windows/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/configure/configure-windows/ description: Learn how to configure Grafana Agent Flow 
on Windows menuTitle: Windows title: Configure Grafana Agent Flow on Windows @@ -90,7 +96,7 @@ To expose the UI to other machines, complete the following steps: To listen on all interfaces, replace `LISTEN_ADDR` with `0.0.0.0`. {{% docs/reference %}} -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/monitoring/debugging.md b/docs/sources/flow/tasks/debug.md similarity index 87% rename from docs/sources/flow/monitoring/debugging.md rename to docs/sources/flow/tasks/debug.md index a3d148e52487..331307a58d9b 100644 --- a/docs/sources/flow/monitoring/debugging.md +++ b/docs/sources/flow/tasks/debug.md @@ -1,16 +1,23 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/debug/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/debug/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/debug/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/debug/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/monitoring/debugging/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/debugging/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/debugging/ - /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging/ +- ../monitoring/debugging/ # /docs/agent/latest/flow/monitoring/debugging/ canonical: https://grafana.com/docs/agent/latest/flow/monitoring/debugging/ -description: Learn about debugging -title: Debugging -weight: 300 +description: Learn about debugging issues with Grafana Agent Flow +title: Debug issues with Grafana Agent Flow +menuTitle: Debug issues 
+weight: 1000 --- -# Debugging +# Debug {{< param "PRODUCT_NAME" >}} Follow these steps to debug issues with {{< param "PRODUCT_NAME" >}}: @@ -32,7 +39,7 @@ Follow these steps to debug issues with {{< param "PRODUCT_NAME" >}}: ### Home page -![](../../../assets/ui_home_page.png) +![](../../assets/ui_home_page.png) The home page shows a table of components defined in the configuration file and their health. @@ -42,14 +49,14 @@ Click the {{< param "PRODUCT_ROOT_NAME" >}} logo to navigate back to the home pa ### Graph page -![](../../../assets/ui_graph_page.png) +![](../../assets/ui_graph_page.png) The **Graph** page shows a graph view of components defined in the configuration file and their health. Clicking a component in the graph navigates to the [Component detail page](#component-detail-page) for that component. ### Component detail page -![](../../../assets/ui_component_detail_page.png) +![](../../assets/ui_component_detail_page.png) The component detail page shows the following information for each component: @@ -62,7 +69,7 @@ The component detail page shows the following information for each component: ### Clustering page -![](../../../assets/ui_clustering_page.png) +![](../../assets/ui_clustering_page.png) The clustering page shows the following information for each cluster node: @@ -111,8 +118,8 @@ To debug issues when using [clustering][], check for the following symptoms. 
[logging]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/config-blocks/logging.md" [clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering.md" [clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" -[install]: "/docs/agent/ -> /docs/agent//flow/setup/install" -[install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/install" +[install]: "/docs/agent/ -> /docs/agent//flow/get-started/install" +[install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install" [secret]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/expressions/types_and_values.md#secrets.md" [secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/expressions/types_and_values.md#secrets.md" [grafana-agent run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" diff --git a/docs/sources/flow/getting-started/distribute-prometheus-scrape-load.md b/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md similarity index 73% rename from docs/sources/flow/getting-started/distribute-prometheus-scrape-load.md rename to docs/sources/flow/tasks/distribute-prometheus-scrape-load.md index d881b2f0a34b..ee3a3fd9827a 100644 --- a/docs/sources/flow/getting-started/distribute-prometheus-scrape-load.md +++ b/docs/sources/flow/tasks/distribute-prometheus-scrape-load.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/distribute-prometheus-scrape-load/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/distribute-prometheus-scrape-load/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/distribute-prometheus-scrape-load/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/distribute-prometheus-scrape-load/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/distribute-prometheus-scrape-load/ - 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/distribute-prometheus-scrape-load/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/distribute-prometheus-scrape-load/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/distribute-prometheus-scrape-load/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/distribute-prometheus-scrape-load/ +- ../getting-started/distribute-prometheus-scrape-load/ # /docs/agent/latest/flow/getting-started/distribute-prometheus-scrape-load/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/distribute-prometheus-scrape-load/ description: Learn how to distribute your Prometheus metrics scrape load menuTitle: Distribute Prometheus metrics scrape load title: Distribute Prometheus metrics scrape load @@ -51,12 +57,12 @@ To distribute Prometheus metrics scrape load with clustering: [Clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering.md" [beta]: "/docs/agent/ -> /docs/agent//stability.md#beta" [beta]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/stability.md#beta" -[configure-grafana-agent]: "/docs/agent/ -> /docs/agent//flow/setup/configure" -[configure-grafana-agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/configure" -[Configure Prometheus metrics collection]: "/docs/agent/ -> /docs/agent//flow/getting-started/collect-prometheus-metrics.md" -[Configure Prometheus metrics collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-prometheus-metrics.md" -[Configure clustering]: "/docs/agent/ -> /docs/agent//flow/getting-started/configure-agent-clustering.md" -[Configure clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/getting-started/configure-agent-clustering.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md#component-detail-page" -[UI]: "/docs/grafana-cloud/ 
-> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md#component-detail-page" +[configure-grafana-agent]: "/docs/agent/ -> /docs/agent//flow/tasks/configure" +[configure-grafana-agent]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure" +[Configure Prometheus metrics collection]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-prometheus-metrics.md" +[Configure Prometheus metrics collection]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/collect-prometheus-metrics.md" +[Configure clustering]: "/docs/agent/ -> /docs/agent//flow/tasks/configure-agent-clustering.md" +[Configure clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure-agent-clustering.md" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md#component-detail-page" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md#component-detail-page" {{% /docs/reference %}} diff --git a/docs/sources/flow/monitoring/agent-resource-usage.md b/docs/sources/flow/tasks/estimate-resource-usage.md similarity index 76% rename from docs/sources/flow/monitoring/agent-resource-usage.md rename to docs/sources/flow/tasks/estimate-resource-usage.md index f7a44c638555..e7b066d9e8ee 100644 --- a/docs/sources/flow/monitoring/agent-resource-usage.md +++ b/docs/sources/flow/tasks/estimate-resource-usage.md @@ -1,17 +1,26 @@ --- aliases: + - /docs/agent/flow/tasks/estimate-resource-usage/ + - /docs/grafana-cloud/agent/flow/tasks/estimate-resource-usage/ + - /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/estimate-resource-usage/ + - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/estimate-resource-usage/ + - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ + # Previous page aliases for backwards compatibility: - /docs/agent/flow/monitoring/resource-usage/ - /docs/grafana-cloud/agent/flow/monitoring/resource-usage/ - 
/docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/resource-usage/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/resource-usage/ - /docs/grafana-cloud/send-data/agent/flow/monitoring/resource-usage/ + - ../monitoring/resource-usage/ # /docs/agent/latest/flow/monitoring/resource-usage/ canonical: https://grafana.com/docs/agent/latest/flow/monitoring/resource-usage/ -description: Guidance for expected Agent resource usage +description: Estimate expected Agent resource usage headless: true -title: Resource usage +title: Estimate resource usage +menuTitle: Estimate resource usage +weight: 190 --- -# {{% param "PRODUCT_NAME" %}} resource usage +# Estimate {{% param "PRODUCT_NAME" %}} resource usage This page provides guidance for expected resource usage of {{% param "PRODUCT_NAME" %}} for each telemetry type, based on operational @@ -33,7 +42,7 @@ series that need to be scraped and the scrape interval. As a rule of thumb, **per each 1 million active series** and with the default scrape interval, you can expect to use approximately: -* 1.5 CPU cores +* 0.4 CPU cores * 11 GiB of memory * 1.5 MiB/s of total network bandwidth, send and receive @@ -42,7 +51,7 @@ will broadly apply to other deployment modes. For more information on how to deploy {{% param "PRODUCT_NAME" %}}, see [deploying grafana agent][]. 
-[deploying grafana agent]: {{< relref "../setup/deploy-agent.md" >}} +[deploying grafana agent]: {{< relref "../get-started/deploy-agent.md" >}} [clustering]: {{< relref "../concepts/clustering.md" >}} ## Loki logs diff --git a/docs/sources/flow/tasks/migrate/_index.md b/docs/sources/flow/tasks/migrate/_index.md new file mode 100644 index 000000000000..a0c98966dcb4 --- /dev/null +++ b/docs/sources/flow/tasks/migrate/_index.md @@ -0,0 +1,19 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tasks/migrate/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/ +description: How to migrate to Grafana Agent Flow +menuTitle: Migrate +title: Migrate to Grafana Agent Flow +weight: 100 +--- + +# How to migrate to {{% param "PRODUCT_NAME" %}} + +This section details how to migrate to {{< param "PRODUCT_NAME" >}} from other +common solutions. 
+ +{{< section >}} diff --git a/docs/sources/flow/getting-started/migrating-from-operator.md b/docs/sources/flow/tasks/migrate/from-operator.md similarity index 94% rename from docs/sources/flow/getting-started/migrating-from-operator.md rename to docs/sources/flow/tasks/migrate/from-operator.md index 5985488c2915..f035f95484ad 100644 --- a/docs/sources/flow/getting-started/migrating-from-operator.md +++ b/docs/sources/flow/tasks/migrate/from-operator.md @@ -1,15 +1,19 @@ --- aliases: +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-operator/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-operator/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-operator/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-operator/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-operator/ -description: Migrating from Grafana Agent Operator to Grafana Agent Flow +- ../../getting-started/migrating-from-operator/ # /docs/agent/latest/flow/getting-started/migrating-from-operator/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-operator/ +description: Migrate from Grafana Agent Operator to Grafana Agent Flow menuTitle: Migrate from Operator -title: Migrating from Grafana Agent Operator to Grafana Agent Flow +title: Migrate from Grafana Agent Operator to Grafana Agent Flow weight: 320 --- -# Migrating from Grafana Agent Operator to {{% param "PRODUCT_NAME" %}} +# Migrate from Grafana Agent Operator to {{% param "PRODUCT_NAME" %}} With the release of {{< param "PRODUCT_NAME" >}}, Grafana Agent Operator is no longer the recommended way to deploy {{< param "PRODUCT_ROOT_NAME" >}} in Kubernetes. Some of the Operator functionality has moved into {{< param "PRODUCT_NAME" >}} itself, and the Helm Chart has replaced the remaining functionality. 
@@ -279,12 +283,12 @@ The [reference documentation][component documentation] should help convert those {{% docs/reference %}} [clustering]: "/docs/agent/ -> /docs/agent//flow/concepts/clustering" [clustering]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/clustering" -[deployment guide]: "/docs/agent/ -> /docs/agent//flow/setup/deploy-agent" -[deployment guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/setup/deploy-agent" +[deployment guide]: "/docs/agent/ -> /docs/agent//flow/get-started/deploy-agent" +[deployment guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/get-started/deploy-agent" [operator guide]: "/docs/agent/ -> /docs/agent//operator/deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" [operator guide]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/operator/deploy-agent-operator-resources.md#deploy-a-metricsinstance-resource" -[Helm chart]: "/docs/agent/ -> /docs/agent//flow/setup/install/kubernetes" -[Helm chart]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/setup/install/kubernetes" +[Helm chart]: "/docs/agent/ -> /docs/agent//flow/get-started/install/kubernetes" +[Helm chart]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/get-started/install/kubernetes" [remote.kubernetes.secret]: "/docs/agent/ -> /docs/agent//flow/reference/components/remote.kubernetes.secret.md" [remote.kubernetes.secret]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/flow/reference/components/remote.kubernetes.secret.md" [prometheus.remote_write]: "/docs/agent/ -> /docs/agent//flow/reference/components/prometheus.remote_write.md" diff --git a/docs/sources/flow/getting-started/migrating-from-prometheus.md b/docs/sources/flow/tasks/migrate/from-prometheus.md similarity index 90% rename from docs/sources/flow/getting-started/migrating-from-prometheus.md rename to docs/sources/flow/tasks/migrate/from-prometheus.md index 8d77d6914340..62fef82d3c2d 100644 --- 
a/docs/sources/flow/getting-started/migrating-from-prometheus.md +++ b/docs/sources/flow/tasks/migrate/from-prometheus.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/migrate/from-prometheus/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-prometheus/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-prometheus/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-prometheus/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/migrating-from-prometheus/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-prometheus/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-prometheus/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-prometheus/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-prometheus/ +- ../../getting-started/migrating-from-prometheus/ # /docs/agent/latest/flow/getting-started/migrating-from-prometheus/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-prometheus/ description: Learn how to migrate from Prometheus to Grafana Agent Flow menuTitle: Migrate from Prometheus title: Migrate from Prometheus to Grafana Agent Flow @@ -58,7 +64,7 @@ This conversion will enable you to take full advantage of the many additional fe - _``_: The full path to the Prometheus configuration. - _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. -1. [Start][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: +1. 
[Run][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: ### Debugging @@ -125,7 +131,7 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} > using a Prometheus configuration. -[Start][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=prometheus`. +[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=prometheus`. Your configuration file must be a valid Prometheus configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file. ### Debugging @@ -250,12 +256,12 @@ The following list is specific to the convert command and not {{< param "PRODUCT [convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md" [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" [run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md" -[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md" -[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/" +[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md" +[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md" [River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md" [River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md" -[UI]: "/docs/agent/ -> 
/docs/agent//flow/monitoring/debugging#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/migrating-from-promtail.md b/docs/sources/flow/tasks/migrate/from-promtail.md similarity index 90% rename from docs/sources/flow/getting-started/migrating-from-promtail.md rename to docs/sources/flow/tasks/migrate/from-promtail.md index 4799e0e20b5f..182dec857c3b 100644 --- a/docs/sources/flow/getting-started/migrating-from-promtail.md +++ b/docs/sources/flow/tasks/migrate/from-promtail.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/migrate/from-promtail/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-promtail/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-promtail/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-promtail/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/migrating-from-promtail/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-promtail/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-promtail/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-promtail/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-promtail/ +- ../../getting-started/migrating-from-promtail/ # /docs/agent/latest/flow/getting-started/migrating-from-promtail/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-promtail/ description: Learn how to migrate from Promtail to Grafana Agent Flow menuTitle: Migrate from Promtail 
title: Migrate from Promtail to Grafana Agent Flow @@ -58,7 +64,7 @@ This conversion will enable you to take full advantage of the many additional fe * _``_: The full path to the Promtail configuration. * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. -1. [Start][] {{< param "PRODUCT_NAME" >}} using the new configuration from _``_: +1. [Run][] {{< param "PRODUCT_NAME" >}} using the new configuration from _``_: ### Debugging @@ -121,7 +127,7 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} using a Promtail configuration. -[Start][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=promtail`. +[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=promtail`. Your configuration file must be a valid Promtail configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file. 
### Debugging @@ -233,12 +239,12 @@ The following list is specific to the convert command and not {{< param "PRODUCT [convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md" [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" [run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md" -[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md" -[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/" +[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md" +[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md" [River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md" [River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/getting-started/migrating-from-static.md b/docs/sources/flow/tasks/migrate/from-static.md similarity index 92% rename from docs/sources/flow/getting-started/migrating-from-static.md rename to docs/sources/flow/tasks/migrate/from-static.md index 8b7b1f98721e..ff006b514a5e 100644 --- 
a/docs/sources/flow/getting-started/migrating-from-static.md +++ b/docs/sources/flow/tasks/migrate/from-static.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/migrate/from-static/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/migrate/from-static/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/migrate/from-static/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-static/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/migrating-from-static/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/migrating-from-static/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/migrating-from-static/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-static/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/migrating-from-static/ +- ../../getting-started/migrating-from-static/ # /docs/agent/latest/flow/getting-started/migrating-from-static/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-static/ description: Learn how to migrate your configuration from Grafana Agent Static to Grafana Agent Flow menuTitle: Migrate from Static to Flow title: Migrate Grafana Agent Static to Grafana Agent Flow @@ -61,7 +67,7 @@ This conversion will enable you to take full advantage of the many additional fe * _``_: The full path to the [Static][] configuration. * _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. -1. [Start][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: +1.
[Run][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: ### Debugging @@ -124,7 +130,7 @@ This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your exist > In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} using a Static configuration. -[Start][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=static`. +[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=static`. Your configuration file must be a valid [Static] configuration file. ### Debugging @@ -368,10 +374,10 @@ The following list is specific to the convert command and not {{< param "PRODUCT [convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md" [run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" [run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" -[Start]: "/docs/agent/ -> /docs/agent//flow/setup/start-agent.md" -[Start]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/setup/start-agent.md" -[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging.md" -[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/" +[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md" +[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md" [River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/" [River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/" [Integrations next]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/integrations-next/_index.md" @@ -382,14 +388,14 @@ The following list is specific to the 
convert command and not {{< param "PRODUCT [Agent Management]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/agent-management.md" [env]: "/docs/agent/ -> /docs/agent//flow/reference/stdlib/env.md" [env]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/stdlib/env.md" -[Prometheus Limitations]: "/docs/agent/ -> /docs/agent//flow/getting-started/migrating-from-prometheus.md#limitations" -[Prometheus Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-prometheus.md#limitations" -[Promtail Limitations]: "/docs/agent/ -> /docs/agent//flow/getting-started/migrating-from-promtail.md#limitations" -[Promtail Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/getting-started/migrating-from-promtail.md#limitations" +[Prometheus Limitations]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-prometheus.md#limitations" +[Prometheus Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-prometheus.md#limitations" +[Promtail Limitations]: "/docs/agent/ -> /docs/agent//flow/tasks/migrate/from-promtail.md#limitations" +[Promtail Limitations]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/migrate/from-promtail.md#limitations" [Metrics]: "/docs/agent/ -> /docs/agent//static/configuration/metrics-config.md" [Metrics]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/configuration/metrics-config.md" [Logs]: "/docs/agent/ -> /docs/agent//static/configuration/logs-config.md" [Logs]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/static/logs-config.md" -[UI]: "/docs/agent/ -> /docs/agent//flow/monitoring/debugging#grafana-agent-flow-ui" -[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/monitoring/debugging#grafana-agent-flow-ui" +[UI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug#grafana-agent-flow-ui" +[UI]: 
"/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui" {{% /docs/reference %}} diff --git a/docs/sources/flow/tasks/monitor/_index.md b/docs/sources/flow/tasks/monitor/_index.md new file mode 100644 index 000000000000..ac23db26072c --- /dev/null +++ b/docs/sources/flow/tasks/monitor/_index.md @@ -0,0 +1,24 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tasks/monitor/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/ +# Previous page aliases for backwards compatibility: +- /docs/grafana-cloud/agent/flow/monitoring/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/ +- /docs/grafana-cloud/send-data/agent/flow/monitoring/ +- ../monitoring/ # /docs/agent/latest/flow/monitoring/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/monitor/ +description: Learn about monitoring Grafana Agent Flow +title: Monitor Grafana Agent Flow +menuTitle: Monitor +weight: 110 +--- + +# How to monitor {{% param "PRODUCT_NAME" %}} + +This section details various ways to monitor and debug {{< param "PRODUCT_NAME" >}}. 
+ +{{< section >}} diff --git a/docs/sources/flow/monitoring/component_metrics.md b/docs/sources/flow/tasks/monitor/component_metrics.md similarity index 75% rename from docs/sources/flow/monitoring/component_metrics.md rename to docs/sources/flow/tasks/monitor/component_metrics.md index 90c3769572c9..5b3693a1f182 100644 --- a/docs/sources/flow/monitoring/component_metrics.md +++ b/docs/sources/flow/tasks/monitor/component_metrics.md @@ -1,17 +1,24 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/monitor/component_metrics/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/component_metrics/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/component_metrics/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/component_metrics/ +- component-metrics/ # /docs/agent/latest/flow/tasks/monitor/component-metrics/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/monitoring/component_metrics/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/component_metrics/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/component_metrics/ - /docs/grafana-cloud/send-data/agent/flow/monitoring/component_metrics/ -- component-metrics/ +- ../../monitoring/component-metrics/ # /docs/agent/latest/flow/monitoring/component-metrics/ +- ../../monitoring/component_metrics/ # /docs/agent/latest/flow/monitoring/component_metrics/ canonical: https://grafana.com/docs/agent/latest/flow/monitoring/component_metrics/ -description: Learn about component metrics -title: Component metrics +description: Learn how to monitor component metrics +title: Monitor components weight: 200 --- -# Component metrics +# How to monitor components {{< param "PRODUCT_NAME" >}} [components][] may optionally expose Prometheus metrics which can be used to investigate the behavior of that component. 
These component-specific metrics are only generated when an instance of that component is running. diff --git a/docs/sources/flow/monitoring/controller_metrics.md b/docs/sources/flow/tasks/monitor/controller_metrics.md similarity index 74% rename from docs/sources/flow/monitoring/controller_metrics.md rename to docs/sources/flow/tasks/monitor/controller_metrics.md index e0f17a2edba9..0ba7617032aa 100644 --- a/docs/sources/flow/monitoring/controller_metrics.md +++ b/docs/sources/flow/tasks/monitor/controller_metrics.md @@ -1,17 +1,24 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/monitor/controller_metrics/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/monitor/controller_metrics/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/monitor/controller_metrics/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/monitor/controller_metrics/ +- controller-metrics/ # /docs/agent/latest/flow/tasks/monitor/controller-metrics/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/monitoring/controller_metrics/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/monitoring/controller_metrics/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/monitoring/controller_metrics/ - /docs/grafana-cloud/send-data/agent/flow/monitoring/controller_metrics/ -- controller-metrics/ +- ../../monitoring/controller-metrics/ # /docs/agent/latest/flow/monitoring/controller-metrics/ +- ../../monitoring/controller_metrics/ # /docs/agent/latest/flow/monitoring/controller_metrics/ canonical: https://grafana.com/docs/agent/latest/flow/monitoring/controller_metrics/ -description: Learn about controller metrics -title: Controller metrics +description: Learn how to monitor controller metrics +title: Monitor controller weight: 100 --- -# Controller metrics +# How to monitor controller The {{< param "PRODUCT_NAME" >}} [component controller][] exposes Prometheus metrics which you can use to investigate 
the controller state. diff --git a/docs/sources/flow/getting-started/opentelemetry-to-lgtm-stack.md b/docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md similarity index 93% rename from docs/sources/flow/getting-started/opentelemetry-to-lgtm-stack.md rename to docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md index 279960d79fe0..2da979078336 100644 --- a/docs/sources/flow/getting-started/opentelemetry-to-lgtm-stack.md +++ b/docs/sources/flow/tasks/opentelemetry-to-lgtm-stack.md @@ -1,10 +1,16 @@ --- aliases: +- /docs/grafana-cloud/agent/flow/tasks/opentelemetry-to-lgtm-stack/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/opentelemetry-to-lgtm-stack/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/opentelemetry-to-lgtm-stack/ +- /docs/grafana-cloud/send-data/agent/flow/tasks/opentelemetry-to-lgtm-stack/ +# Previous page aliases for backwards compatibility: - /docs/grafana-cloud/agent/flow/getting-started/opentelemetry-to-lgtm-stack/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/getting-started/opentelemetry-to-lgtm-stack/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/getting-started/opentelemetry-to-lgtm-stack/ - /docs/grafana-cloud/send-data/agent/flow/getting-started/opentelemetry-to-lgtm-stack/ -canonical: https://grafana.com/docs/agent/latest/flow/getting-started/opentelemetry-to-lgtm-stack/ +- ../getting-started/opentelemetry-to-lgtm-stack/ # /docs/agent/latest/flow/getting-started/opentelemetry-to-lgtm-stack/ +canonical: https://grafana.com/docs/agent/latest/flow/tasks/opentelemetry-to-lgtm-stack/ description: Learn how to collect OpenTelemetry data and forward it to the Grafana stack title: OpenTelemetry to Grafana stack @@ -38,11 +44,11 @@ This topic describes how to: * Have a set of OpenTelemetry applications ready to push telemetry data to {{< param "PRODUCT_NAME" >}}. * Identify where {{< param "PRODUCT_NAME" >}} will write received telemetry data. 
* Be familiar with the concept of [Components][] in {{< param "PRODUCT_NAME" >}}. -* Complete the [Collect open telemetry data][] getting started guide. You will pick up from where that guide ended. +* Complete the [Collect open telemetry data][] task. You will pick up from where that guide ended. ## The pipeline -You can start with the {{< param "PRODUCT_NAME" >}} configuration you created in the [Collect open telemetry data][] Getting Started guide. +You can start with the {{< param "PRODUCT_NAME" >}} configuration you created in the [Collect open telemetry data][] task. ```river otelcol.receiver.otlp "example" { @@ -108,7 +114,7 @@ loki.write "default" { To use Loki with basic-auth, which is required with Grafana Cloud Loki, you must configure the [loki.write][] component. You can get the Loki configuration from the Loki **Details** page in the [Grafana Cloud Portal][]: -![](../../../assets/getting-started/loki-config.png) +![](../../../assets/tasks/loki-config.png) ```river otelcol.exporter.loki "grafana_cloud_loki" { @@ -143,7 +149,7 @@ otelcol.exporter.otlp "default" { To use Tempo with basic-auth, which is required with Grafana Cloud Tempo, you must use the [otelcol.auth.basic][] component. You can get the Tempo configuration from the Tempo **Details** page in the [Grafana Cloud Portal][]: -![](../../../assets/getting-started/tempo-config.png) +![](../../../assets/tasks/tempo-config.png) ```river otelcol.exporter.otlp "grafana_cloud_tempo" { @@ -180,7 +186,7 @@ prometheus.remote_write "default" { To use Prometheus with basic-auth, which is required with Grafana Cloud Prometheus, you must configure the [prometheus.remote_write][] component. 
You can get the Prometheus configuration from the Prometheus **Details** page in the [Grafana Cloud Portal][]: -![](../../../assets/getting-started/prometheus-config.png) +![](../../../assets/tasks/prometheus-config.png) ```river otelcol.exporter.prometheus "grafana_cloud_prometheus" { @@ -306,7 +312,7 @@ ts=2023-05-09T09:37:15.304234Z component=otelcol.receiver.otlp.default level=inf You can now check the pipeline graphically by visiting http://localhost:12345/graph -![](../../../assets/getting-started/otlp-lgtm-graph.png) +![](../../../assets/tasks/otlp-lgtm-graph.png) [OpenTelemetry]: https://opentelemetry.io [Grafana Loki]: https://grafana.com/oss/loki/ @@ -316,8 +322,8 @@ You can now check the pipeline graphically by visiting http://localhost:12345/gr [Grafana Mimir]: https://grafana.com/oss/mimir/ {{% docs/reference %}} -[Collect open telemetry data]: "/docs/agent/ -> /docs/agent//flow/getting-started/collect-opentelemetry-data.md" -[Collect open telemetry data]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/getting-started/collect-opentelemetry-data.md" +[Collect open telemetry data]: "/docs/agent/ -> /docs/agent//flow/tasks/collect-opentelemetry-data.md" +[Collect open telemetry data]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/collect-opentelemetry-data.md" [Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" [Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" [loki.write]: "/docs/agent/ -> /docs/agent//flow/reference/components/loki.write.md" diff --git a/docs/sources/shared/wal-data-retention.md b/docs/sources/shared/wal-data-retention.md index 1d2caf844e17..973af3afb4d6 100644 --- a/docs/sources/shared/wal-data-retention.md +++ b/docs/sources/shared/wal-data-retention.md @@ -111,6 +111,6 @@ To delete the corrupted WAL: [WAL block]: /docs/agent//flow/reference/components/prometheus.remote_write#wal-block [metrics config]: 
/docs/agent//static/configuration/metrics-config -[Stop]: /docs/agent//flow/setup/start-agent +[Stop]: /docs/agent//flow/get-started/start-agent [wal_directory]: /docs/agent//static/configuration/metrics-config [run]: /docs/agent//flow/reference/cli/run diff --git a/docs/sources/static/configuration/integrations/cadvisor-config.md b/docs/sources/static/configuration/integrations/cadvisor-config.md index adf92dcff827..a4a33b4df219 100644 --- a/docs/sources/static/configuration/integrations/cadvisor-config.md +++ b/docs/sources/static/configuration/integrations/cadvisor-config.md @@ -60,10 +60,10 @@ Full reference of options: # cAdvisor-specific configuration options # - # Convert container labels and environment variables into labels on prometheus metrics for each container. If false, then only metrics exported are container name, first alias, and image name. + # Convert container labels and environment variables into labels on Prometheus metrics for each container. If false, then the only metrics exported are container name, first alias, and image name. `.` aren't valid in Prometheus label names, so if there are any in the container label, they will be transformed to `_` when converted to the Prometheus label. [store_container_labels: | default = true] - # List of container labels to be converted to labels on prometheus metrics for each container. store_container_labels must be set to false for this to take effect. + # List of container labels to be converted to labels on Prometheus metrics for each container. store_container_labels must be set to false for this to take effect. This must match the format of the container label, not the converted Prometheus label (`.` are converted to `_` in the Prometheus label).
allowlisted_container_labels: [ - ] diff --git a/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md b/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md index 8015bde84bc9..6495625b76c8 100644 --- a/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md +++ b/docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md @@ -440,6 +440,7 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `AWS/PrivateLinkEndpoints` or Alias: `vpc-endpoint` - Namespace: `AWS/PrivateLinkServices` or Alias: `vpc-endpoint-service` - Namespace: `AWS/Prometheus` or Alias: `amp` +- Namespace: `AWS/QLDB` or Alias: `qldb` - Namespace: `AWS/RDS` or Alias: `rds` - Namespace: `AWS/Redshift` or Alias: `redshift` - Namespace: `AWS/Route53Resolver` or Alias: `route53-resolver` @@ -453,6 +454,7 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Namespace: `AWS/TransitGateway` or Alias: `tgw` - Namespace: `AWS/TrustedAdvisor` or Alias: `trustedadvisor` - Namespace: `AWS/VPN` or Alias: `vpn` +- Namespace: `AWS/ClientVPN` or Alias: `clientvpn` - Namespace: `AWS/WAFV2` or Alias: `wafv2` - Namespace: `AWS/WorkSpaces` or Alias: `workspaces` - Namespace: `AWS/AOSS` or Alias: `aoss` diff --git a/docs/sources/static/configuration/integrations/windows-exporter-config.md b/docs/sources/static/configuration/integrations/windows-exporter-config.md index 53c58b60fe0b..7f12117ebfbc 100644 --- a/docs/sources/static/configuration/integrations/windows-exporter-config.md +++ b/docs/sources/static/configuration/integrations/windows-exporter-config.md @@ -114,6 +114,16 @@ Full reference of options: # Maps to collector.service.services-where in windows_exporter [where_clause: | default=""] + # Configuration for physical disk on Windows + physical_disk: + # Regexp of volumes to include. 
Disk name must both match include and not match exclude to be included. + # Maps to collector.physical_disk.disk-include in windows_exporter. + [include: | default=".+"] + + # Regexp of volumes to exclude. Disk name must both match include and not match exclude to be included. + # Maps to collector.physical_disk.disk-exclude in windows_exporter. + [exclude: | default=""] + # Configuration for Windows Processes process: # Regexp of processes to include. Process name must both match whitelist and not match blacklist to be included. diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md index 57a2a724ea58..8ede4e9eb94e 100644 --- a/docs/sources/static/configuration/traces-config.md +++ b/docs/sources/static/configuration/traces-config.md @@ -308,30 +308,52 @@ tail_sampling: # It ensures that all spans of a trace are sampled in the same instance. # It works by exporting spans based on their traceID via consistent hashing. # -# Enabling this feature is required for tail_sampling to correctly work when -# different agent instances can receive spans for the same trace. +# Enabling this feature is required for "tail_sampling", "spanmetrics", and "service_graphs" +# to correctly work when spans are ingested by multiple agent instances. # # Load balancing works by layering two pipelines and consistently exporting # spans belonging to a trace to the same agent instance. # Agent instances need to be able to communicate with each other via gRPC. # +# When load_balancing is enabled: +# 1. An Agent receives spans from the configured "receivers". +# 2. If the "attributes" processor is configured, it will run through all the spans. +# 3. The spans will be exported using the "load_balancing" configuration to any of the Agent instances. +# This may or may not be the same Agent which has already received the span. +# 4.
The Agent which received the span from the load balancer will run these processors, +# in this order, if they are configured: +# 1. "spanmetrics" +# 2. "service_graphs" +# 3. "tail_sampling" +# 4. "automatic_logging" +# 5. "batch" +# 5. The spans are then remote written using the "remote_write" configuration. +# # Load balancing significantly increases CPU usage. This is because spans are # exported an additional time between agents. load_balancing: # resolver configures the resolution strategy for the involved backends - # It can be static, with a fixed list of hostnames, or DNS, with a hostname - # (and port) that will resolve to all IP addresses. + # It can be either "static", "dns" or "kubernetes". resolver: static: + # A fixed list of hostnames. hostnames: [ - ... ] dns: + # DNS hostname from which to resolve IP addresses. hostname: + # Port number to use with the resolved IP address when exporting spans. [ port: | default = 4317 ] # Resolver interval [ interval: | default = 5s ] # Resolver timeout [ timeout: | default = 1s ] + # The kubernetes resolver receives IP addresses of a Kubernetes service + # from the Kubernetes API. It does not require polling. The Kubernetes API + # notifies the Agent when a new pod is available and when an old pod has exited. + # + # For the kubernetes resolver to work, Agent must be running under + # a system account with "list", "watch" and "get" permissions. kubernetes: service: [ ports: | default = 4317 ] diff --git a/docs/sources/static/set-up/deploy-agent.md b/docs/sources/static/set-up/deploy-agent.md index 7af7714adaf5..5325d3b71c01 100644 --- a/docs/sources/static/set-up/deploy-agent.md +++ b/docs/sources/static/set-up/deploy-agent.md @@ -11,3 +11,383 @@ weight: 300 {{< docs/shared source="agent" lookup="/deploy-agent.md" version="" >}} +## For scalable ingestion of traces + +For small workloads, it is normal to have just one Agent handle all incoming spans with no need of load balancing.
+However, for large workloads it is desirable to spread out the load of processing spans over multiple Agent instances. + +To scale the Agent for trace ingestion, do the following: +1. Set up the `load_balancing` section of the Agent's `traces` config. +2. Start multiple Agent instances, all with the same configuration, so that: + * Each Agent load balances using the same strategy. + * Each Agent processes spans in the same way. +3. The cluster of Agents is now set up for load balancing. It works as follows: + 1. Any of the Agents can receive spans from instrumented applications via the configured `receivers`. + 2. When an Agent first receives spans, it will forward them to any of the Agents in the cluster according to the `load_balancing` configuration. + + + +### tail_sampling + +If some of the spans for a trace end up in a different Agent, `tail_sampling` will not sample correctly. +Enabling `load_balancing` is necessary if `tail_sampling` is enabled and when there could be more than one Agent instance processing spans for the same trace. +`load_balancing` will make sure that all spans of a given trace will be processed by the same Agent instance. + +### spanmetrics + +All spans for a given `service.name` must be processed by the same `spanmetrics` Agent. +To make sure that this is the case, set up `load_balancing` with `routing_key: service`. + +### service_graphs + +It is challenging to scale `service_graphs` over multiple Agent instances. +* For `service_graphs` to work correctly, each "client" span must be paired + with a "server" span in order to calculate metrics such as span duration. +* If a "client" span goes to one Agent, but a "server" span goes to another Agent, + then no single Agent will be able to pair the spans and a metric won't be generated. + +`load_balancing` can solve this problem partially if it is configured with `routing_key: traceID`. + * Each Agent will then be able to calculate a service graph for each "client"/"server" pair in a trace.
+ * However, it is possible to have a span with similar "server"/"client" values + in a different trace, processed by another Agent. + * If two different Agents process similar "server"/"client" spans, + they will generate the same service graph metric series. + * If the series from two Agents are the same, this will lead to issues + when writing them to the backend database. + * Users could differentiate the series by adding a label such as `"agent_id"`. + * Unfortunately, there is currently no method in the Agent to aggregate those series from different Agents and merge them into one series. + * A PromQL query could be used to aggregate the metrics from different Agents. + * If the metrics are stored in Grafana Mimir, cardinality issues due to `"agent_id"` labels can be solved using [Adaptive Metrics][adaptive-metrics]. + +A simpler, more scalable alternative to generating service graph metrics in the Agent is to generate them entirely in the backend database. +For example, service graphs can be [generated][tempo-servicegraphs] in Grafana Cloud by the Tempo traces database. 
+ +[tempo-servicegraphs]: https://grafana.com/docs/tempo/latest/metrics-generator/service_graphs/ +[adaptive-metrics]: https://grafana.com/docs/grafana-cloud/cost-management-and-billing/reduce-costs/metrics-costs/control-metrics-usage-via-adaptive-metrics/ + +### Example Kubernetes configuration +{{< collapse title="Example Kubernetes configuration with DNS load balancing" >}} +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: grafana-cloud-monitoring +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +spec: + ports: + - name: agent-traces-otlp-grpc + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + name: agent-traces +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-trace-generator + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: k6-trace-generator + template: + metadata: + labels: + name: k6-trace-generator + spec: + containers: + - env: + - name: ENDPOINT + value: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local:9411 + image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 + imagePullPolicy: IfNotPresent + name: k6-trace-generator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: agent-traces + template: + metadata: + labels: + name: agent-traces + spec: + containers: + - args: + - -config.file=/etc/agent/agent.yaml + command: + - /bin/grafana-agent + image: grafana/agent:v0.38.0 + imagePullPolicy: IfNotPresent + name: agent-traces + ports: + - containerPort: 9411 + name: otlp-grpc + protocol: TCP + - containerPort: 34621 + name: agent-lb + protocol: TCP + volumeMounts: + - mountPath: /etc/agent + name: agent-traces + volumes: + - configMap: + name: agent-traces + name: agent-traces +--- +apiVersion: v1 
+kind: Service +metadata: + name: agent-traces-headless + namespace: grafana-cloud-monitoring +spec: + clusterIP: None + ports: + - name: agent-lb + port: 34621 + protocol: TCP + targetPort: agent-lb + selector: + name: agent-traces + type: ClusterIP +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +data: + agent.yaml: | + traces: + configs: + - name: default + load_balancing: + exporter: + insecure: true + resolver: + dns: + hostname: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local + port: 34621 + timeout: 5s + interval: 60s + receiver_port: 34621 + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:9411 + remote_write: + - basic_auth: + username: 111111 + password: pass + endpoint: tempo-prod-06-prod-gb-south-0.grafana.net:443 + retry_on_failure: + enabled: false +``` +{{< /collapse >}} + +{{< collapse title="Example Kubernetes configuration with Kubernetes load balancing" >}} + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: grafana-cloud-monitoring +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-agent-traces + namespace: grafana-cloud-monitoring +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: grafana-agent-traces-role + namespace: grafana-cloud-monitoring +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: grafana-agent-traces-rolebinding + namespace: grafana-cloud-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: grafana-agent-traces-role +subjects: +- kind: ServiceAccount + name: grafana-agent-traces + namespace: grafana-cloud-monitoring +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +spec: + ports: + - name: agent-traces-otlp-grpc + port: 9411 + protocol: TCP + targetPort: 9411 + selector: + name: 
agent-traces +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k6-trace-generator + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: k6-trace-generator + template: + metadata: + labels: + name: k6-trace-generator + spec: + containers: + - env: + - name: ENDPOINT + value: agent-traces-headless.grafana-cloud-monitoring.svc.cluster.local:9411 + image: ghcr.io/grafana/xk6-client-tracing:v0.0.2 + imagePullPolicy: IfNotPresent + name: k6-trace-generator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 1 + selector: + matchLabels: + name: agent-traces + template: + metadata: + labels: + name: agent-traces + spec: + containers: + - args: + - -config.file=/etc/agent/agent.yaml + command: + - /bin/grafana-agent + image: grafana/agent:v0.38.0 + imagePullPolicy: IfNotPresent + name: agent-traces + ports: + - containerPort: 9411 + name: otlp-grpc + protocol: TCP + - containerPort: 34621 + name: agent-lb + protocol: TCP + volumeMounts: + - mountPath: /etc/agent + name: agent-traces + serviceAccount: grafana-agent-traces + volumes: + - configMap: + name: agent-traces + name: agent-traces +--- +apiVersion: v1 +kind: Service +metadata: + name: agent-traces-headless + namespace: grafana-cloud-monitoring +spec: + clusterIP: None + ports: + - name: agent-lb + port: 34621 + protocol: TCP + targetPort: agent-lb + selector: + name: agent-traces + type: ClusterIP +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: agent-traces + namespace: grafana-cloud-monitoring +data: + agent.yaml: | + traces: + configs: + - name: default + load_balancing: + exporter: + insecure: true + resolver: + kubernetes: + service: agent-traces-headless + ports: + - 34621 + receiver_port: 34621 + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:9411 + remote_write: 
+ - basic_auth: + username: 111111 + password: pass + endpoint: tempo-prod-06-prod-gb-south-0.grafana.net:443 + retry_on_failure: + enabled: false``` +``` + +{{< /collapse >}} + +You need to fill in correct OTLP credentials prior to running the above examples. +The example above can be started by using [k3d][]: + +```bash +k3d cluster create grafana-agent-lb-test +kubectl apply -f kubernetes_config.yaml +``` + +To delete the cluster, run: +```bash +k3d cluster delete grafana-agent-lb-test +``` + +[k3d]: https://k3d.io/v5.6.0/ diff --git a/docs/sources/static/set-up/install/install-agent-kubernetes.md b/docs/sources/static/set-up/install/install-agent-kubernetes.md index c8df4dd8fa33..95fdd5597b53 100644 --- a/docs/sources/static/set-up/install/install-agent-kubernetes.md +++ b/docs/sources/static/set-up/install/install-agent-kubernetes.md @@ -11,40 +11,53 @@ weight: 300 # Deploy Grafana Agent in static mode on Kubernetes -You can deploy Grafana Agent in static mode on Kubernetes. +You can use the Helm chart for Grafana Agent to deploy Grafana Agent in static mode on Kubernetes. + +## Before you begin + +* Install [Helm][] on your computer. +* Configure a Kubernetes cluster that you can use for Grafana Agent. +* Configure your local Kubernetes context to point to the cluster. + +[Helm]: https://helm.sh ## Deploy -To deploy Grafana Agent in static mode on Kubernetes, perform the following steps. +{{% admonition type="note" %}} +These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for Grafana Agent. +You can deploy Grafana Agent in static mode or flow mode. The Helm chart deploys flow mode by default. +{{% /admonition %}} -1. 
Download one of the following manifests from GitHub and save it as `manifest.yaml`: +To deploy Grafana Agent in static mode on Kubernetes using Helm, run the following commands in a terminal window: - - Metric collection (StatefulSet): [agent-bare.yaml](https://github.com/grafana/agent/blob/main/production/kubernetes/agent-bare.yaml) - - Log collection (DaemonSet): [agent-loki.yaml](https://github.com/grafana/agent/blob/main/production/kubernetes/agent-loki.yaml) - - Trace collection (Deployment): [agent-traces.yaml](https://github.com/grafana/agent/blob/main/production/kubernetes/agent-traces.yaml) +1. Add the Grafana Helm chart repository: -1. Edit the downloaded `manifest.yaml` and replace the placeholders with information relevant to your Kubernetes deployment. + ```shell + helm repo add grafana https://grafana.github.io/helm-charts + ``` -1. Apply the modified manifest file: +1. Update the Grafana Helm chart repository: ```shell - kubectl -n default apply -f manifest.yaml + helm repo update ``` -{{% admonition type="note" %}} -The manifests do not include the `ConfigMaps` which are necessary to run Grafana Agent. -{{% /admonition %}} +1. Install Grafana Agent in static mode: -For sample configuration files and detailed instructions, refer to [Configure Kubernetes Monitoring](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/) in the Grafana Cloud documentation. + ```shell + helm install grafana/grafana-agent --set agent.mode=static + ``` + Replace the following: -## Rebuild the Kubernetes manifests + - _``_: The name to use for your Grafana Agent installation, such as `grafana-agent`. -The manifests provided are created using Grafana Labs' production Tanka configs with some default values. 
If you want to build the YAML file with some custom values, you must install the following applications: + {{% admonition type="warning" %}} + Always pass `--set agent.mode=static` in `helm install` or `helm upgrade` commands to ensure Grafana Agent gets installed in static mode. + Alternatively, set `agent.mode` to `static` in your values.yaml file. + {{% /admonition %}} -- [Tanka](https://github.com/grafana/tanka) version 0.8 or higher -- [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler) version 0.2.1 or higher +For more information on the Grafana Agent Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. -Refer to the [`template` Tanka environment](https://github.com/grafana/agent/blob/main/production/kubernetes/build/templates) for the current settings that initialize the Grafana Agent Tanka configurations. +[Artifact Hub]: https://artifacthub.io/packages/helm/grafana/grafana-agent -To build the YAML files, run the `/build/build.sh` script, or run `make example-kubernetes` from the project's root directory. diff --git a/docs/sources/static/set-up/install/install-agent-on-windows.md b/docs/sources/static/set-up/install/install-agent-on-windows.md index 1ff2a05e61a4..ddda581a5355 100644 --- a/docs/sources/static/set-up/install/install-agent-on-windows.md +++ b/docs/sources/static/set-up/install/install-agent-on-windows.md @@ -30,7 +30,11 @@ To do a standard graphical install of Grafana Agent on Windows, perform the foll 1. Double-click on `grafana-agent-installer.exe` to install Grafana Agent. Grafana Agent is installed into the default directory `C:\Program Files\Grafana Agent`. - The [windows_exporter integration](/docs/agent/latest/static/configuration/integrations/windows-exporter-config) can be enabled with all default windows_exporter options. + + The following options are available: + + - The [windows_exporter integration][windows_exporter_config] can be enabled with all default windows_exporter options. 
+ - The [-config.expand-env][flags] command line flag can be enabled. ## Silent install @@ -78,6 +82,24 @@ If you are using `remote_write` you must enable Windows Exporter and set the glo If you are using Powershell, make sure you use triple quotes `"""http://example.com"""` around the URL parameter. +## Silent install with `-config.expand_env` + +You can enable [-config.expand-env][flags] during a silent install. + +1. Navigate to the [latest release](https://github.com/grafana/agent/releases) on GitHub. + +1. Scroll down to the **Assets** section. + +1. Download the file called `grafana-agent-installer.exe.zip`. + +1. Unzip the downloaded file. + +1. Run the following command in PowerShell or Command Prompt: + + ```shell + PATH_TO_INSTALLER/grafana-agent-installer.exe /S /ExpandEnv true + ``` + ## Verify the installation 1. Make sure you can access `http://localhost:12345/-/healthy` and `http://localhost:12345/agent/api/v1/metrics/targets`. @@ -146,6 +168,8 @@ Refer to [windows_events](/docs/loki/latest/clients/promtail/configuration/#wind - [Configure Grafana Agent][configure] {{% docs/reference %}} +[flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags" +[windows_exporter_config]: "/docs/agent/ -> /docs/agent//static/configuration/integrations/windows-exporter-config" [start]: "/docs/agent/ -> /docs/agent//static/set-up/start-agent" [start]: "/docs/grafana-cloud/ -> ../start-agent" [configure]: "/docs/agent/ -> /docs/agent//static/configuration/create-config-file" diff --git a/example/docker-compose/grafana/dashboards/template.jsonnet b/example/docker-compose/grafana/dashboards/template.jsonnet index edc460218987..157860f0a389 100644 --- a/example/docker-compose/grafana/dashboards/template.jsonnet +++ b/example/docker-compose/grafana/dashboards/template.jsonnet @@ -1,5 +1,5 @@ -local agentDashboards = import 'grafana-agent-mixin/dashboards.libsonnet'; -local agentDebugging = import 'grafana-agent-mixin/debugging.libsonnet'; +local agentDashboards = 
import 'agent-static-mixin/dashboards.libsonnet'; +local agentDebugging = import 'agent-static-mixin/debugging.libsonnet'; local result = agentDashboards + agentDebugging { files: { diff --git a/example/docker-compose/jsonnetfile.json b/example/docker-compose/jsonnetfile.json index 1f2735e529cf..bee24c1dcf32 100644 --- a/example/docker-compose/jsonnetfile.json +++ b/example/docker-compose/jsonnetfile.json @@ -4,7 +4,7 @@ { "source": { "local": { - "directory": "../../production/grafana-agent-mixin" + "directory": "../../operations/agent-static-mixin" } }, "version": "" diff --git a/example/docker-compose/jsonnetfile.lock.json b/example/docker-compose/jsonnetfile.lock.json index 1803372b5348..463fc7a6776b 100644 --- a/example/docker-compose/jsonnetfile.lock.json +++ b/example/docker-compose/jsonnetfile.lock.json @@ -24,7 +24,7 @@ { "source": { "local": { - "directory": "../../production/grafana-agent-mixin" + "directory": "../../operations/agent-static-mixin" } }, "version": "" diff --git a/example/k3d/README.md b/example/k3d/README.md deleted file mode 100644 index 10512b2bd92f..000000000000 --- a/example/k3d/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# `k3d` Examples - -## Agent Environment - -The `k3d` example uses `k3d` and `tanka` to produce a Kubernetes environment -that implements a full Grafana Agent environment for testing. - -### Requirements - -- A Unix-y command line (macOS or Linux will do). -- Kubectl -- Docker -- [Tanka >= v0.9.2](https://github.com/grafana/tanka) -- [k3d >= v4.0.0,<= v5.2.2](https://github.com/k3d-io/k3d) -- [jsonnet-bundler >= v0.4.0](https://github.com/jsonnet-bundler/jsonnet-bundler) - -### Getting Started - -Build latest agent images with `make agent-image agentctl-image` in the project root directory if there are local changes to test. 
- -Run the following to create your cluster: - -```bash -# Create a new k3d cluster -./scripts/create.bash - -# Import images into k3d if they are not available on docker hub -k3d image import -c agent-k3d grafana/agent:main -k3d image import -c agent-k3d grafana/agentctl:main - -# Ensure jsonnet is up to date before applying environment -jb install -tk apply ./environment - -# Navigate to grafana.k3d.localhost:30080 in your browser to view dashboards - -# Delete the k3d cluster when you're done with it -k3d cluster delete agent-k3d -``` - -## Smoke Test Environment - -The smoke test environment is used for end-to-end validation of all three observability signals. - -### Running - -Smoke Test environment is invoked via `/scripts/smoke-test.bash` - -This tool will spin up cluster of Grafana Agent, Cortex, Avalanche, Smoke, [Crow](../../tools/crow/README.md), [Canary](https://grafana.com/docs/loki/latest/operations/loki-canary/) and Vulture instances. The Smoke deployment will then periodically kill instances and check for any failed alerts. At the end of the duration (default 3h) it will end the testing. - -For users who do not have access to the `us.gcr.io/kubernetes-dev` container registry, do the following to run the smoke test: - -* Build the Smoke and Crow images locally (from the root project directory): -``` -make grafana-agent-crow-image agent-smoke-image -``` -* Run the smoke test using `/scripts/smoke-test.bash` script. -* `Smoke` and `Crow` pods will fail because the images are not imported into the cluster. Import them running: -``` -k3d image import -c agent-smoke-test us.gcr.io/kubernetes-dev/grafana/agent-smoke:main -k3d image import -c agent-smoke-test us.gcr.io/kubernetes-dev/grafana/agent-crow:main -``` - -### What to look for? - -These alerts are viewable [here](http://prometheus.k3d.localhost:50080/alerts). - -Prometheus alerts are triggered: -- If any Crow instances are not running or Crow samples are not being propagated correctly. 
-- If any Canary instances are not running or Canary logs are not being propagated correctly. -- If any Vulture instances are not running or Vulture samples are not being propagated correctly. -- If any Grafana Agents are not running or Grafana Agent limits are outside their norm. - -NOTE: The alerts might be in pending until the system settles down. - -![](./assets/pending_alert.png) - -An alert firing will look similar to the below. - -![](./assets/alert_firing.png) - -If at the end of the test any issues are found they will look similar to the below. - -![](./assets/console_failure.png) - -### How to trigger an alert? - -Changing the avalanche setting for label_count to 1000, located [here](../../production/tanka/grafana-agent/smoke/avalanche/main.libsonnet). This will ensure the [GrafanaAgentMemHigh](http://prometheus.k3d.localhost:50080/graph?g0.expr=ALERTS%7Balertname%3D%22GrafanaAgentMemHigh%22%7D&g0.tab=1&g0.stacked=0&g0.show_exemplars=0.g0.range_input=1h.) alert exceeds the limit. - -![](./assets/trigger_change.png) - -For Loki Canary, the easiest way to trigger an alert is to edit its Daemonset to query for a different label that doesn't exist. -![](./assets/trigger_logs_alerts.png) -![](./assets/logs_alerts.png) - -### Architecture - -By default, a k3d cluster will be created running the following instances - -- agent-single - single instance -- agent-cluster - 3 Grafana Agents in clustered configuration -- crow-cluster - serves the agent cluster -- crow-single - serves the single agent -- cortex -- avalanche - selection of avalanche instances serving traffic -- smoke - scales avalanche replicas and introduces chaos by deleting agent pods during testing -- canary - emits logs and checks if they're stored properly -- loki -- vulture - emits traces and checks if they're stored properly -- tempo - -Crow, Canary and Vulture instances will check to see if the metrics, logs and traces that were scraped respectively, show up in the Cortex/Loki/Tempo instances. 
They will then emit metrics on the success of those metrics. This success/failure result will trigger an alert if it is incorrect. - -### Metrics Flow - -![](./assets/metrics_flow.png) - -### Logs Flow - -![](./assets/logs_flow.png) - -### Traces Flow - -![](./assets/traces_flow.png) - -### Avalanche - -Avalanche is used to add some additional load on the system and general testing. diff --git a/example/k3d/assets/alert_firing.png b/example/k3d/assets/alert_firing.png deleted file mode 100644 index f134cf8f2b44..000000000000 Binary files a/example/k3d/assets/alert_firing.png and /dev/null differ diff --git a/example/k3d/assets/alerts.png b/example/k3d/assets/alerts.png deleted file mode 100644 index d76f62cb0bd3..000000000000 Binary files a/example/k3d/assets/alerts.png and /dev/null differ diff --git a/example/k3d/assets/console_failure.png b/example/k3d/assets/console_failure.png deleted file mode 100644 index bf817cbeaacd..000000000000 Binary files a/example/k3d/assets/console_failure.png and /dev/null differ diff --git a/example/k3d/assets/logs_alerts.png b/example/k3d/assets/logs_alerts.png deleted file mode 100644 index e353eafab411..000000000000 Binary files a/example/k3d/assets/logs_alerts.png and /dev/null differ diff --git a/example/k3d/assets/logs_flow.mermaid b/example/k3d/assets/logs_flow.mermaid deleted file mode 100644 index 56a5542dd663..000000000000 --- a/example/k3d/assets/logs_flow.mermaid +++ /dev/null @@ -1,9 +0,0 @@ -sequenceDiagram - Canary ->>+ Canary: Logs to stdout - Agent ->>+ Canary: Reads pod logs through /var/log/pods/ - Agent ->>+ Loki: Pushes logs (remote_write) - Canary ->>+ Loki: Queries logs storage - Canary ->>+ Canary: Exposed success/failure metrics - Agent ->>+ Canary: Scrapes /metrics - Agent ->>+ Prometheus: Sends success/failure metrics - Prometheus ->>+ Prometheus: Checks alerts diff --git a/example/k3d/assets/logs_flow.png b/example/k3d/assets/logs_flow.png deleted file mode 100644 index 66075e56d52d..000000000000 Binary 
files a/example/k3d/assets/logs_flow.png and /dev/null differ diff --git a/example/k3d/assets/metrics_flow.png b/example/k3d/assets/metrics_flow.png deleted file mode 100644 index d6ca3cd60957..000000000000 Binary files a/example/k3d/assets/metrics_flow.png and /dev/null differ diff --git a/example/k3d/assets/metrics_flow.uml b/example/k3d/assets/metrics_flow.uml deleted file mode 100644 index d62485821163..000000000000 --- a/example/k3d/assets/metrics_flow.uml +++ /dev/null @@ -1,10 +0,0 @@ -@startuml -Crow -> Crow: Crow Generates Metrics -Agent -> Crow: Agent scrapes /metrics endpoint -Agent -> Prometheus: Agent sends results to prometheus -Crow -> Prometheus: Crow checks that metrics were written to prometheus -Crow -> Crow: Crow updates success/failure metrics and serves those via /validate -Agent -> Crow: Agent scrapes /validate -Agent -> Prometheus: Agent sends validate metrics -Prometheus -> Prometheus: Prometheus checks alerts -@enduml \ No newline at end of file diff --git a/example/k3d/assets/pending_alert.png b/example/k3d/assets/pending_alert.png deleted file mode 100644 index a8cd3fc3b5ae..000000000000 Binary files a/example/k3d/assets/pending_alert.png and /dev/null differ diff --git a/example/k3d/assets/traces_flow.mermaid b/example/k3d/assets/traces_flow.mermaid deleted file mode 100644 index af096c16af03..000000000000 --- a/example/k3d/assets/traces_flow.mermaid +++ /dev/null @@ -1,8 +0,0 @@ -sequenceDiagram - Vulture ->>+ Agent: Emits traces - Agent ->>+ Tempo: Pushes traces (remote_write) - Vulture ->>+ Tempo: Checks that traces are valid - Vulture ->>+ Vulture: Exposes validate metrics (/metrics) - Agent ->>+ Vulture: Scrapes /metrics - Agent ->>+ Prometheus: Send validate metrics - Prometheus ->>+ Prometheus: Check alerts diff --git a/example/k3d/assets/traces_flow.png b/example/k3d/assets/traces_flow.png deleted file mode 100644 index f455afa9ffde..000000000000 Binary files a/example/k3d/assets/traces_flow.png and /dev/null differ diff --git 
a/example/k3d/assets/trigger_change.png b/example/k3d/assets/trigger_change.png deleted file mode 100644 index 42ce8db2eecf..000000000000 Binary files a/example/k3d/assets/trigger_change.png and /dev/null differ diff --git a/example/k3d/assets/trigger_logs_alerts.png b/example/k3d/assets/trigger_logs_alerts.png deleted file mode 100644 index acccc0dac48b..000000000000 Binary files a/example/k3d/assets/trigger_logs_alerts.png and /dev/null differ diff --git a/example/k3d/environment/main.jsonnet b/example/k3d/environment/main.jsonnet deleted file mode 100644 index 6868a2632576..000000000000 --- a/example/k3d/environment/main.jsonnet +++ /dev/null @@ -1,165 +0,0 @@ -local collector = import 'collector/main.libsonnet'; -local default = import 'default/main.libsonnet'; -local etcd = import 'grafana-agent/smoke/etcd/main.libsonnet'; -local agent_cluster = import 'grafana-agent/scraping-svc/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; -local load_generator = import 'load-generator/main.libsonnet'; - -local loki_config = import 'default/loki_config.libsonnet'; -local grafana_agent = import 'grafana-agent/v1/main.libsonnet'; - -local containerPort = k.core.v1.containerPort; -local ingress = k.networking.v1beta1.ingress; -local path = k.networking.v1beta1.httpIngressPath; -local rule = k.networking.v1beta1.ingressRule; -local service = k.core.v1.service; - -local images = { - agent: 'grafana/agent:latest', - agentctl: 'grafana/agentctl:latest', -}; - -{ - default: default.new(namespace='default') { - grafana+: { - ingress+: - ingress.new('grafana-ingress') + - ingress.mixin.spec.withRules([ - rule.withHost('grafana.k3d.localhost') + - rule.http.withPaths([ - path.withPath('/') - + path.backend.withServiceName('grafana') - + path.backend.withServicePort(80), - ]), - ]), - }, - }, - - agent: - local cluster_label = 'k3d-agent/daemonset'; - - grafana_agent.new('grafana-agent', 'default') + - grafana_agent.withImages(images) + - 
grafana_agent.withMetricsConfig({ - wal_directory: '/var/lib/agent/data', - global: { - scrape_interval: '1m', - external_labels: { - cluster: cluster_label, - }, - }, - }) + - grafana_agent.withMetricsInstances(grafana_agent.scrapeInstanceKubernetes { - // We want our cluster and label to remain static for this deployment, so - // if they are overwritten by a metric we will change them to the values - // set by external_labels. - scrape_configs: std.map(function(config) config { - relabel_configs+: [{ - target_label: 'cluster', - replacement: cluster_label, - }], - }, super.scrape_configs), - }) + - grafana_agent.withRemoteWrite([{ - url: 'http://cortex.default.svc.cluster.local/api/prom/push', - }]) + - grafana_agent.withLogsConfig(loki_config) + - grafana_agent.withLogsClients(grafana_agent.newLogsClient({ - scheme: 'http', - hostname: 'loki.default.svc.cluster.local', - external_labels: { cluster: cluster_label }, - })) + - grafana_agent.withTracesConfig({ - receivers: { - jaeger: { - protocols: { - thrift_http: null, - }, - }, - }, - batch: { - timeout: '5s', - send_batch_size: 1000, - }, - }) + - grafana_agent.withPortsMixin([ - containerPort.new('thrift-http', 14268) + containerPort.withProtocol('TCP'), - containerPort.new('otlp-lb', 4318) + containerPort.withProtocol('TCP'), - ]) + - grafana_agent.withTracesRemoteWrite([ - { - endpoint: 'collector.default.svc.cluster.local:4317', - insecure: true, - }, - ]) + - grafana_agent.withTracesTailSamplingConfig({ - policies: [{ - type: 'always_sample', - }], - }) + - grafana_agent.withTracesLoadBalancingConfig({ - exporter: { - insecure: true, - }, - resolver: { - dns: { - hostname: 'grafana-agent.default.svc.cluster.local', - port: 4318, - }, - }, - }), - - // Need to run ETCD for agent_cluster - etcd: etcd.new('default'), - - collector: collector.new('default'), - - load_generator: load_generator.new('default'), - - agent_cluster: - agent_cluster.new('default', 'kube-system') + - 
agent_cluster.withImagesMixin(images) + - agent_cluster.withConfigMixin({ - local kvstore = { - store: 'etcd', - etcd: { - endpoints: ['etcd.default.svc.cluster.local:2379'], - }, - }, - - agent_remote_write: [{ - url: 'http://cortex.default.svc.cluster.local/api/prom/push', - }], - - agent_ring_kvstore: kvstore { prefix: 'agent/ring/' }, - agent_config_kvstore: kvstore { prefix: 'agent/configs/' }, - - local cluster_label = 'k3d-agent/cluster', - agent_config+: { - metrics+: { - global+: { - external_labels+: { - cluster: cluster_label, - }, - }, - - scraping_service+: { - dangerous_allow_reading_files: true, - }, - }, - }, - - kubernetes_scrape_configs: - (grafana_agent.scrapeInstanceKubernetes { - // We want our cluster and label to remain static for this deployment, so - // if they are overwritten by a metric we will change them to the values - // set by external_labels. - scrape_configs: std.map(function(config) config { - relabel_configs+: [{ - target_label: 'cluster', - replacement: cluster_label, - }], - }, super.scrape_configs), - }).scrape_configs, - }), -} diff --git a/example/k3d/environment/spec.json b/example/k3d/environment/spec.json deleted file mode 100644 index 8c90817a7a99..000000000000 --- a/example/k3d/environment/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "default" - }, - "spec": { - "apiServer": "https://0.0.0.0:50443", - "namespace": "default" - } -} diff --git a/example/k3d/jsonnetfile.json b/example/k3d/jsonnetfile.json deleted file mode 100644 index ca6b50fc713d..000000000000 --- a/example/k3d/jsonnetfile.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "version": 1, - "dependencies": [ - { - "source": { - "git": { - "remote": "https://github.com/grafana/cortex-jsonnet.git", - "subdir": "cortex-mixin" - } - }, - "version": "master" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "ksonnet-util" - } - }, 
- "version": "master" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/loki.git", - "subdir": "production/ksonnet/loki-canary" - } - }, - "version": "main" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/tempo.git", - "subdir": "operations/jsonnet/microservices" - } - }, - "version": "main" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/tempo.git", - "subdir": "operations/jsonnet/single-binary" - } - }, - "version": "main" - }, - { - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/k8s-alpha.git", - "subdir": "1.14" - } - }, - "version": "master" - }, - { - "source": { - "local": { - "directory": "../../production/tanka/grafana-agent" - } - }, - "version": "" - }, - { - "source": { - "local": { - "directory": "../../production/grafana-agent-mixin" - } - }, - "version": "" - } - ], - "legacyImports": true -} diff --git a/example/k3d/jsonnetfile.lock.json b/example/k3d/jsonnetfile.lock.json deleted file mode 100644 index 9f5a71c12e44..000000000000 --- a/example/k3d/jsonnetfile.lock.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "version": 1, - "dependencies": [ - { - "source": { - "git": { - "remote": "https://github.com/grafana/cortex-jsonnet.git", - "subdir": "cortex-mixin" - } - }, - "version": "56cb5e3d73950b977ba2e4bfd7e46e2acb0b77b2", - "sum": "CcTh9gpP5UlOs0f8xrqT8f677c4GbkM5AGvl1yWcnyc=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/grafonnet-lib.git", - "subdir": "grafonnet" - } - }, - "version": "3626fc4dc2326931c530861ac5bebe39444f6cbf", - "sum": "gF8foHByYcB25jcUOBqP6jxk0OPifQMjPvKY0HaCk6w=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "grafana-builder" - } - }, - "version": "b9cc0f3529833096c043084c04bc7b3562a134c4", - "sum": "slxrtftVDiTlQK22ertdfrg4Epnq97gdrLI63ftUfaE=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": 
"ksonnet-util" - } - }, - "version": "f62b65014b2c443b234af31e4e1754278e66cef9", - "sum": "A9MKQ++75leyWJR3rxL2mduAr6S9pByQZYmX0OICu2E=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "mixin-utils" - } - }, - "version": "b9cc0f3529833096c043084c04bc7b3562a134c4", - "sum": "mtTAh8vSa4Eb8ojviyZ9zE2pPq5OgwhK75qsEWkifhI=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/loki.git", - "subdir": "production/ksonnet/loki-canary" - } - }, - "version": "74c8cf03ba4fb2abd979a9af05bb945813a4505c", - "sum": "EIFf6m9IvdJbfGMXkzWYofoFSnHo8f+tVeUh3x/v+u0=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/tempo.git", - "subdir": "operations/jsonnet/microservices" - } - }, - "version": "1bf54e94e74e94cd6f68c5c01b9a19eea7ec43bf", - "sum": "gOPXxIkGZH5Vm+fSnQP/qeJU1rIkIN5ZI7CSLXFFf/k=" - }, - { - "source": { - "git": { - "remote": "https://github.com/grafana/tempo.git", - "subdir": "operations/jsonnet/single-binary" - } - }, - "version": "07e9ef2f1c25a63ac6061307a3a8453b81b5e417", - "sum": "0f7vj8dSS9a9CRPf8O9LowMHqieUm9bqgfq6uHJGIF4=" - }, - { - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/k8s-alpha.git", - "subdir": "1.14" - } - }, - "version": "5e7ef40fd1366a02e25f3216a6711848b3f92e07", - "sum": "PAkcO1sAnVTYotl1GoSOfYNyuU3PtUO/5hgzz2wDotc=" - }, - { - "source": { - "local": { - "directory": "../../production/tanka/grafana-agent" - } - }, - "version": "" - }, - { - "source": { - "local": { - "directory": "../../production/grafana-agent-mixin" - } - }, - "version": "" - } - ], - "legacyImports": false -} diff --git a/example/k3d/lib/collector/collector-config.libsonnet b/example/k3d/lib/collector/collector-config.libsonnet deleted file mode 100644 index 70b833a418f3..000000000000 --- a/example/k3d/lib/collector/collector-config.libsonnet +++ /dev/null @@ -1,28 +0,0 @@ -{ - receivers: { - otlp: { - protocols: { - grpc: null, - }, - }, - }, - - exporters: 
{ - logging: { - loglevel: 'info', - }, - }, - - service: { - pipelines: { - traces: { - receivers: [ - 'otlp', - ], - exporters: [ - 'logging', - ], - }, - }, - }, -} diff --git a/example/k3d/lib/collector/main.libsonnet b/example/k3d/lib/collector/main.libsonnet deleted file mode 100644 index f384ecc71cfe..000000000000 --- a/example/k3d/lib/collector/main.libsonnet +++ /dev/null @@ -1,47 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local pvc = k.core.v1.persistentVolumeClaim; -local service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(namespace=''):: { - local this = self, - - _images:: { - collector: 'otel/opentelemetry-collector:0.9.0', - }, - _config:: (import './collector-config.libsonnet'), - - configMap: - configMap.new('collector') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'config.yaml': k.util.manifestYaml(this._config), - }), - - container:: - container.new('collector', this._images.collector) + - container.withPorts([ - containerPort.newNamed(name='grpc', containerPort=55680), - ]) + - container.withArgsMixin( - '--config=/etc/collector/config.yaml', - ), - - deployment: - deployment.new('collector', 1, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - k.util.configMapVolumeMount(this.configMap, '/etc/collector'), - - - service: - k.util.serviceFor(self.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/example/k3d/lib/cortex/cortex-config.libsonnet b/example/k3d/lib/cortex/cortex-config.libsonnet deleted file mode 100644 index a7bf55b8d016..000000000000 --- a/example/k3d/lib/cortex/cortex-config.libsonnet +++ /dev/null @@ -1,91 +0,0 @@ -{ - auth_enabled: false, - - server: { - http_listen_port: 80, - 
grpc_listen_port: 9095, - - // Configure the server to allow messages up to 100MB. - grpc_server_max_recv_msg_size: 104857600, - grpc_server_max_send_msg_size: 104857600, - grpc_server_max_concurrent_streams: 1000, - }, - - distributor: { - shard_by_all_labels: true, - pool: { - health_check_ingesters: true, - }, - }, - - ingester_client: { - grpc_client_config: { - max_recv_msg_size: 104857600, - max_send_msg_size: 104857600, - grpc_compression: 'gzip', - }, - }, - - ingester: { - lifecycler: { - join_after: 0, - min_ready_duration: '0s', - final_sleep: '0s', - num_tokens: 512, - - ring: { - kvstore: { - store: 'inmemory', - }, - replication_factor: 1, - }, - }, - }, - - storage: { - engine: 'blocks', - }, - - blocks_storage: { - tsdb: { - dir: '/tmp/cortex/tsdb', - }, - bucket_store: { - sync_dir: '/tmp/cortex/tsdb-sync', - }, - - backend: 'filesystem', - filesystem: { - dir: '/tmp/cortex/blocks', - }, - }, - - compactor: { - data_dir: '/tmp/cortex/compactor', - sharding_ring: { - kvstore: { - store: 'inmemory', - }, - }, - }, - - frontend_worker: { - match_max_concurrent: true, - }, - - ruler: { - enable_api: true, - enable_sharding: false, - storage: { - type: 'local', - 'local': { - directory: '/tmp/cortex/rules', - }, - }, - }, - - limits: { - ingestion_rate: 250000, - ingestion_burst_size: 500000, - }, -} diff --git a/example/k3d/lib/cortex/main.libsonnet b/example/k3d/lib/cortex/main.libsonnet deleted file mode 100644 index 600f031a3347..000000000000 --- a/example/k3d/lib/cortex/main.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local pvc = k.core.v1.persistentVolumeClaim; -local service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(namespace=''):: { - local this = self, - - 
_images:: { - cortex: 'cortexproject/cortex:v1.8.1', - }, - _config:: (import './cortex-config.libsonnet'), - - configMap: - configMap.new('cortex-config') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'config.yaml': k.util.manifestYaml(this._config), - }), - - container:: - container.new('cortex', this._images.cortex) + - container.withPorts([ - containerPort.newNamed(name='http-metrics', containerPort=80), - containerPort.newNamed(name='grpc', containerPort=9095), - ]) + - container.withVolumeMountsMixin( - volumeMount.new('cortex-data', '/tmp/cortex'), - ) + - container.withArgsMixin( - k.util.mapToFlags({ - 'config.file': '/etc/cortex/config.yaml', - }), - ), - - pvc: - { apiVersion: 'v1', kind: 'PersistentVolumeClaim' } + - pvc.new() + - pvc.mixin.metadata.withName('cortex-data') + - pvc.mixin.metadata.withNamespace(namespace) + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') + - pvc.mixin.spec.resources.withRequests({ storage: '10Gi' }), - - deployment: - deployment.new('cortex', 1, [this.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.spec.withVolumesMixin([ - volume.fromPersistentVolumeClaim('cortex-data', 'cortex-data'), - ]) + - k.util.configMapVolumeMount(this.configMap, '/etc/cortex') + - deployment.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800), - - service: - k.util.serviceFor(this.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/example/k3d/lib/default/loki_config.libsonnet b/example/k3d/lib/default/loki_config.libsonnet deleted file mode 100644 index b201e0830257..000000000000 --- a/example/k3d/lib/default/loki_config.libsonnet +++ /dev/null @@ -1,41 +0,0 @@ -local grafana_agent = import 'grafana-agent/v1/main.libsonnet'; - -grafana_agent.scrapeKubernetesLogs { - local pipeline_stages = [ - // k3d uses cri for logging - { cri: {} }, - - // Bad words metrics, used in Agent dashboard - { - regex: { - 
expression: '(?i)(?Ppanic:|core_dumped|failure|error|attack| bad |illegal |denied|refused|unauthorized|fatal|failed|Segmentation Fault|Corrupted)', - }, - }, - { - metrics: { - panic_total: { - type: 'Counter', - description: 'total count of panic: found in log lines', - source: 'panic', - config: { - action: 'inc', - }, - }, - bad_words_total: { - type: 'Counter', - description: 'total count of bad words found in log lines', - source: 'bad_words', - config: { - action: 'inc', - }, - }, - }, - }, - ], - - scrape_configs: [ - x { pipeline_stages: pipeline_stages } - for x - in super.scrape_configs - ], -} diff --git a/example/k3d/lib/default/main.libsonnet b/example/k3d/lib/default/main.libsonnet deleted file mode 100644 index 189c3d1f36d5..000000000000 --- a/example/k3d/lib/default/main.libsonnet +++ /dev/null @@ -1,26 +0,0 @@ -local cortex = import 'cortex/main.libsonnet'; -local datasource = import 'grafana/datasource.libsonnet'; -local grafana = import 'grafana/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; -local loki = import 'loki/main.libsonnet'; -local metrics = import 'metrics-server/main.libsonnet'; - -local mixins = import './mixins.libsonnet'; - -{ - new(namespace=''):: { - ns: k.core.v1.namespace.new(namespace), - - grafana: - grafana.new(namespace=namespace) + - grafana.withDashboards(mixins.grafanaDashboards) + - grafana.withDataSources([ - datasource.new('Cortex', 'http://cortex.default.svc.cluster.local/api/prom'), - datasource.new('Loki', 'http://loki.default.svc.cluster.local', type='loki'), - ]), - - loki: loki.new(namespace), - - cortex: cortex.new(namespace), - }, -} diff --git a/example/k3d/lib/default/mixins.libsonnet b/example/k3d/lib/default/mixins.libsonnet deleted file mode 100644 index 4da82b233071..000000000000 --- a/example/k3d/lib/default/mixins.libsonnet +++ /dev/null @@ -1,30 +0,0 @@ -local cortex_mixin = import 'cortex-mixin/mixin.libsonnet'; -local agent_debugging_mixin = import 
'grafana-agent-mixin/debugging.libsonnet'; -local agent_mixin = import 'grafana-agent-mixin/mixin.libsonnet'; - -// TODO(rfratto): bit of a hack here to be compatible with the "old" Jsonnet -// writing style. -local fix = { - dashboards+:: {}, - grafana_dashboards+:: {}, - grafanaDashboards+:: $.dashboards + $.grafana_dashboards, -}; - -fix + -cortex_mixin + -agent_debugging_mixin + -agent_mixin { - _config+: { - // We run a single-node cortex so replace the job names to all - // be the monolith. - job_names+: { - ingester: 'cortex', - distributor: 'cortex', - querier: 'cortex', - query_frontend: 'cortex', - table_manager: 'cortex', - store_gateway: 'cortex', - gateway: 'cortex', - }, - }, -} diff --git a/example/k3d/lib/grafana/config.libsonnet b/example/k3d/lib/grafana/config.libsonnet deleted file mode 100644 index 79f7d36743d9..000000000000 --- a/example/k3d/lib/grafana/config.libsonnet +++ /dev/null @@ -1,36 +0,0 @@ -{ - _images: { - grafana: 'grafana/grafana:8.0.3', - }, - - _config: { - // Optionally shard dashboards into multiple config maps. - // Set to the number of desired config maps. 0 to disable. 
- dashboard_config_maps: 8, - - provisioning_dir: '/etc/grafana/provisioning', - grafana_root_url: 'http://grafana.default.svc.cluster.local/', - - grafana_ini: { - sections: { - 'auth.anonymous': { - enabled: true, - org_role: 'Admin', - }, - server: { - http_port: 80, - root_url: $._config.grafana_root_url, - }, - analytics: { - reporting_enabled: false, - }, - users: { - default_theme: 'dark', - }, - explore+: { - enabled: true, - }, - }, - }, - }, -} diff --git a/example/k3d/lib/grafana/datasource.libsonnet b/example/k3d/lib/grafana/datasource.libsonnet deleted file mode 100644 index 27ff98ce7314..000000000000 --- a/example/k3d/lib/grafana/datasource.libsonnet +++ /dev/null @@ -1,25 +0,0 @@ -{ - new(name, url, default=false, method='GET', type='prometheus'):: { - apiVersion: 1, - datasources: [{ - name: name, - type: type, - access: 'proxy', - url: url, - isDefault: default, - version: 1, - editable: false, - jsonData: { - httpMethod: method, - }, - }], - }, - - withBasicAuth(username, password):: { - datasources: std.map(function(ds) ds { - basicAuth: true, - basicAuthUser: username, - basicAuthPassword: password, - }, super.datasources), - }, -} diff --git a/example/k3d/lib/grafana/main.libsonnet b/example/k3d/lib/grafana/main.libsonnet deleted file mode 100644 index 3c2d5f9014ac..000000000000 --- a/example/k3d/lib/grafana/main.libsonnet +++ /dev/null @@ -1,132 +0,0 @@ -local config = import 'config.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local service = k.core.v1.service; - -{ - new(dashboards={}, datasources=[], namespace='default'):: { - _images:: config._images, - _config:: config._config { namespace: namespace }, - _dashboards:: {}, - _datasources:: datasources, - - local _images = self._images, - local _config = self._config, - local _dashboards = 
self._dashboards, - local _datasources = self._datasources, - - grafana_cm: - configMap.new('grafana-config') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'grafana.ini': std.manifestIni(_config.grafana_ini), - }), - - grafana_dashboard_cm: - if _config.dashboard_config_maps > 0 - then {} - else - configMap.new('dashboards') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withDataMixin({ - [name]: std.toString( - $.dashboards[name] - { uid: std.substr(std.md5(std.toString($.dashboards[name])), 0, 9) } - ) - for name in std.objectFields($.dashboards) - }), - - grafana_dashboard_cms: { - ['dashboard-%d' % shard]: - configMap.new('dashboards-%d' % shard) + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withDataMixin({ - [name]: std.toString( - _dashboards[name] - { uid: std.substr(std.md5(std.toString(_dashboards[name])), 0, 9) } - ) - for name in std.objectFields(_dashboards) - if std.codepoint(std.md5(name)[1]) % _config.dashboard_config_maps == shard - }) - for shard in std.range(0, _config.dashboard_config_maps - 1) - }, - - grafana_datasource_cm: - configMap.new('grafana-datasources') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withDataMixin(std.foldl(function(acc, obj) acc { - ['%s.yml' % obj.datasources[0].name]: k.util.manifestYaml(obj), - }, self._datasources, {})), - - grafana_dashboard_provisioning_cm: - configMap.new('grafana-dashboard-provisioning') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'dashboards.yml': k.util.manifestYaml({ - apiVersion: 1, - providers: [{ - name: 'dashboards', - orgId: 1, - folder: '', - folderUid: '', - type: 'file', - disableDeletion: true, - editable: false, - updateIntervalSeconds: 10, - allowUiUpdates: false, - options: { - path: '/grafana/dashboards', - }, - }], - }), - }), - - container:: - container.new('grafana', _images.grafana) + - container.withPorts(containerPort.new('grafana', 80)) + 
- container.withCommand([ - '/usr/share/grafana/bin/grafana-server', - '--homepath=/usr/share/grafana', - '--config=/etc/grafana-config/grafana.ini', - ]) + - k.util.resourcesRequests('10m', '40Mi'), - - deployment: - deployment.new('grafana', 1, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.spec.securityContext.withRunAsUser(0) + - k.util.configMapVolumeMount(self.grafana_cm, '/etc/grafana-config') + - k.util.configMapVolumeMount(self.grafana_datasource_cm, '%(provisioning_dir)s/datasources' % _config) + - k.util.configMapVolumeMount(self.grafana_dashboard_provisioning_cm, '%(provisioning_dir)s/dashboards' % _config) + - ( - if self._config.dashboard_config_maps == 0 - then k.util.configMapVolumeMount(self.grafana_dashboard_config_map, '/grafana/dashboards') - else - std.foldr( - function(m, acc) m + acc, - [ - k.util.configVolumeMount('dashboards-%d' % shard, '/grafana/dashboards/%d' % shard) - for shard in std.range(0, self._config.dashboard_config_maps - 1) - ], - {} - ) - ) + - k.util.podPriority('critical'), - - service: - k.util.serviceFor(self.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, - - // withDashboards sets the list of dashboards. Dashboards is an object where the - // key should be the filename. - withDashboards(dashboards={}):: { _dashboards:: dashboards }, - - // withDataSources sets the list of datasources. Datasources can be created - // using datasources.libsonnet. 
- withDataSources(datasources=[]):: { _datasources:: datasources }, -} diff --git a/example/k3d/lib/k.libsonnet b/example/k3d/lib/k.libsonnet deleted file mode 100644 index 193e217fe5b9..000000000000 --- a/example/k3d/lib/k.libsonnet +++ /dev/null @@ -1,2 +0,0 @@ -(import 'github.com/jsonnet-libs/k8s-alpha/1.14/main.libsonnet') -+ (import 'github.com/jsonnet-libs/k8s-alpha/1.14/extensions/kausal-shim.libsonnet') diff --git a/example/k3d/lib/kube-state-metrics/main.libsonnet b/example/k3d/lib/kube-state-metrics/main.libsonnet deleted file mode 100644 index 48788c8384f9..000000000000 --- a/example/k3d/lib/kube-state-metrics/main.libsonnet +++ /dev/null @@ -1,135 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local service = k.core.v1.service; -local serviceAccount = k.core.v1.serviceAccount; -local policyRule = k.rbac.v1.policyRule; - -{ - new(namespace=''):: { - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: { namespace: namespace } }, - - container:: - container.new('kube-state-metrics', 'k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.1.0') + - container.withPorts([ - containerPort.newNamed(name='http-metrics', containerPort=8080), - containerPort.newNamed(name='self-metrics', containerPort=8081), - ]) + - container.withArgs([ - '--port=8080', - '--telemetry-host=0.0.0.0', - '--telemetry-port=8081', - ]), - - rbac: - k.util.rbac('kube-state-metrics', [ - policyRule.withApiGroups(['']) + - policyRule.withResources([ - 'configmaps', - 'secrets', - 'nodes', - 'pods', - 'services', - 'resourcequotas', - 'replicationcontrollers', - 'limitranges', - 'persistentvolumeclaims', - 'persistentvolumes', - 'namespaces', - 'endpoints', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['extensions']) + - policyRule.withResources([ - 'daemonsets', - 'deployments', - 'replicasets', - 
'ingresses', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['apps']) + - policyRule.withResources([ - 'daemonsets', - 'deployments', - 'replicasets', - 'statefulsets', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['batch']) + - policyRule.withResources([ - 'cronjobs', - 'jobs', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['autoscaling']) + - policyRule.withResources([ - 'horizontalpodautoscalers', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['authorization.k8s.io']) + - policyRule.withResources(['subjectaccessreviews']) + - policyRule.withVerbs(['create']), - - policyRule.withApiGroups(['ingresses']) + - policyRule.withResources(['ingress']) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['policy']) + - policyRule.withResources(['poddisruptionbudgets']) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['certificates.k8s.io']) + - policyRule.withResources(['certificatesigningrequests']) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['storage.k8s.io']) + - policyRule.withResources([ - 'storageclasses', - 'volumeattachments', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['admissionregistration.k8s.io']) + - policyRule.withResources([ - 'mutatingwebhookconfigurations', - 'validatingwebhookconfigurations', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['networking.k8s.io']) + - policyRule.withResources([ - 'networkpolicies', - 'ingresses', - ]) + - policyRule.withVerbs(['list', 'watch']), - - policyRule.withApiGroups(['coordination.k8s.io']) + - policyRule.withResources(['leases']) + - policyRule.withVerbs(['list', 'watch']), - ]) { - service_account+: - serviceAccount.mixin.metadata.withNamespace(namespace), - }, - - deployment: - deployment.new('kube-state-metrics', 1, [self.container]) + 
- deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.metadata.withAnnotationsMixin({ 'prometheus.io.scrape': 'false' }) + - deployment.mixin.spec.template.spec.withServiceAccount('kube-state-metrics') + - deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) + - deployment.mixin.spec.template.spec.securityContext.withRunAsGroup(65534) + - deployment.mixin.spec.template.spec.securityContext.withFsGroup(0) + - k.util.podPriority('critical'), - - service: - k.util.serviceFor(self.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/example/k3d/lib/load-generator/load-generator-config.json b/example/k3d/lib/load-generator/load-generator-config.json deleted file mode 100644 index 8c87e16c9350..000000000000 --- a/example/k3d/lib/load-generator/load-generator-config.json +++ /dev/null @@ -1,630 +0,0 @@ -{ - "topology": { - "services": [ - { - "serviceName": "frontend", - "tagSets": [ - { - "weight": 1, - "tags": { - "version": "v127", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - }, - { - "weight": 1, - "tags": { - "version": "v125", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - }, - { - "weight": 2, - "tags": { - "version": "v125", - "region": "us-west-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ], - "routes": [ - { - "route": "/product", - "downstreamCalls": { - "productcatalogservice": "/GetProducts", - "recommendationservice": "/GetRecommendations", - "adservice": "/AdRequest" - }, - "tagSets": [ - { - "weight": 1, - "tags": { - "starter": "charmander" - }, - "tagGenerators": [ - { - "rand": { - "seed": 179867746078676, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - }, - "tagGen": {}, - "valLength": 16, - "numTags": 50, - "numVals": 3000 - } - ], - "inherit": [] - }, - { - "weight": 1, - "tags": { - "starter": "squirtle" - }, - "tagGenerators": [], - 
"inherit": [] - }, - { - "weight": 1, - "tags": { - "starter": "bulbasaur" - }, - "tagGenerators": [], - "inherit": [] - } - ] - }, - { - "route": "/cart", - "downstreamCalls": { - "cartservice": "/GetCart", - "recommendationservice": "/GetRecommendations" - }, - "tagSets": [] - }, - { - "route": "/checkout", - "downstreamCalls": { - "checkoutservice": "/PlaceOrder" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 800 - } - ] - }, - { - "route": "/shipping", - "downstreamCalls": { - "shippingservice": "/GetQuote" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 50 - } - ] - }, - { - "route": "/currency", - "downstreamCalls": { - "currencyservice": "/GetConversion" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 50 - } - ] - } - ], - "instances": [ - "frontend-6b654dbf57-zq8dt", - "frontend-d847fdcf5-j6s2f", - "frontend-79d8c8d6c8-9sbff" - ], - "mergedTagSets": {}, - "random": { - "seed": 187004238864083, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "productcatalogservice", - "tagSets": [ - { - "tags": { - "version": "v52" - }, - "tagGenerators": [], - "inherit": [ - "region" - ] - } - ], - "routes": [ - { - "route": "/GetProducts", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [ - "starter" - ], - "maxLatency": 100 - } - ] - }, - { - "route": "/SearchProducts", - "downstreamCalls": {}, - "tagSets": [ - { - "weight": 15, - "tags": { - "error": true, - "http.status_code": 503 - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 400 - }, - { - "weight": 85, - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 400 - } - ] - } - ], - "instances": [ - "productcatalogservice-6b654dbf57-zq8dt", - "productcatalogservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 238238032670139, - "nextNextGaussian": 
0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "recommendationservice", - "tagSets": [ - { - "tags": { - "version": "v234", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetRecommendations", - "downstreamCalls": { - "productcatalogservice": "/GetProducts" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 200 - } - ] - } - ], - "instances": [ - "recommendationservice-6b654dbf57-zq8dt", - "recommendationservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 66295214032801, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "cartservice", - "tagSets": [ - { - "tags": { - "version": "v5", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetCart", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 200 - } - ] - } - ], - "instances": [ - "cartservice-6b654dbf57-zq8dt", - "cartservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 234194353561392, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "checkoutservice", - "tagSets": [ - { - "tags": { - "version": "v37", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ], - "routes": [ - { - "route": "/PlaceOrder", - "downstreamCalls": { - "paymentservice": "/CreditCardInfo", - "shippingservice": "/Address", - "currencyservice": "/GetConversion", - "cartservice": "/GetCart", - "emailservice": "/SendOrderConfirmation" - }, - "tagSets": [ - { - "weight": 25, - "tags": { - "error": true, - "http.status_code": 503 - }, - "tagGenerators": [], - "inherit": [] - }, - { - "weight": 85, - "tags": {}, - "tagGenerators": [], - "inherit": [] - } - ] - } - ], - "instances": [ - "checkoutservice-6b654dbf57-zq8dt", - 
"checkoutservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 60782549660568, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "paymentservice", - "tagSets": [ - { - "tags": { - "version": "v177", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/ChargeRequest", - "downstreamCalls": { - "paymentservice": "/CreditCardInfo" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 700 - } - ] - }, - { - "route": "/CreditCardInfo", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 50 - } - ] - } - ], - "instances": [ - "paymentservice-6b654dbf57-zq8dt", - "paymentservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 174850031049111, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "shippingservice", - "tagSets": [ - { - "tags": { - "version": "v127", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetQuote", - "downstreamCalls": { - "shippingservice": "/Address" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 250 - } - ] - }, - { - "route": "/ShipOrder", - "downstreamCalls": { - "shippingservice": "/Address" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ] - }, - { - "route": "/Address", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - } - ], - "instances": [ - "shippingservice-6b654dbf57-zq8dt", - "shippingservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 107892261530518, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "emailservice", - "tagSets": [ - { - "tags": { - "version": "v27", - 
"region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ], - "routes": [ - { - "route": "/SendOrderConfirmation", - "downstreamCalls": { - "emailservice": "/OrderResult" - }, - "tagSets": [ - { - "weight": 15, - "tags": { - "error": true, - "http.status_code": 503 - }, - "tagGenerators": [], - "inherit": [] - }, - { - "weight": 85, - "tags": {}, - "tagGenerators": [], - "inherit": [] - } - ] - }, - { - "route": "/OrderResult", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - } - ], - "instances": [ - "emailservice-6b654dbf57-zq8dt", - "emailservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 61175057559946, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "currencyservice", - "tagSets": [ - { - "tags": { - "version": "v27", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetConversion", - "downstreamCalls": { - "currencyservice": "/Money" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - }, - { - "route": "/Money", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - } - ], - "instances": [ - "currencyservice-6b654dbf57-zq8dt", - "currencyservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 66219471499700, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "adservice", - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ], - "routes": [ - { - "route": "/AdRequest", - "downstreamCalls": {}, - "tagSets": [] - }, - { - "route": "/Ad", - "downstreamCalls": {}, - "tagSets": [] - } - ], - "instances": [ - "adservice-6b654dbf57-zq8dt", - "adservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - 
"random": { - "seed": 22694143111805, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - } - ] - }, - "rootRoutes": [ - { - "service": "frontend", - "route": "/product", - "tracesPerHour": 288 - }, - { - "service": "frontend", - "route": "/cart", - "tracesPerHour": 1440 - }, - { - "service": "frontend", - "route": "/shipping", - "tracesPerHour": 48 - }, - { - "service": "frontend", - "route": "/currency", - "tracesPerHour": 20 - }, - { - "service": "frontend", - "route": "/checkout", - "tracesPerHour": 48 - } - ] -} \ No newline at end of file diff --git a/example/k3d/lib/load-generator/main.libsonnet b/example/k3d/lib/load-generator/main.libsonnet deleted file mode 100644 index 5b0d15a7ad14..000000000000 --- a/example/k3d/lib/load-generator/main.libsonnet +++ /dev/null @@ -1,54 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local pvc = k.core.v1.persistentVolumeClaim; -local service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(namespace=''):: { - local this = self, - - _images:: { - load_generator: 'omnition/synthetic-load-generator:1.0.25', - }, - _config:: (import './load-generator-config.json'), - - configMap: - configMap.new('load-generator') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'config.json': std.toString(this._config), - }), - - container:: - container.new('load-generator', this._images.load_generator) + - container.withPorts([ - containerPort.newNamed(name='grpc', containerPort=55680), - ]) + - container.withEnvMixin([ - { - name: 'TOPOLOGY_FILE', - value: '/etc/load-generator/config.json', - }, - { - name: 'JAEGER_COLLECTOR_URL', - value: 'http://grafana-agent.default.svc.cluster.local:14268', - }, - ]), - - deployment: - deployment.new('load-generator', 
1, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - k.util.configMapVolumeMount(this.configMap, '/etc/load-generator'), - - - service: - k.util.serviceFor(self.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/example/k3d/lib/loki/loki-config.libsonnet b/example/k3d/lib/loki/loki-config.libsonnet deleted file mode 100644 index 22e757cb4d4b..000000000000 --- a/example/k3d/lib/loki/loki-config.libsonnet +++ /dev/null @@ -1,61 +0,0 @@ -{ - auth_enabled: false, - - server: { - graceful_shutdown_timeout: '5s', - http_server_idle_timeout: '120s', - grpc_server_max_recv_msg_size: 1024 * 1024 * 64, - }, - - limits_config: { - enforce_metric_name: false, - reject_old_samples: true, - reject_old_samples_max_age: '24h', - }, - - ingester: { - chunk_idle_period: '5m', - chunk_retain_period: '30s', - max_transfer_retries: 1, - lifecycler: { - address: '127.0.0.1', - final_sleep: '0s', - ring: { - kvstore: { store: 'inmemory' }, - replication_factor: 1, - }, - }, - }, - - schema_config: { - configs: [{ - from: '2020-05-25', - store: 'boltdb', - object_store: 'filesystem', - schema: 'v11', - index: { - prefix: 'index_', - period: '24h', - }, - }], - }, - - storage_config: { - boltdb: { - directory: '/tmp/loki/index', - }, - - filesystem: { - directory: '/tmp/loki/chunks', - }, - }, - - chunk_store_config: { - max_look_back_period: 0, - }, - - table_manager: { - retention_deletes_enabled: true, - retention_period: '48h', - }, -} diff --git a/example/k3d/lib/loki/main.libsonnet b/example/k3d/lib/loki/main.libsonnet deleted file mode 100644 index 6fbd6bf49e98..000000000000 --- a/example/k3d/lib/loki/main.libsonnet +++ /dev/null @@ -1,64 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local pvc = k.core.v1.persistentVolumeClaim; -local 
service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(namespace=''):: { - local this = self, - - _images:: { - loki: 'grafana/loki:1.4.1', - }, - _config:: (import './loki-config.libsonnet'), - - configMap: - configMap.new('loki-config') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'config.yaml': k.util.manifestYaml(this._config), - }), - - container:: - container.new('loki', this._images.loki) + - container.withPorts([ - containerPort.newNamed(name='http-metrics', containerPort=80), - containerPort.newNamed(name='grpc', containerPort=9095), - ]) + - container.withVolumeMountsMixin( - volumeMount.new('loki-data', '/tmp/loki'), - ) + - container.withArgsMixin( - k.util.mapToFlags({ - 'config.file': '/etc/loki/config.yaml', - }), - ), - - pvc: - { apiVersion: 'v1', kind: 'PersistentVolumeClaim' } + - pvc.new() + - pvc.mixin.metadata.withName('loki-data') + - pvc.mixin.metadata.withNamespace(namespace) + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') + - pvc.mixin.spec.resources.withRequests({ storage: '10Gi' }), - - deployment: - deployment.new('loki', 1, [this.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.spec.withVolumesMixin([ - volume.fromPersistentVolumeClaim('loki-data', 'loki-data'), - ]) + - k.util.configMapVolumeMount(this.configMap, '/etc/loki') + - deployment.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800), - - service: - k.util.serviceFor(this.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/example/k3d/lib/node-exporter/main.libsonnet b/example/k3d/lib/node-exporter/main.libsonnet deleted file mode 100644 index 5f57f9926928..000000000000 --- a/example/k3d/lib/node-exporter/main.libsonnet +++ /dev/null @@ -1,35 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local container = k.core.v1.container; -local containerPort = 
k.core.v1.containerPort; -local daemonSet = k.apps.v1.daemonSet; -local service = k.core.v1.service; - -{ - new(namespace=''):: { - container:: - container.new('node-exporter', 'quay.io/prometheus/node-exporter:v1.1.2') + - container.withPorts([ - containerPort.newNamed(name='http-metrics', containerPort=9100), - ]) + - container.withArgsMixin([ - '--path.rootfs=/host/root', - '--path.procfs=/host/proc', - '--path.sysfs=/host/sys', - '--collector.netdev.device-exclude=^veth.+$', - ]) + - container.mixin.securityContext.withPrivileged(true) + - container.mixin.securityContext.withRunAsUser(0), - - daemonSet: - daemonSet.new('node-exporter', [self.container]) + - daemonSet.mixin.metadata.withNamespace(namespace) + - daemonSet.mixin.spec.template.metadata.withAnnotationsMixin({ 'prometheus.io.scrape': 'false' }) + - daemonSet.mixin.spec.template.spec.withHostPid(true) + - daemonSet.mixin.spec.template.spec.withHostNetwork(true) + - k.util.hostVolumeMount('proc', '/proc', '/host/proc') + - k.util.hostVolumeMount('sys', '/sys', '/host/sys') + - k.util.hostVolumeMount('root', '/', '/host/root') + - k.util.podPriority('critical'), - }, -} diff --git a/example/k3d/lib/prometheus/main.libsonnet b/example/k3d/lib/prometheus/main.libsonnet deleted file mode 100644 index 74c9cf6b98b1..000000000000 --- a/example/k3d/lib/prometheus/main.libsonnet +++ /dev/null @@ -1,90 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local statefulSet = k.apps.v1.statefulSet; -local pvc = k.core.v1.persistentVolumeClaim; -local service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; -local serviceAccount = k.core.v1.serviceAccount; -local policyRule = k.rbac.v1.policyRule; - -{ - new(namespace=''):: { - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: 
{ namespace: namespace } }, - local this = self, - - _images:: { - prom: 'prom/prometheus:v2.28.0', - }, - - _config:: { - rule_files: ['/etc/prometheus/rules.yaml'], - }, - _rules:: {}, - - rbac: - k.util.rbac('prometheus', [ - policyRule.withApiGroups(['']) + - policyRule.withResources(['nodes', 'nodes/proxy', 'services', 'endpoints', 'pods']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withNonResourceUrls('/metrics') + - policyRule.withVerbs(['get']), - ]) { - service_account+: - serviceAccount.mixin.metadata.withNamespace(namespace), - }, - - configMap: - configMap.new('prometheus') + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'prometheus.yaml': k.util.manifestYaml(this._config), - 'rules.yaml': k.util.manifestYaml(this._rules), - }), - - container:: - container.new('prometheus', this._images.prom) + - container.withPorts([ - containerPort.newNamed(name='http-metrics', containerPort=9090), - ]) + - container.withVolumeMountsMixin( - volumeMount.new('prometheus-data', '/data'), - ) + - container.withArgsMixin([ - '--config.file=/etc/prometheus/prometheus.yaml', - '--storage.tsdb.path=/data', - ]), - - pvc:: - { apiVersion: 'v1', kind: 'PersistentVolumeClaim' } + - pvc.new() + - pvc.mixin.metadata.withName('prometheus-data') + - pvc.mixin.metadata.withNamespace(namespace) + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') + - pvc.mixin.spec.resources.withRequests({ storage: '10Gi' }), - - statefulSet: - statefulSet.new( - name='prometheus', - replicas=1, - containers=[this.container], - volumeClaims=[this.pvc] - ) + - statefulSet.mixin.spec.withServiceName('prometheus') + - k.util.configMapVolumeMount(this.configMap, '/etc/prometheus') + - statefulSet.mixin.spec.template.spec.withServiceAccountName('prometheus') + - statefulSet.mixin.metadata.withNamespace(namespace), - - service: - k.util.serviceFor(this.statefulSet) + - service.mixin.metadata.withNamespace(namespace), - }, - - 
withConfigMixin(config={}):: { _config+:: config }, - withRulesMixin(rules={}):: { _rules+:: rules }, -} diff --git a/example/k3d/scripts/create.bash b/example/k3d/scripts/create.bash deleted file mode 100755 index 24ec71361835..000000000000 --- a/example/k3d/scripts/create.bash +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -EXTRA_MOUNTS="" - -if [ -f /etc/machine-id ]; then - EXTRA_MOUNTS="$EXTRA_MOUNTS -v /etc/machine-id:/etc/machine-id" -fi - -if [ -d /dev/mapper ]; then - EXTRA_MOUNTS="$EXTRA_MOUNTS -v /dev/mapper:/dev/mapper" -fi - -k3d cluster create agent-k3d \ - --port 30080:80@loadbalancer \ - --api-port 50443 \ - -v /var/lib/k3d/agent-k3d/storage/:/var/lib/rancher/k3s/storage/ \ - $EXTRA_MOUNTS \ - --kubeconfig-update-default=true \ - --kubeconfig-switch-context=true \ - --wait diff --git a/example/k3d/scripts/smoke-test.bash b/example/k3d/scripts/smoke-test.bash deleted file mode 100755 index 4ed75ae223c2..000000000000 --- a/example/k3d/scripts/smoke-test.bash +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env bash -# -# Usage: -# smoke-test.bash [-i] [-d] [-s] [-t ] -# -# Dependencies: -# k3d >=3.0 -# Tanka -# jq -# -# smoke-test.bash performs smoke tests that can be used to validate a release. -# It only validates correctness, and does not attempt to do significant load -# testing or performance benchmarks. -# -# It works by deploying a k3d cluster with two pairs of Agent deployments: one -# using the scraping service, and one using host filtering. Each Agent deployment -# monitors a unique set of correctness tools (Loki Canary, Tempo Vulture, -# Cortex text-exporter). These tools expose correctness metrics, generated by -# by querying the backends that the Grafana Agents are configured to send -# telemetry data to. -# -# Grafana and Prometheus are deployed to the cluster and are responsible for -# internal monitoring. 
Prometheus is configured with a set of alert rules that serve -# as test cases for the smoke tests; alerts generated within the span of the testing -# period are treated as failures. -# -# After the smoke-test duration period (defaulting to 3h), alerts will -# be checked, and the script will end. -# -# The k3d cluster is kept alive after the test for analysis. To clean up assets created -# by the script, re-run the script with the -d flag. - -set -euo pipefail - -# Constants -ROOT=$(git rev-parse --show-toplevel) -K3D_CLUSTER_NAME="agent-smoke-test" -SKIP_CREATE="" - -# Variables - -# Which function will be called -ENTRYPOINT="run" -TEST_DURATION="10800" -IMPORT_IMAGES="" - -while getopts "dt:ish" opt; do - case $opt in - d) ENTRYPOINT="cleanup" ;; - t) TEST_DURATION=$OPTARG ;; - i) IMPORT_IMAGES="yes" ;; - s) SKIP_CREATE="yes" ;; - h) - echo "Usage: $0 [-i] [-d] [-s] [-t ]" - exit 0 - ;; - *) - echo "Usage: $0 [-i] [-d] [-s] [-t ]" - exit 1 - ;; - esac -done - -# Run runs the smoke test for $TEST_DURATION. -run() { - if [[ -z "$SKIP_CREATE" ]]; then - echo "--- Creating k3d cluster $K3D_CLUSTER_NAME" - k3d cluster create $K3D_CLUSTER_NAME \ - --port 50080:80@loadbalancer \ - --api-port 50443 \ - --kubeconfig-update-default=true \ - --kubeconfig-switch-context=true \ - --wait >/dev/null - fi - - # Give the cluster a little bit of time to settle before - # applying the environment - echo "--- Waiting for cluster to warm up" - sleep 10 - - if [[ ! -z "$IMPORT_IMAGES" ]]; then - echo "--- Importing local images" - - k3d image import -c $K3D_CLUSTER_NAME \ - grafana/agent:main \ - grafana/agentctl:main \ - us.gcr.io/kubernetes-dev/grafana/agent-crow:main \ - us.gcr.io/kubernetes-dev/grafana/agent-smoke:main - fi - - (cd $ROOT/example/k3d && jb install) - tk apply $ROOT/example/k3d/smoke --dangerous-auto-approve - - # Immediately create a job to sync configs so our two Agent deployments - # are synced up as closely as possible. 
- kubectl --context=k3d-$K3D_CLUSTER_NAME --namespace=smoke \ - create job --from=cronjob/grafana-agent-syncer \ - grafana-agent-syncer-startup - - echo "Your environment is now running for the next $TEST_DURATION seconds." - echo "Grafana URL: http://grafana.k3d.localhost:50080" - echo "Prometheus URL: http://prometheus.k3d.localhost:50080" - echo "Check smoke test logs: " - echo " kubectl logs --namespace=smoke -f deployment/smoke-test" - sleep $TEST_DURATION - - kubectl scale -n smoke --replicas=0 deployment/smoke-test - - echo "Smoke tests complete!" - echo "Grafana URL: http://grafana.k3d.localhost:50080" - echo "Prometheus URL: http://prometheus.k3d.localhost:50080" - echo "" - echo "Getting results..." - - get_results -} - -get_results() { - NUM_ALERTS=$(curl -s -G \ - -H "Host: prometheus.k3d.localhost" \ - -d "query=count_over_time(ALERTS{alertstate=\"firing\"}[$TEST_DURATION])" \ - 'http://localhost:50080/api/v1/query' \ - | jq '.data.result | length' \ - ) - if test $NUM_ALERTS -ne 0; then - echo "FAIL: $NUM_ALERTS alerts found over the last $TEST_DURATION seconds." - echo "More information: http://prometheus.k3d.localhost:50080/graph?g0.expr=count_over_time(ALERTS{alertstate%3D%22firing%22}[$TEST_DURATION])" - - exit 1 - else - echo "PASS: 0 alerts found over the last $TEST_DURATION seconds. You're good to go!" 
- - exit 0 - fi -} - -cleanup() { - echo "--- Deleting k3d cluster $K3D_CLUSTER_NAME" - k3d cluster delete $K3D_CLUSTER_NAME >/dev/null -} - -$ENTRYPOINT diff --git a/example/k3d/smoke/main.jsonnet b/example/k3d/smoke/main.jsonnet deleted file mode 100644 index 553c554927e6..000000000000 --- a/example/k3d/smoke/main.jsonnet +++ /dev/null @@ -1,512 +0,0 @@ -local monitoring = import './monitoring/main.jsonnet'; -local cortex = import 'cortex/main.libsonnet'; -local canary = import 'github.com/grafana/loki/production/ksonnet/loki-canary/loki-canary.libsonnet'; -local vulture = import 'github.com/grafana/tempo/operations/jsonnet/microservices/vulture.libsonnet'; -local tempo = import 'github.com/grafana/tempo/operations/jsonnet/single-binary/tempo.libsonnet'; -local avalanche = import 'grafana-agent/smoke/avalanche/main.libsonnet'; -local crow = import 'grafana-agent/smoke/crow/main.libsonnet'; -local etcd = import 'grafana-agent/smoke/etcd/main.libsonnet'; -local smoke = import 'grafana-agent/smoke/main.libsonnet'; -local gragent = import 'grafana-agent/v2/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; -local loki = import 'loki/main.libsonnet'; - -local namespace = k.core.v1.namespace; -local pvc = k.core.v1.persistentVolumeClaim; -local volumeMount = k.core.v1.volumeMount; -local containerPort = k.core.v1.containerPort; -local statefulset = k.apps.v1.statefulSet; -local service = k.core.v1.service; -local configMap = k.core.v1.configMap; -local deployment = k.apps.v1.deployment; -local daemonSet = k.apps.v1.daemonSet; - -local images = { - agent: 'grafana/agent:main', - agentctl: 'grafana/agentctl:main', -}; - -local new_crow(name, selector) = - crow.new(name, namespace='smoke', config={ - args+: { - 'crow.prometheus-addr': 'http://cortex/api/prom', - 'crow.extra-selectors': selector, - }, - }); - -local new_smoke(name) = smoke.new(name, namespace='smoke', config={ - mutationFrequency: '5m', - chaosFrequency: '30m', -}); - - -local smoke = { - 
ns: namespace.new('smoke'), - - cortex: cortex.new('smoke'), - - tempo: tempo { - _config+:: { - namespace: 'smoke', - tempo: { - port: 3200, - replicas: 1, - headless_service_name: 'localhost', - }, - pvc_size: '30Gi', - pvc_storage_class: 'local-path', - receivers: { - jaeger: { - protocols: { - thrift_http: null, - }, - }, - otlp: { - protocols: { - grpc: { - endpoint: '0.0.0.0:4317', - }, - }, - }, - }, - }, - tempo_config+: { - querier: { - frontend_worker: { - frontend_address: 'localhost:9095', - }, - }, - }, - tempo_statefulset+: - statefulset.mixin.metadata.withNamespace('smoke'), - tempo_service+: - service.mixin.metadata.withNamespace('smoke'), - tempo_headless_service+: - service.mixin.metadata.withNamespace('smoke'), - tempo_query_configmap+: - configMap.mixin.metadata.withNamespace('smoke'), - tempo_configmap+: - configMap.mixin.metadata.withNamespace('smoke'), - }, - - loki: loki.new(namespace='smoke'), - - // https://grafana.com/docs/loki/latest/operations/loki-canary/ - canary: canary { - loki_canary_args+:: { - addr: 'loki:80', - port: '80', - tls: false, - labelname: 'instance', - labelvalue: '$(POD_NAME)', - interval: '1s', - 'metric-test-interval': '30m', - 'metric-test-range': '2h', - size: 1024, - wait: '3m', - }, - _config+:: { - namespace: 'smoke', - }, - loki_canary_daemonset+: - daemonSet.mixin.metadata.withNamespace('smoke'), - }, - - // Needed to run agent cluster - etcd: etcd.new('smoke'), - - avalanche: avalanche.new(replicas=3, namespace='smoke', config={ - // We're going to be running a lot of these and we're not trying to test - // for load, so reduce the cardinality and churn rate. 
- metric_count: 1000, - series_interval: 300, - metric_interval: 600, - }), - - smoke_test: new_smoke('smoke-test'), - - crows: [ - new_crow('crow-single', 'cluster="grafana-agent"'), - new_crow('crow-cluster', 'cluster="grafana-agent-cluster"'), - ], - - vulture: vulture { - _images+:: { - tempo_vulture: 'grafana/tempo-vulture:latest', - }, - _config+:: { - vulture: { - replicas: 1, - tempoPushUrl: 'http://grafana-agent', - tempoQueryUrl: 'http://tempo:3200', - tempoOrgId: '', - tempoRetentionDuration: '336h', - tempoSearchBackoffDuration: '0s', - tempoReadBackoffDuration: '10s', - tempoWriteBackoffDuration: '10s', - }, - }, - tempo_vulture_deployment+: - deployment.mixin.metadata.withNamespace('smoke'), - }, - - local metric_instances(crow_name) = [{ - name: 'crow', - remote_write: [ - { - url: 'http://cortex/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'drop', - }, - ], - }, - { - url: 'http://smoke-test:19090/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'keep', - }, - ], - }, - ], - scrape_configs: [ - { - job_name: 'crow', - metrics_path: '/validate', - - kubernetes_sd_configs: [{ role: 'pod' }], - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - }, - bearer_token_file: '/var/run/secrets/kubernetes.io/serviceaccount/token', - - relabel_configs: [{ - source_labels: ['__meta_kubernetes_namespace'], - regex: 'smoke', - action: 'keep', - }, { - source_labels: ['__meta_kubernetes_pod_container_name'], - regex: crow_name, - action: 'keep', - }], - }, - ], - }, { - name: 'avalanche', - remote_write: [ - { - url: 'http://cortex/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'drop', - }, - ], - }, - { - url: 'http://smoke-test:19090/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - 
action: 'keep', - }, - ], - }, - ], - scrape_configs: [ - { - job_name: 'avalanche', - kubernetes_sd_configs: [{ role: 'pod' }], - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - }, - bearer_token_file: '/var/run/secrets/kubernetes.io/serviceaccount/token', - - relabel_configs: [{ - source_labels: ['__meta_kubernetes_namespace'], - regex: 'smoke', - action: 'keep', - }, { - source_labels: ['__meta_kubernetes_pod_container_name'], - regex: 'avalanche', - action: 'keep', - }], - }, - ], - }, { - name: 'vulture', - remote_write: [ - { - url: 'http://cortex/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'drop', - }, - ], - }, - { - url: 'http://smoke-test:19090/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'keep', - }, - ], - }, - ], - scrape_configs: [ - { - job_name: 'vulture', - kubernetes_sd_configs: [{ role: 'pod' }], - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - }, - bearer_token_file: '/var/run/secrets/kubernetes.io/serviceaccount/token', - - relabel_configs: [{ - source_labels: ['__meta_kubernetes_namespace'], - regex: 'smoke', - action: 'keep', - }, { - source_labels: ['__meta_kubernetes_pod_container_name'], - regex: 'avalanche', - action: 'keep', - }], - }, - ], - }, { - name: 'canary', - remote_write: [ - { - url: 'http://cortex/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'drop', - }, - ], - }, - { - url: 'http://smoke-test:19090/api/prom/push', - write_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'avalanche_.*', - action: 'keep', - }, - ], - }, - ], - scrape_configs: [ - { - job_name: 'canary', - kubernetes_sd_configs: [{ role: 'pod' }], - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - }, - bearer_token_file: 
'/var/run/secrets/kubernetes.io/serviceaccount/token', - - relabel_configs: [ - { - source_labels: ['__meta_kubernetes_namespace'], - regex: 'smoke', - action: 'keep', - }, - { - source_labels: ['__meta_kubernetes_pod_container_name'], - regex: 'canary', - action: 'keep', - }, - ], - }, - ], - }], - - local logs_instances() = [{ - name: 'write-loki', - clients: [{ - url: 'http://loki/loki/api/v1/push', - basic_auth: { - username: '104334', - password: 'noauth', - }, - external_labels: { - cluster: 'grafana-agent', - }, - - }], - scrape_configs: [{ - job_name: 'write-canary-output', - kubernetes_sd_configs: [{ role: 'pod' }], - pipeline_stages: [ - { cri: {} }, - ], - relabel_configs: [ - { - source_labels: ['__meta_kubernetes_namespace'], - regex: 'smoke', - action: 'keep', - }, - { - source_labels: ['__meta_kubernetes_pod_container_name'], - regex: 'loki-canary', - action: 'keep', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name'], - target_label: '__path__', - separator: '/', - replacement: '/var/log/pods/*$1/*.log', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_name'], - target_label: 'pod', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_name'], - target_label: 'instance', - }, - ], - }], - }], - - normal_agent: - gragent.new(name='grafana-agent', namespace='smoke') + - gragent.withImagesMixin(images) + - gragent.withStatefulSetController( - replicas=1, - volumeClaims=[ - pvc.new() + - pvc.mixin.metadata.withName('agent-wal') + - pvc.mixin.metadata.withNamespace('smoke') + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') + - pvc.mixin.spec.resources.withRequests({ storage: '5Gi' }), - ], - ) + - gragent.withVolumeMountsMixin([volumeMount.new('agent-wal', '/var/lib/agent')]) + - gragent.withService() + - gragent.withPortsMixin([ - containerPort.new('thrift-grpc', 14250) + containerPort.withProtocol('TCP'), - ]) + - gragent.withLogVolumeMounts() + - 
gragent.withAgentConfig({ - server: { log_level: 'debug' }, - logs: { - positions_directory: '/var/lib/agent/logs-positions', - configs: logs_instances(), - }, - - prometheus: { - global: { - scrape_interval: '1m', - external_labels: { - cluster: 'grafana-agent', - }, - }, - wal_directory: '/var/lib/agent/data', - configs: metric_instances('crow-single'), - }, - traces: { - configs: [ - { - name: 'vulture', - receivers: { - jaeger: { - protocols: { - grpc: null, - }, - }, - }, - remote_write: [ - { - endpoint: 'tempo:4317', - insecure: true, - }, - ], - batch: { - timeout: '5s', - send_batch_size: 100, - }, - }, - ], - }, - }), - - cluster_agent: - gragent.new(name='grafana-agent-cluster', namespace='smoke') + - gragent.withImagesMixin(images) + - gragent.withStatefulSetController( - replicas=3, - volumeClaims=[ - pvc.new() + - pvc.mixin.metadata.withName('agent-cluster-wal') + - pvc.mixin.metadata.withNamespace('smoke') + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') + - pvc.mixin.spec.resources.withRequests({ storage: '5Gi' }), - ], - ) + - gragent.withVolumeMountsMixin([volumeMount.new('agent-cluster-wal', '/var/lib/agent')]) + - gragent.withService() + - gragent.withLogVolumeMounts() + - gragent.withAgentConfig({ - server: { log_level: 'debug' }, - - prometheus: { - global: { - scrape_interval: '1m', - external_labels: { - cluster: 'grafana-agent-cluster', - }, - }, - wal_directory: '/var/lib/agent/data', - - scraping_service: { - enabled: true, - dangerous_allow_reading_files: true, - kvstore: { - store: 'etcd', - etcd: { endpoints: ['etcd:2379'] }, - }, - lifecycler: { - ring: { - kvstore: { - store: 'etcd', - etcd: { endpoints: ['etcd:2379'] }, - }, - }, - }, - }, - }, - }), - - // Spawn a syncer so our cluster gets the same scrape jobs as our - // normal agent. 
- sycner: gragent.newSyncer( - name='grafana-agent-syncer', - namespace='smoke', - config={ - image: images.agentctl, - api: 'http://grafana-agent-cluster.smoke.svc.cluster.local', - configs: metric_instances('crow-cluster'), - } - ), -}; - -{ - monitoring: monitoring, - smoke: smoke, -} diff --git a/example/k3d/smoke/monitoring/main.jsonnet b/example/k3d/smoke/monitoring/main.jsonnet deleted file mode 100644 index 4e6a7738ec97..000000000000 --- a/example/k3d/smoke/monitoring/main.jsonnet +++ /dev/null @@ -1,61 +0,0 @@ -local grafana_mixins = import 'default/mixins.libsonnet'; -local datasource = import 'grafana/datasource.libsonnet'; -local grafana = import 'grafana/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; -local kube_state_metrics = import 'kube-state-metrics/main.libsonnet'; -local node_exporter = import 'node-exporter/main.libsonnet'; -local prometheus = import 'prometheus/main.libsonnet'; - -local namespace = k.core.v1.namespace; -local ingress = k.networking.v1beta1.ingress; -local rule = k.networking.v1beta1.ingressRule; -local path = k.networking.v1beta1.httpIngressPath; - - -local prometheus_monitoring = import './prometheus_monitoring.libsonnet'; - -{ - ns: namespace.new('monitoring'), - - grafana: - grafana.new(namespace='monitoring') + - grafana.withDashboards(grafana_mixins.grafanaDashboards) + - grafana.withDataSources([ - datasource.new('Prometheus', 'http://prometheus.monitoring.svc.cluster.local:9090', default='true'), - datasource.new('Cortex', 'http://cortex.smoke.svc.cluster.local/api/prom'), - ]), - - prometheus: - prometheus.new(namespace='monitoring') + - prometheus.withConfigMixin(prometheus_monitoring.config) + - prometheus.withRulesMixin(prometheus_monitoring.rules), - - node_exporter: node_exporter.new(namespace='monitoring'), - kube_state_metrics: kube_state_metrics.new(namespace='monitoring'), - - ingresses: { - prometheus: - ingress.new('prometheus') + - ingress.mixin.metadata.withNamespace('monitoring') + - 
ingress.mixin.spec.withRules([ - rule.withHost('prometheus.k3d.localhost') + - rule.http.withPaths([ - path.withPath('/') + - path.backend.withServiceName('prometheus') + - path.backend.withServicePort(9090), - ]), - ]), - - grafana: - ingress.new('grafana') + - ingress.mixin.metadata.withNamespace('monitoring') + - ingress.mixin.spec.withRules([ - rule.withHost('grafana.k3d.localhost') + - rule.http.withPaths([ - path.withPath('/') + - path.backend.withServiceName('grafana') + - path.backend.withServicePort(80), - ]), - ]), - }, -} diff --git a/example/k3d/smoke/monitoring/prometheus_monitoring.libsonnet b/example/k3d/smoke/monitoring/prometheus_monitoring.libsonnet deleted file mode 100644 index 0f793508c2f7..000000000000 --- a/example/k3d/smoke/monitoring/prometheus_monitoring.libsonnet +++ /dev/null @@ -1,282 +0,0 @@ -local agent_prometheus = import 'grafana-agent/v1/lib/metrics.libsonnet'; - -{ - config: { - global: { - scrape_interval: '1m', - }, - scrape_configs: agent_prometheus.scrapeInstanceKubernetes.scrape_configs, - }, - - rules: { - groups: [ - { - name: 'GrafanaAgentChecks', - rules: [ - // Basic sanity checks: ensure that Agents exist, are up, - // and haven't been flapping. 
- { - alert: 'GrafanaAgentMissing', - expr: ||| - absent(up{ namespace="smoke", pod="grafana-agent-0" }) == 1 or - absent(up{ namespace="smoke", pod="grafana-agent-cluster-0" }) == 1 or - absent(up{ namespace="smoke", pod="grafana-agent-cluster-1" }) == 1 or - absent(up{ namespace="smoke", pod="grafana-agent-cluster-2" }) == 1 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.pod }} is not running.', - }, - }, - { - alert: 'GrafanaAgentDown', - expr: ||| - up{ - namespace="smoke", - pod=~"grafana-agent-(0|cluster-0|cluster-1|cluster-2)", - } == 0 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.job }} is down', - }, - }, - { - alert: 'GrafanaAgentFlapping', - expr: ||| - avg_over_time(up{ - namespace="smoke", - pod=~"grafana-agent-(0|cluster-0|cluster-1|cluster-2)", - }[5m]) < 1 - |||, - 'for': '15m', - annotations: { - summary: '{{ $labels.job }} is flapping', - }, - }, - - // Checks that the CPU usage doesn't go too high. This was generated - // from main where the CPU usage hovered around 2-3% per pod. - // - // TODO: something less guessworky here. - { - alert: 'GrafanaAgentCPUHigh', - expr: ||| - rate(container_cpu_usage_seconds_total{namespace="smoke", pod=~"grafana-agent-.*"}[1m]) > 0.05 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.pod }} is using more than 5% CPU over the last 5 minutes', - }, - }, - - // We assume roughly ~8KB per series. Check that each deployment - // doesn't go too far above this. - // - // We aggregate the memory of the scraping service together since an individual - // node with a really small number of active series will throw this metric off. 
- { - alert: 'GrafanaAgentMemHigh', - expr: ||| - sum without (pod, instance) (go_memstats_heap_inuse_bytes{job=~"smoke/grafana-agent.*"}) / - sum without (pod, instance, instance_group_name) (agent_wal_storage_active_series{job=~"smoke/grafana-agent.*"}) / 1e3 > 10 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.job }} has used more than 10KB per series for more than 5 minutes', - }, - }, - ], - }, - { - name: 'CrowChecks', - rules: [ - { - alert: 'CrowMissing', - expr: ||| - absent(up{container="crow-single"}) == 1 or - absent(up{container="crow-cluster"}) == 1 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.container }} is not running.', - }, - }, - { - alert: 'CrowDown', - expr: ||| - up{job=~"smoke/crow-.*"} == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Crow {{ $labels.job }} is down.', - }, - }, - { - alert: 'CrowFlapping', - expr: ||| - avg_over_time(up{job=~"smoke/crow-.*"}[5m]) < 1 - |||, - 'for': '15m', - annotations: { - summary: 'Crow {{ $labels.job }} is flapping.', - }, - }, - { - alert: 'CrowNotScraped', - expr: ||| - rate(crow_test_samples_total[1m]) == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Crow {{ $labels.job }} is not being scraped.', - }, - }, - { - alert: 'CrowFailures', - expr: ||| - ( - rate(crow_test_sample_results_total{result="success"}[1m]) - / ignoring(result) rate(crow_test_samples_total[1m]) - ) < 1 - |||, - 'for': '5m', - annotations: { - summary: 'Crow {{ $labels.job }} has had failures for at least 5m', - }, - }, - ], - }, - { - name: 'VultureChecks', - rules: [ - { - alert: 'VultureMissing', - expr: ||| - absent(up{container="vulture"}) == 1 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.container }} is not running.', - }, - }, - { - alert: 'VultureDown', - expr: ||| - up{job=~"smoke/vulture"} == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Vulture {{ $labels.job }} is down.', - }, - }, - { - alert: 'VultureFlapping', - expr: ||| - 
avg_over_time(up{job=~"smoke/vulture"}[5m]) < 1 - |||, - 'for': '15m', - annotations: { - summary: 'Vulture {{ $labels.job }} is flapping.', - }, - }, - { - alert: 'VultureNotScraped', - expr: ||| - rate(tempo_vulture_trace_total[1m]) == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Vulture {{ $labels.job }} is not being scraped.', - }, - }, - { - alert: 'VultureFailures', - expr: ||| - (rate(tempo_vulture_error_total[5m]) / rate(tempo_vulture_trace_total[5m])) > 0.3 - |||, - 'for': '5m', - annotations: { - summary: 'Vulture {{ $labels.job }} has had failures for at least 5m', - }, - }, - ], - }, - { - name: 'CanaryChecks', - rules: [ - { - alert: 'CanaryMissing', - expr: ||| - absent(up{container="loki-canary"}) == 1 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.container }} is not running.', - }, - }, - { - alert: 'CanaryDown', - expr: ||| - up{job=~"smoke/loki-canary"} == 0 - |||, - 'for': '5m', - annotations: { - summary: ' Canary is down.', - }, - }, - { - alert: 'CanaryNotScraped', - expr: ||| - rate(loki_canary_entries_total[1m]) == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Canary is not being scraped.', - }, - }, - { - alert: 'CanaryMissingEntries', - expr: ||| - (rate(loki_canary_missing_entries_total[2m])) > 0 - |||, - 'for': '2m', - annotations: { - summary: 'Canary has had missing entries for at least 2m', - }, - }, - { - alert: 'CanarySpotChecksMissingEntries', - expr: ||| - (rate(loki_canary_spot_check_missing_entries_total[2m])) > 0 - |||, - 'for': '2m', - annotations: { - summary: 'Canary has had missing spot check entries for at least 2m', - }, - }, - { - alert: 'CanaryWebsocketMissingEntries', - expr: ||| - (rate(loki_canary_websocket_missing_entries_total[2m])) > 0 - |||, - 'for': '2m', - annotations: { - summary: 'Canary has had missing websocket entries for at least 2m', - }, - }, - { - alert: 'CanaryUnexpectedEntries', - expr: ||| - (rate(loki_canary_unexpected_entries_total[2m])) > 0 - |||, - 'for': '2m', - 
annotations: { - summary: 'Canary has had unexpected entries for at least 2m', - }, - }, - ], - }, - ], - }, -} diff --git a/example/k3d/smoke/spec.json b/example/k3d/smoke/spec.json deleted file mode 100644 index 8c90817a7a99..000000000000 --- a/example/k3d/smoke/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "default" - }, - "spec": { - "apiServer": "https://0.0.0.0:50443", - "namespace": "default" - } -} diff --git a/go.mod b/go.mod index 4614e9124d44..8da98c5aad41 100644 --- a/go.mod +++ b/go.mod @@ -13,14 +13,13 @@ require ( github.com/PuerkitoBio/rehttp v1.1.0 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.45.24 - github.com/aws/aws-sdk-go-v2 v1.21.1 - github.com/aws/aws-sdk-go-v2/config v1.18.44 + github.com/aws/aws-sdk-go v1.45.25 + github.com/aws/aws-sdk-go-v2 v1.24.0 + github.com/aws/aws-sdk-go-v2/config v1.26.2 github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1 github.com/bmatcuk/doublestar v1.3.4 - github.com/bufbuild/connect-go v1.10.0 github.com/buger/jsonparser v1.1.1 - github.com/burningalchemist/sql_exporter v0.0.0-20221222155641-2ff59aa75200 + github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4 github.com/cespare/xxhash/v2 v2.2.0 github.com/cilium/ebpf v0.12.3 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf @@ -34,10 +33,10 @@ require ( github.com/fortytw2/leaktest v1.3.0 github.com/fsnotify/fsnotify v1.6.0 github.com/github/smimesign v0.2.0 - github.com/go-git/go-git/v5 v5.4.2 + github.com/go-git/go-git/v5 v5.11.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-logr/logr v1.3.0 + github.com/go-logr/logr v1.4.1 github.com/go-sourcemap/sourcemap v2.1.3+incompatible github.com/go-sql-driver/mysql v1.7.1 github.com/gogo/protobuf v1.3.2 @@ -57,9 +56,9 @@ require ( github.com/grafana/go-gelf/v2 v2.0.1 
// Loki main commit where the Prometheus dependency matches ours. TODO(@tpaschalis) Update to kXYZ branch once it's available github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a - github.com/grafana/pyroscope-go/godeltaprof v0.1.3 - github.com/grafana/pyroscope/api v0.2.0 - github.com/grafana/pyroscope/ebpf v0.4.0 + github.com/grafana/pyroscope-go/godeltaprof v0.1.6 + github.com/grafana/pyroscope/api v0.4.0 + github.com/grafana/pyroscope/ebpf v0.4.1 github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db github.com/grafana/river v0.3.0 github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 @@ -68,7 +67,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/hashicorp/consul/api v1.25.1 github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-discover v0.0.0-20220105235006-b95dfa40aaed + github.com/hashicorp/go-discover v0.0.0-20230724184603-e89ebd1b2f65 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v1.0.2 github.com/hashicorp/golang-lru/v2 v2.0.5 @@ -87,14 +86,14 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.3 - github.com/lib/pq v1.10.7 + github.com/lib/pq v1.10.9 github.com/mackerelio/go-osstat v0.2.3 - github.com/miekg/dns v1.1.55 + github.com/miekg/dns v1.1.56 github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 github.com/mitchellh/reflectwalk v1.0.2 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncabatoff/process-exporter v0.7.10 - github.com/nerdswords/yet-another-cloudwatch-exporter v0.54.0 + github.com/nerdswords/yet-another-cloudwatch-exporter v0.55.0 github.com/ohler55/ojg v1.20.0 // indirect github.com/oklog/run v1.1.0 github.com/olekukonko/tablewriter v0.0.5 @@ -126,7 +125,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.87.0 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.87.0 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e - github.com/opentracing-contrib/go-stdlib v1.0.0 + github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 github.com/ory/dockertest/v3 v3.8.1 github.com/oschwald/geoip2-golang v1.9.0 @@ -142,7 +141,7 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.66.0 github.com/prometheus-operator/prometheus-operator/pkg/client v0.66.0 github.com/prometheus/blackbox_exporter v0.24.1-0.20230623125439-bd22efa1c900 - github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_golang v1.18.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 github.com/prometheus/consul_exporter v0.8.0 @@ -150,7 +149,7 @@ require ( github.com/prometheus/mysqld_exporter v0.14.0 github.com/prometheus/node_exporter v1.6.0 github.com/prometheus/procfs v0.12.0 - github.com/prometheus/prometheus v1.99.0 + github.com/prometheus/prometheus v0.48.1 github.com/prometheus/snmp_exporter v0.24.1 github.com/prometheus/statsd_exporter v0.22.8 github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 @@ -212,16 +211,16 @@ require ( go.uber.org/goleak v1.2.1 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.15.0 + golang.org/x/crypto v0.17.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa - golang.org/x/net v0.18.0 + golang.org/x/net v0.19.0 golang.org/x/oauth2 v0.13.0 - golang.org/x/sys v0.14.1-0.20231108175955-e4099bfacb8c + golang.org/x/sys v0.15.0 golang.org/x/text v0.14.0 golang.org/x/time v0.3.0 - google.golang.org/api v0.146.0 + google.golang.org/api v0.149.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible @@ -236,10 +235,10 @@ require ( ) 
require ( - cloud.google.com/go v0.110.8 // indirect - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go v0.110.10 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect - cloud.google.com/go/iam v1.1.2 // indirect + cloud.google.com/go/iam v1.1.5 // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect @@ -271,8 +270,7 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7 // indirect - github.com/acomagu/bufpipe v1.0.3 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/alecthomas/participle/v2 v2.1.0 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/andybalholm/brotli v1.0.5 // indirect @@ -283,21 +281,21 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.42 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.12 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.69 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.42 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.36 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.44 // indirect + 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.36 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.15.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.23.1 // indirect - github.com/aws/smithy-go v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 // indirect + github.com/aws/smithy-go v1.19.0 // indirect github.com/beevik/ntp v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible // indirect @@ -313,7 +311,7 @@ require ( github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/console v1.0.3 // indirect - github.com/containerd/containerd v1.7.6 // indirect + github.com/containerd/containerd v1.7.11 // indirect github.com/containerd/continuity v0.4.2 // indirect github.com/containerd/ttrpc v1.2.2 // indirect github.com/coreos/go-semver v0.3.1 // indirect @@ -326,13 +324,13 @@ require ( github.com/dennwc/varint v1.0.0 // indirect github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba // 
indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/digitalocean/godo v1.99.0 // indirect + github.com/digitalocean/godo v1.104.1 // indirect github.com/dimchansky/utfbom v1.1.1 github.com/docker/cli v23.0.3+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect github.com/eapache/go-resiliency v1.4.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect @@ -342,11 +340,10 @@ require ( github.com/elastic/go-windows v1.0.1 // indirect github.com/ema/qdisc v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/emirpasic/gods v1.12.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect github.com/envoyproxy/go-control-plane v0.11.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fatih/camelcase v1.0.0 // indirect @@ -354,8 +351,8 @@ require ( github.com/felixge/httpsnoop v1.0.3 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect - github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.3.1 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect @@ -379,7 +376,7 @@ require ( 
github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/status v1.1.1 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/gomodule/redigo v1.8.9 // indirect @@ -389,13 +386,13 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gophercloud/gophercloud v1.5.0 // indirect + github.com/gophercloud/gophercloud v1.7.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gosnmp/gosnmp v1.36.0 // indirect github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9 // indirect - github.com/grafana/loki/pkg/push v0.0.0-20230904153656-e4cc2a4f5ec8 // k166 branch + github.com/grafana/loki/pkg/push v0.0.0-20231212100434-384e5c2dc872 // k180 branch github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect @@ -415,7 +412,7 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect - github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e // indirect + github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect github.com/hodgesds/perf-utils 
v0.7.0 // indirect @@ -426,15 +423,15 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/infinityworks/go-common v0.0.0-20170820165359-7f20a140fd37 // indirect github.com/influxdata/telegraf v1.16.3 // indirect - github.com/ionos-cloud/sdk-go/v6 v6.1.8 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.1.9 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.13.0 // indirect + github.com/jackc/pgconn v1.14.0 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.3.1 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.12.0 // indirect - github.com/jackc/pgx/v4 v4.17.2 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.1 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -448,7 +445,7 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/jsimonetti/rtnetlink v1.3.5 // indirect github.com/karrick/godirwalk v1.17.0 // indirect - github.com/kevinburke/ssh_config v1.1.0 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/knadh/koanf v1.5.0 // indirect @@ -456,7 +453,7 @@ require ( github.com/krallistic/kazoo-go v0.0.0-20170526135507-a15279744f4e // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect - github.com/linode/linodego v1.19.0 // indirect + github.com/linode/linodego v1.23.0 // indirect github.com/lufia/iostat v1.2.1 // 
indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -470,7 +467,7 @@ require ( github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/mdlayher/wifi v0.1.0 // indirect - github.com/microsoft/go-mssqldb v0.19.0 // indirect + github.com/microsoft/go-mssqldb v1.6.0 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -506,7 +503,7 @@ require ( github.com/opencontainers/selinux v1.11.0 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect github.com/oschwald/maxminddb-golang v1.11.0 - github.com/ovh/go-ovh v1.4.1 // indirect + github.com/ovh/go-ovh v1.4.3 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect @@ -516,7 +513,7 @@ require ( github.com/prometheus-community/go-runit v0.1.0 // indirect github.com/prometheus/alertmanager v0.26.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 - github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect + github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/remeh/sizedwaitgroup v1.0.0 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect @@ -525,7 +522,7 @@ require ( github.com/safchain/ethtool v0.3.0 // indirect github.com/samber/lo v1.38.1 // indirect github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da // indirect - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect 
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect github.com/sergi/go-diff v1.2.0 // indirect @@ -533,7 +530,7 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect - github.com/snowflakedb/gosnowflake v1.6.22 // indirect + github.com/snowflakedb/gosnowflake v1.7.2-0.20240103203018-f1d625f17408 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/afero v1.9.5 // indirect @@ -551,14 +548,14 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - github.com/vertica/vertica-sql-go v1.3.0 // indirect + github.com/vertica/vertica-sql-go v1.3.3 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect github.com/vmware/govmomi v0.32.0 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/willf/bitset v1.1.11 // indirect github.com/willf/bloom v2.0.3+incompatible // indirect - github.com/xanzy/ssh-agent v0.3.1 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect @@ -567,7 +564,7 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - github.com/xo/dburl v0.13.0 // indirect + github.com/xo/dburl v0.20.0 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect 
go.etcd.io/etcd/api/v3 v3.5.9 // indirect @@ -583,15 +580,15 @@ require ( go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/term v0.14.0 // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/tools v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -607,27 +604,32 @@ require ( require github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab require ( + connectrpc.com/connect v1.14.0 github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7 github.com/natefinch/atomic v1.0.1 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.87.0 - github.com/prometheus/tsdb v0.10.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 k8s.io/apimachinery v0.28.3 ) require ( dario.cat/mergo v1.0.0 
// indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 // indirect github.com/Shopify/sarama v1.38.1 // indirect github.com/Workiva/go-datastructures v1.1.0 // indirect + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0 // indirect github.com/channelmeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/drone/envsubst v1.0.3 // indirect - github.com/go-jose/go-jose/v3 v3.0.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/hetznercloud/hcloud-go/v2 v2.0.0 // indirect + github.com/hetznercloud/hcloud-go/v2 v2.4.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect @@ -641,8 +643,10 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.87.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/prometheus-community/prom-label-proxy v0.6.0 // indirect github.com/sercand/kuberesolver/v4 v4.0.0 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect github.com/sony/gobreaker v0.5.0 // indirect github.com/tidwall/gjson v1.10.2 // indirect github.com/tidwall/match v1.1.1 // indirect @@ -676,13 +680,13 @@ replace ( k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0 ) -// TODO(tpaschalis): remove replace directive once: +// TODO(marctc): remove replace directive once: // // * There is a release of Prometheus which contains -// 
prometheus/prometheus#12677 and prometheus/prometheus#12729. +// prometheus/prometheus#13002 // We use the last v1-related tag as the replace statement does not work for v2 // tags without the v2 suffix to the module root. -replace github.com/prometheus/prometheus => github.com/grafana/prometheus v1.8.2-0.20231016083943-46550094220d // grafana:prometheus:v0.47.2-retry-improvements +replace github.com/prometheus/prometheus => github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2 // grafana/prometheus@drop-old-inmemory-samples-squashed-2 replace gopkg.in/yaml.v2 => github.com/rfratto/go-yaml v0.0.0-20211119180816-77389c3526dc @@ -707,10 +711,6 @@ replace ( // https://github.com/grafana/cadvisor/tree/grafana-v0.47-noglobals github.com/google/cadvisor => github.com/grafana/cadvisor v0.0.0-20231110094609-5f7917925dea - // TODO(mattdurham): this is so you can debug on windows, when PR is merged into perflib, can you use that - // and eventually remove if windows_exporter shifts to it. https://github.com/leoluk/perflib_exporter/pull/43 - github.com/leoluk/perflib_exporter => github.com/grafana/perflib_exporter v0.1.1-0.20230511173423-6166026bd090 - github.com/prometheus-community/postgres_exporter => github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 // TODO(marctc): remove once this PR is merged upstream: https://github.com/prometheus/mysqld_exporter/pull/774 @@ -720,9 +720,6 @@ replace ( github.com/prometheus/node_exporter => github.com/grafana/node_exporter v0.18.1-grafana-r01.0.20231004161416-702318429731 ) -// Excluding fixes a conflict in test packages and allows "go mod tidy" to run. -exclude google.golang.org/grpc/examples v0.0.0-20200728065043-dfc0c05b2da9 - // Replacing for an internal fork which allows us to observe metrics produced by the Collector. // This is a temporary solution while a new configuration design is discussed for the collector. 
Related issues: // https://github.com/open-telemetry/opentelemetry-collector/issues/7532 @@ -745,7 +742,3 @@ exclude ( ) replace github.com/github/smimesign => github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3 - -// This is the last version that used slices.Func with a bool return -// If we upgrade to a newer one then since the signature changed loki will complain. -replace golang.org/x/exp => golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 diff --git a/go.sum b/go.sum index fed259a01614..3223f47d7f2b 100644 --- a/go.sum +++ b/go.sum @@ -28,24 +28,24 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= -cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 
h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4= cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/kms v1.15.2 h1:lh6qra6oC4AyWe5fUUUBe/S27k12OHAleOOOw6KakdE= -cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= +cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -60,10 +60,13 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +connectrpc.com/connect v1.14.0 h1:PDS+J7uoz5Oui2VEOMcfz6Qft7opQM9hPiKvtGC01pA= +connectrpc.com/connect v1.14.0/go.mod 
h1:uoAq5bmhhn43TwhaKdGKN/bZcGtzPW1v+ngDTn5u+8s= contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= @@ -79,22 +82,24 @@ github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9a github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.2/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod 
h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0 h1:TuEMD+E+1aTjjLICGQOW6vLe8UWES7kopac9mUXL56Y= github.com/Azure/azure-sdk-for-go/sdk/internal v1.4.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.10.2 h1:T3P5KJpcgN0m39dhaNM+JjSqF3Z5VqUlKHlth5FgN+8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.10.2/go.mod h1:yA8WUvh3K/SABQEtFHg2Bx5D+414FyFqpT5Fu58P3ao= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1/go.mod h1:Bzf34hhAE9NSxailk8xVeLEZbUjOXcC+GnU1mMKdhLw= 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.8.2 h1:f9lam+D19V0TDn17+aFhrVhWPpfsF5zaGHeqDGJZAVc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.8.2/go.mod h1:29c9+gYpdWhyC4TPANZBPlgoWllMDhguL2AIByPYQtk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= @@ -103,6 +108,10 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.2.0/go.mod h1:ThfyMjs6auYrWPnYJjI3H4H++oVPrz01pizpu8lfl3A= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0 h1:yfJe15aSwEQ6Oo6J+gdfdulPNoZ3TEhmbhLIoxZcA+U= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.0/go.mod h1:Q28U+75mpCaSCDowNEmhIo/rmgdkqmkmzI7N6TGR4UY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= @@ -116,7 +125,6 @@ github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= @@ -124,7 +132,6 @@ github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBW github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= @@ -132,11 +139,9 @@ github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= 
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= @@ -147,7 +152,6 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= @@ -155,7 +159,6 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod 
h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= @@ -165,12 +168,12 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -202,10 +205,8 @@ github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0T 
github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= @@ -217,9 +218,8 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7 h1:DSqTh6nEes/uO8BlNcGk8PzZsxY2sN9ZL//veWBdTRI= -github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= 
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/rehttp v1.1.0 h1:JFZ7OeK+hbJpTxhNB0NDZT47AuXqCU0Smxfjtph7/Rs= @@ -244,8 +244,6 @@ github.com/Workiva/go-datastructures v1.1.0 h1:hu20UpgZneBhQ3ZvwiOGlqJSKIosin2Rd github.com/Workiva/go-datastructures v1.1.0/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -273,8 +271,8 @@ github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6u github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex 
v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= @@ -306,96 +304,98 @@ github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270 h1:JIxGEMs4E5 github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270/go.mod h1:2XtVRGCw/HthOLxU0Qw6o6jSJrcEoOb2OCCl8gQYvGw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.45.24 h1:TZx/CizkmCQn8Rtsb11iLYutEQVGK5PK9wAhwouELBo= -github.com/aws/aws-sdk-go v1.45.24/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= +github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod 
h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.21.1 h1:wjHYshtPpYOZm+/mu3NhVgRRc0baM6LJZOmxPZ5Cwzs= -github.com/aws/aws-sdk-go-v2 v1.21.1/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= +github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= +github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/config v1.18.27/go.mod h1:0My+YgmkGxeqjXZb5BYme5pc4drjTnM+x1GJ3zv42Nw= -github.com/aws/aws-sdk-go-v2/config v1.18.44 h1:U10NQ3OxiY0dGGozmVIENIDnCT0W432PWxk2VO8wGnY= -github.com/aws/aws-sdk-go-v2/config v1.18.44/go.mod h1:pHxnQBldd0heEdJmolLBk78D1Bf69YnKLY3LOpFImlU= +github.com/aws/aws-sdk-go-v2/config v1.26.2 h1:+RWLEIWQIGgrz2pBPAUoGgNGs1TOyF4Hml7hCnYj2jc= +github.com/aws/aws-sdk-go-v2/config v1.26.2/go.mod h1:l6xqvUxt0Oj7PI/SUXYLNyZ9T/yBPn3YTQcJLLOdtR8= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/credentials v1.13.26/go.mod h1:GoXt2YC8jHUBbA4jr+W3JiemnIbkXOfxSXcisUsZ3os= -github.com/aws/aws-sdk-go-v2/credentials v1.13.42 h1:KMkjpZqcMOwtRHChVlHdNxTUUAC6NC/b58mRZDIdcRg= -github.com/aws/aws-sdk-go-v2/credentials v1.13.42/go.mod h1:7ltKclhvEB8305sBhrpls24HGxORl6qgnQqSJ314Uw8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13 h1:WLABQ4Cp4vXtXfOWOS3MEZKr6AAYUpMczLhgKtAjQ/8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13/go.mod h1:Qg6x82FXwW0sJHzYruxGiuApNo31UEtJvXVSZAXeWiw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds 
v1.13.4/go.mod h1:E1hLXN/BL2e6YizK1zFlYd8vsfi2GTjbjBazinMmeaM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.12 h1:3j5lrl9kVQrJ1BU4O0z7MQ8sa+UXdiLuo4j0V+odNI8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.12/go.mod h1:JbFpcHDBdsex1zpIKuVRorZSQiZEyc3MykNCcjgz174= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.69 h1:u9tquzvPabbR1hghIq0+snSCYPeF9jA7JeB46iazH6w= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.69/go.mod h1:KzrYE4t9hLh8TjJkfGsmPYcVlYb7QWiPPv3aCOhwms0= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.42 h1:817VqVe6wvwE46xXy6YF5RywvjOX6U2zRQQ6IbQFK0s= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.42/go.mod h1:oDfgXoBBmj+kXnqxDDnIDnC56QBosglKp8ftRCTxR+0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.36 h1:7ZApaXzWbo8slc+W5TynuUlB4z66g44h7uqa3/d/BsY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.36/go.mod h1:rwr4WnmFi3RJO0M4dxbJtgi9BPLMpVBMX1nUte5ha9U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod 
h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.44 h1:quOJOqlbSfeJTboXLjYXM1M9T52LBXqLoTPlmsKLpBo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.44/go.mod h1:LNy+P1+1LiRcCsVYr/4zG5n8zWFL0xsvZkOybjbftm8= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 h1:wscW+pnn3J1OYnanMnza5ZVYXLX4cKk5rAvUAl4Qu+c= github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26/go.mod h1:MtYiox5gvyB+OyP0Mr0Sm/yzbEAIPL9eijj/ouHAPw0= -github.com/aws/aws-sdk-go-v2/service/amp v1.16.14 h1:cak6jLkSwmPqcJ7pcVlkABsYfjCxxiyjBM2xBgjPwmY= -github.com/aws/aws-sdk-go-v2/service/amp v1.16.14/go.mod h1:Tq9wKXE+SPKKkwJSRHE/u+aOdUdvU//AuPfi/w6iNdc= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.16.14 h1:mXf/MQX2zcKpWTfI4YgHrD4UYBh6AzyBCRfVdsxExaU= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.16.14/go.mod h1:KJyzRVA5DkFaU4hVgKDoHiSrCobfmYP8UpRXlybTuTU= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.13.15 h1:lgTqmtilhObvVhxeBhX/KRC5RaB4A0dQqDDdLmfAP+0= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.13.15/go.mod h1:lg/1D90DDo2//C84mvygysHF4JRo+Vf/W5YbkHoeUk8= +github.com/aws/aws-sdk-go-v2/service/amp v1.17.5 h1:Wg2vTVYrMrfkNqrCGaggQq1UBdzgrAsorAfavLNpU/E= +github.com/aws/aws-sdk-go-v2/service/amp v1.17.5/go.mod h1:JXkUFaC1ISQYHO535+mgMPF0b1OaSdrsM5FhFfBbbQY= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.18.0 h1:rByriM7T0xvKy7eDiNUhFyVgnGupZ7DIifReKDzfk5E= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.18.0/go.mod h1:OJmEdRP/gDTqY71Cc/eJ/anpvvGHNgf62FyNuah3X48= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.14.5 h1:pLmOgMUiwXOi3oKx2J3feVb9JGVgwJ78RYnOV9UR0BM= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.14.5/go.mod h1:4eIs6K6ag6ymoUMOFfjm9dmP9KbuKgC7K5eIqlIBsbY= github.com/aws/aws-sdk-go-v2/service/appconfig 
v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.28.10 h1:moHEk4wbdc8VNvff4UOLuXVHtjh7YtsGdiyB0MrPPKg= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.28.10/go.mod h1:P3qp1VYVoxHgDhpDDCTre1ee9IKpmgqnUoOb+8RA9qI= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.30.6 h1:OuxP8FzE3++AjQ8wabMcwJxtS25inpTIblMPNzV3nB8= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.30.6/go.mod h1:iHCpld+TvQd0odwp6BiwtL9H9LbU41kPW1i9oBy3iOo= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= -github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.27.0 h1:8ei9YIP3tmLbIX4rh1Hq9MM8/rpb1QBtHreVN/TP7wQ= -github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.27.0/go.mod h1:UXh7fjHrDoVd/tRPQyGCSfb04setwR75qxAx7+x1vcU= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.106.0 h1:chzRNw2kwcrosHm0k72Wyf4sbUNcG8+HeCJbSBtsOTk= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.106.0/go.mod h1:/0btVmMZJ0sn9JQ2N96XszlQNeRCJhhXOS/sPZgDeew= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.30.4 h1:Ir8BEejwSOOrD9juzFSMdXkXPyIdj1DfkFR+FJb0kc8= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.30.4/go.mod h1:NSAyKko0rDkrZOjcdCPPvMEe+FyIw/aDDQ8X+xAIW44= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.117.0 h1:Yq39vbwQX+Xw+Ubcsg/ElwO+TWAxAIAdrREtpjGnCHw= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.117.0/go.mod h1:0FhI2Rzcv5BNM3dNnbcCx2qa2naFZoAidJi11cQgzL0= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod 
h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 h1:zZSLP3v3riMOP14H7b4XP0uyfREDQOYv2cqIrvTXDNQ= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29/go.mod h1:z7EjRjVwZ6pWcWdI2H64dKttvzaP99jRIj5hphW0M5U= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.36 h1:YXlm7LxwNlauqb2OrinWlcvtsflTzP8GaMvYfQBhoT4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.36/go.mod h1:ou9ffqJ9hKOVZmjlC6kQ6oROAyG1M4yBKzR+9BKbDwk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 h1:dBL3StFxHtpBzJJ/mNEsjXVgfO+7jR0dAIEwLqMapEA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3/go.mod h1:f1QyiAsvIv4B49DmCqrhlXqyaR+0IxMmyX+1P+AnzOM= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.14.15 h1:5I9Yi2Ls1q8/VTpRmlLOGilFCtJNsEms+64BhYybm7Y= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.14.15/go.mod h1:86l8OObGPcaNgQ2pVaRRdaHTepispGs2UYLp8niWkSM= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.15.5 h1:dMsTYzhTpsDMY79IzCh/jq1tHRwgfa15ujhKUjZk0fg= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.15.5/go.mod h1:Lh/6ABs1m80bEB36fAW9gEPW5kSsAr7Mdn8dGyWRLp0= github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1 h1:rYYwwsGqbwvGgQHjBkqgDt8MynXk+I8xgS0IEj5gOT0= github.com/aws/aws-sdk-go-v2/service/s3 v1.34.1/go.mod h1:aVbf0sko/TsLWHx30c/uVu7c62+0EAJ3vbxaJga0xCw= -github.com/aws/aws-sdk-go-v2/service/shield v1.18.13 
h1:/QqZKWvxShuecy5hZm6P4pJQ2Uzn6TSJtsd9xeaqLG0= -github.com/aws/aws-sdk-go-v2/service/shield v1.18.13/go.mod h1:YcHL79qHynGYok2NKGb3+mrb6EWROWD4gBU3v+tKtUM= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0 h1:dPCRgAL4WD9tSMaDglRNGOiAtSTjkwNiUW5GDpWFfHA= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0/go.mod h1:4Ae1NCLK6ghmjzd45Tc33GgCKhUWD2ORAlULtMO1Cbs= +github.com/aws/aws-sdk-go-v2/service/shield v1.19.5 h1:zX/1OHVjTNB2D1xiQ0pByYNLbVgbl84fTj5W4tMKdAk= +github.com/aws/aws-sdk-go-v2/service/shield v1.19.5/go.mod h1:NKqcE1DkD5YSbTAR8MxhFGFDmSkGNo68/Q8hht3Mi5w= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sso v1.12.12/go.mod h1:HuCOxYsF21eKrerARYO6HapNeh9GBNq7fius2AcwodY= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.1 h1:ZN3bxw9OYC5D6umLw6f57rNJfGfhg1DIAAcKpzyUTOE= -github.com/aws/aws-sdk-go-v2/service/sso v1.15.1/go.mod h1:PieckvBoT5HtyB9AsJRrYZFY2Z+EyfVM/9zG6gbV8DQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12/go.mod h1:E4VrHCPzmVB/KFXtqBGKb3c8zpbNBgKe3fisDNLAW5w= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.2 h1:fSCCJuT5i6ht8TqGdZc5Q5K9pz/atrf7qH4iK5C9XzU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.2/go.mod h1:5eNtr+vNc5vVd92q7SJ+U/HszsIdhZBEyi9dkMRKsp8= -github.com/aws/aws-sdk-go-v2/service/storagegateway v1.18.16 h1:Gk+75k6j55fqE+uA/99jAlcZBY4OLT244JuKp+HLXxo= -github.com/aws/aws-sdk-go-v2/service/storagegateway v1.18.16/go.mod h1:l/XhpyuxnJ3s8yKi9h0XDwVqM18iDEFeUVDYGCEcE/g= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= 
+github.com/aws/aws-sdk-go-v2/service/storagegateway v1.19.6 h1:DfxHxomSOVAmiYb4I1IkcrKtjFrm4EHUEw/oHPuNgxI= +github.com/aws/aws-sdk-go-v2/service/storagegateway v1.19.6/go.mod h1:o3x7HLasCY8mN914V4611sbXPOE54V8t0pzCtz5bxQ0= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.1 h1:ASNYk1ypWAxRhJjKS0jBnTUeDl7HROOpeSMu1xDA/I8= -github.com/aws/aws-sdk-go-v2/service/sts v1.23.1/go.mod h1:2cnsAhVT3mqusovc2stUSUrSBGTcX9nh8Tu6xh//2eI= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 h1:HJeiuZ2fldpd0WqngyMR6KW7ofkXNLyOaHwEIGm39Cs= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= -github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 h1:0NmehRCgyk5rljDQLKUO+cRJCnduDyn11+zGZIc9Z48= github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0/go.mod h1:6L7zgvqo0idzI7IO8de6ZC051AfXb5ipkIJ7bIA2tGA= github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw= @@ -427,12 +427,11 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boynux/squid-exporter 
v1.10.5-0.20230618153315-c1fae094e18e h1:C1vYe728vM2FpXaICJuDRt5zgGyRdMmUGYnVfM7WcLY= github.com/boynux/squid-exporter v1.10.5-0.20230618153315-c1fae094e18e/go.mod h1:8NpZERGK+R9DGuZqqsKfnf2qI/rh7yBT8End29IvgNA= -github.com/bufbuild/connect-go v1.10.0 h1:QAJ3G9A1OYQW2Jbk3DeoJbkCxuKArrvZgDt47mjdTbg= -github.com/bufbuild/connect-go v1.10.0/go.mod h1:CAIePUgkDR5pAFaylSMtNK45ANQjp9JvpluG20rhpV8= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/burningalchemist/sql_exporter v0.0.0-20221222155641-2ff59aa75200 h1:1zECtssRshqhP8+DELKyWeg8rxaRC5OO72kJQhrJOE8= -github.com/burningalchemist/sql_exporter v0.0.0-20221222155641-2ff59aa75200/go.mod h1:76BPyhi77q3CgH4ZcgW91r0+34cRMpORDnjlf0CmWpk= +github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4 h1:dgjwrjeVe90AeMhrx04TmDKjZe7xqKKEUxT3QKNx9RU= +github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4/go.mod h1:aRr7CZ/KleZpcDkQVsNeXE1BFT3xRG8baUHJ7J+j8NI= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= @@ -474,6 +473,9 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.3/go.mod 
h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -493,12 +495,14 @@ github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8a github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8= -github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4= +github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= +github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= 
github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= @@ -559,17 +563,13 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/digitalocean/godo v1.99.0 h1:gUHO7n9bDaZFWvbzOum4bXE0/09ZuYA9yA8idQHX57E= -github.com/digitalocean/godo v1.99.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= +github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= +github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= 
github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -601,8 +601,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.4.0 h1:3OK9bWpPk5q6pbFAaYSEwD9CLUSHG8bnZuqX2yMt3B0= @@ -626,14 +626,16 @@ github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQ github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/ema/qdisc v1.0.0 h1:EHLG08FVRbWLg8uRICa3xzC9Zm0m7HyMHfXobWFnXYg= github.com/ema/qdisc v1.0.0/go.mod h1:FhIc0fLYi7f+lK5maMsesDqwYojIOh3VfRs8EVd5YJQ= github.com/emicklei/go-restful 
v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.0.0-20180919002855-2137d9196328/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -676,7 +678,6 @@ github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= @@ -702,22 +703,25 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/ghodss/yaml 
v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7 h1:wT/4jrX36BHZMjkpPYrCY4lJR+HHG7L+cC0M3p5letQ= github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7/go.mod h1:q49R4E4fu+HqGnSSSFpAuJIMm8DV5YNhKBW/Ke9SBPE= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 
h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -733,8 +737,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= @@ -862,21 +866,18 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.0.0 
h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= @@ -1002,26 +1003,23 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod 
h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= -github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= +github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= @@ -1057,8 +1055,8 @@ 
github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9 h1:WB3bGH2f1UN6 github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a h1:lvSHlNONeo/H+aWRk86QEfBpRDCEX1yoqpsCK0Tys+g= github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a/go.mod h1:a5c5ZTC6FNufKkvF8NeDAb2nCWJpgkVDrejmV+O9hac= -github.com/grafana/loki/pkg/push v0.0.0-20230904153656-e4cc2a4f5ec8 h1:yQK/dX7WBva5QvITvmIcbv4boLwSo65a8zjuZcucnko= -github.com/grafana/loki/pkg/push v0.0.0-20230904153656-e4cc2a4f5ec8/go.mod h1:5ll3An1wAxYejo6aM04+3/lc6N4joYVYLY5U+Z4O6vI= +github.com/grafana/loki/pkg/push v0.0.0-20231212100434-384e5c2dc872 h1:6kPX7bngjBgUlHqADwZ6249UtzMaoQW5n0H8bOtnYeM= +github.com/grafana/loki/pkg/push v0.0.0-20231212100434-384e5c2dc872/go.mod h1:f3JSoxBTPXX5ec4FxxeC19nTBSxoTz+cBgS3cYLMcr0= github.com/grafana/mysqld_exporter v0.12.2-0.20231005125903-364b9c41e595 h1:I9sRknI5ajd8whPOX0nBDXy5B6xUfhItClMy+6R4oqE= github.com/grafana/mysqld_exporter v0.12.2-0.20231005125903-364b9c41e595/go.mod h1:U8ifHC5pT2WuVTO7ki4KZmWLjfEKfktQiU3bh0J8scw= github.com/grafana/node_exporter v0.18.1-grafana-r01.0.20231004161416-702318429731 h1:vyyIYY2sLpmgFIckJ1vSO/oYkvB0thDF6UiFYp5PThM= @@ -1069,14 +1067,14 @@ github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e0 github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e052230/go.mod h1:kBdpzrqR2wJkOdg50yzp4dv+2XBMyeqTgF4lCx0hSpQ= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 h1:HnFWqxhoSF3WC7sKAdMZ+SRXvHLVZlZ3sbQjuUlTqkw= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520/go.mod h1:+HPXgiOV0InDHcZ2jNijL1SOKvo0eEPege5fQA0+ICI= -github.com/grafana/prometheus v1.8.2-0.20231016083943-46550094220d h1:hr0QEXSfpdakWdHw2sZeT/5GnGwIkHnNO0YBkfRj5zk= -github.com/grafana/prometheus v1.8.2-0.20231016083943-46550094220d/go.mod 
h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M= -github.com/grafana/pyroscope-go/godeltaprof v0.1.3 h1:eunWpv1B3Z7ZK9o4499EmQGlY+CsDmSZ4FbxjRx37uk= -github.com/grafana/pyroscope-go/godeltaprof v0.1.3/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko= -github.com/grafana/pyroscope/api v0.2.0 h1:TzOxL0s6SiaLEy944ZAKgHcx/JDRJXu4O8ObwkqR6p4= -github.com/grafana/pyroscope/api v0.2.0/go.mod h1:nhH+xai9cYFgs6lMy/+L0pKj0d5yCMwji/QAiQFCP+U= -github.com/grafana/pyroscope/ebpf v0.4.0 h1:7fz+5S6MLSc+5cJfmIe7OCvBdooxEm8xy5OAV0s7GA0= -github.com/grafana/pyroscope/ebpf v0.4.0/go.mod h1:eF5+k9lAUBYILVzGccr3hrrvuLy5ZvbDRWGQHDe021w= +github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2 h1:eJD8U9G91ID/pKsLjJnjqve8yv1NiE/l6dGYnwchPVM= +github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= +github.com/grafana/pyroscope/api v0.4.0 h1:J86DxoNeLOvtJhB1Cn65JMZkXe682D+RqeoIUiYc/eo= +github.com/grafana/pyroscope/api v0.4.0/go.mod h1:MFnZNeUM4RDsDOnbgKW3GWoLSBpLzMMT9nkvhHHo81o= +github.com/grafana/pyroscope/ebpf v0.4.1 h1:iqQoOsfKen5KpTRe6MfGeBZfgK1s7ROH+Cs/vZs1B3A= +github.com/grafana/pyroscope/ebpf v0.4.1/go.mod h1:W99Mq+yJGP5nZUQWNv+jVytiWWgWXwHjIRmi9k3xHzA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grafana/river v0.3.0 h1:6TsaR/vkkcppUM9I0muGbPIUedCtpPu6OWreE5+CE6g= @@ -1133,8 +1131,8 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod 
h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd/go.mod h1:ueUgD9BeIocT7QNuvxSyJyPAM9dfifBcaWmeybb67OY= -github.com/hashicorp/go-discover v0.0.0-20220105235006-b95dfa40aaed h1:TOX8g4kKKZ1tkDRZo7hiqHwUSJTNK5li8pU6GmpVQD0= -github.com/hashicorp/go-discover v0.0.0-20220105235006-b95dfa40aaed/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= +github.com/hashicorp/go-discover v0.0.0-20230724184603-e89ebd1b2f65 h1:+ZwaKkFuVWS7FZoiltT+7XW/MEFckY9Wxe+xIEErLcM= +github.com/hashicorp/go-discover v0.0.0-20230724184603-e89ebd1b2f65/go.mod h1:RH2Jr1/cCsZ1nRLmAOC65hp/gRehf55SsUIYV2+NAxI= github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= github.com/hashicorp/go-hclog v0.0.0-20180402200405-69ff559dc25f/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= @@ -1224,8 +1222,8 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= -github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e h1:sr4lujmn9heD030xx/Pd4B/JSmvRhFzuotNXaaV0WLs= -github.com/hashicorp/nomad/api v0.0.0-20230718173136-3a687930bd3e/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= +github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c h1:Nc3Mt2BAnq0/VoLEntF/nipX+K1S7pG+RgwiitSv6v0= +github.com/hashicorp/nomad/api v0.0.0-20230721134942-515895c7690c/go.mod h1:O23qLAZuCx4htdY9zBaO4cJPXgleSFEdq6D/sezGgYE= github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft-boltdb 
v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= @@ -1262,8 +1260,8 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/heroku/x v0.0.61 h1:yfoAAtnFWSFZj+UlS+RZL/h8QYEp1R4wHVEg0G+Hwh4= github.com/heroku/x v0.0.61/go.mod h1:C7xYbpMdond+s6L5VpniDUSVPRwm3kZum1o7XiD5ZHk= -github.com/hetznercloud/hcloud-go/v2 v2.0.0 h1:Sg1DJ+MAKvbYAqaBaq9tPbwXBS2ckPIaMtVdUjKu+4g= -github.com/hetznercloud/hcloud-go/v2 v2.0.0/go.mod h1:4iUG2NG8b61IAwNx6UsMWQ6IfIf/i1RsG0BbsKAyR5Q= +github.com/hetznercloud/hcloud-go/v2 v2.4.0 h1:MqlAE+w125PLvJRCpAJmEwrIxoVdUdOyuFUhE/Ukbok= +github.com/hetznercloud/hcloud-go/v2 v2.4.0/go.mod h1:l7fA5xsncFBzQTyw29/dw5Yr88yEGKKdc6BHf24ONS0= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= @@ -1305,8 +1303,8 @@ github.com/influxdata/telegraf v1.16.3 h1:x0qeuSGGMg5y+YqP/5ZHwXZu3bcBrO8AAQOTNl github.com/influxdata/telegraf v1.16.3/go.mod h1:fX/6k7qpIqzVPWyeIamb0wN5hbwc0ANUaTS80lPYFB8= github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= -github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= -github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4= +github.com/ionos-cloud/sdk-go/v6 v6.1.9/go.mod 
h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -1318,8 +1316,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= -github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= +github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -1335,23 +1333,24 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= -github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b 
h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= -github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.17.2 
h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= -github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -1372,14 +1371,12 @@ github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVET github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jefferai/jsonx v0.0.0-20160721235117-9cc31c3135ee/go.mod h1:N0t2vlmpe8nyZB5ouIbJQPDSR+mH6oe7xHB9VZHSUzM= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath 
v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1432,9 +1429,8 @@ github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1q github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v1.1.0 h1:pH/t1WS9NzT8go394IqZeJTMHVm6Cr6ZJ6AQ+mdNo/o= -github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/keybase/go-crypto v0.0.0-20180614160407-5114a9a81e1b/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -1488,15 +1484,14 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/go-expohisto v1.0.0 
h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= -github.com/linode/linodego v1.19.0 h1:n4WJrcr9+30e9JGZ6DI0nZbm5SdAj1kSwvvt/998YUw= -github.com/linode/linodego v1.19.0/go.mod h1:XZFR+yJ9mm2kwf6itZ6SCpu+6w3KnIevV0Uu5HNWJgQ= +github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU= +github.com/linode/linodego v1.23.0/go.mod h1:0U7wj/UQOqBNbKv1FYTXiBUXueR8DY4HvIotwE0ENgg= github.com/lufia/iostat v1.2.1 h1:tnCdZBIglgxD47RyD55kfWQcJMGzO+1QBziSQfesf2k= github.com/lufia/iostat v1.2.1/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= @@ -1518,8 +1513,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -1571,14 +1564,14 @@ 
github.com/mdlayher/wifi v0.1.0 h1:y8wYRUXwok5CtUZOXT3egghYesX0O79E3ALl+SIDm9Q= github.com/mdlayher/wifi v0.1.0/go.mod h1:+gBYnZAMcUKHSFzMJXwlz7tLsEHgwDJ9DJCefhJM+gI= github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQovz3p+BiQ81Jy845xSMu2CWKuXsXuUM= github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= -github.com/microsoft/go-mssqldb v0.19.0 h1:LMRSgLcNMF8paPX14xlyQBmBH+jnFylPsYpVZf86eHM= -github.com/microsoft/go-mssqldb v0.19.0/go.mod h1:ukJCBnnzLzpVF0qYRT+eg1e+eSwjeQ7IvenUv8QPook= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= @@ -1638,9 +1631,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 
v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -1681,8 +1672,8 @@ github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 h1:t4WWQ9I797y7QU github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833/go.mod h1:0CznHmXSjMEqs5Tezj/w2emQoM41wzYM9KpDKUHPYag= github.com/ncabatoff/process-exporter v0.7.10 h1:+Ere7+3se6QqP54gg7aBRagWcL8bq3u5zNi/GRSWeKQ= github.com/ncabatoff/process-exporter v0.7.10/go.mod h1:DHZRZjqxw9LCOpLlX0DjBuyn6d5plh41Jv6Tmttj7Ek= -github.com/nerdswords/yet-another-cloudwatch-exporter v0.54.0 h1:a2jReAfDiSyU/aXCKO05hcJMTdtyQQyj41Jmwyg6fh8= -github.com/nerdswords/yet-another-cloudwatch-exporter v0.54.0/go.mod h1:VngmqrhYKwZzUuv/sgVCfUhLB6BkgSZfL5USqZbGKnY= +github.com/nerdswords/yet-another-cloudwatch-exporter v0.55.0 h1:M3fH9gzU48jBfYbXXYEZVTcUhnfhDIG/oeIQl6kBGP0= +github.com/nerdswords/yet-another-cloudwatch-exporter v0.55.0/go.mod h1:GR4pDHlRonT97AsGSmlcWiISF8AjifK/19SAVD0tIlU= github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= @@ -1861,8 +1852,8 @@ 
github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrz github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.11.0 h1:aSXMqYR/EPNjGE8epgqwDay+P30hCBZIveY0WZbAWh0= github.com/oschwald/maxminddb-golang v1.11.0/go.mod h1:YmVI+H0zh3ySFR3w+oz8PCfglAFj3PuCmui13+P9zDg= -github.com/ovh/go-ovh v1.4.1 h1:VBGa5wMyQtTP7Zb+w97zRCh9sLtM/2YKRyy+MEJmWaM= -github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC0M= +github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= +github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= @@ -1893,7 +1884,8 @@ github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9F github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod 
h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1948,8 +1940,8 @@ github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1982,8 +1974,8 @@ github.com/prometheus/consul_exporter v0.8.0/go.mod h1:KHTgkT1/oLpXKC4+mKZV63hZS github.com/prometheus/exporter-toolkit v0.6.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8= -github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8= +github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= 
+github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/memcached_exporter v0.13.0 h1:d246RYODFCXy39XA8S2PBrqp5jLCSvl9b4KsYspDCHk= github.com/prometheus/memcached_exporter v0.13.0/go.mod h1:fp7Wk6v0RFijeP3Syvd1TShBSJoCG5iFfvPdi5dCMEU= github.com/prometheus/procfs v0.0.0-20180408092902-8b1c2da0d56d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -2006,8 +1998,6 @@ github.com/prometheus/snmp_exporter v0.24.1/go.mod h1:j6uIGkdR0DXvKn7HJtSkeDj//U github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0= github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM= -github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= -github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= @@ -2056,8 +2046,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCL github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= 
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 h1:yWfiTPwYxB0l5fGMhl/G+liULugVIHD9AU77iNLrURQ= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= @@ -2106,6 +2096,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -2114,8 +2106,8 @@ github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:X github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/snowflakedb/gosnowflake v1.6.22 h1:2crLpqmFVyV03NPAxxAtzQBMFn6wUPqOJ1uRl4ruOJ4= -github.com/snowflakedb/gosnowflake v1.6.22/go.mod h1:P2fE/xiD2kQXpr48OdgnazkzPsKD6aVtnHD3WP8yD9c= +github.com/snowflakedb/gosnowflake 
v1.7.2-0.20240103203018-f1d625f17408 h1:uhlKs7Mu7Pc1eLUFGOzBIUA7m7YKyMYu7egQ1LMYAik= +github.com/snowflakedb/gosnowflake v1.7.2-0.20240103203018-f1d625f17408/go.mod h1:xVuVfmC5OturIIXxT0TPTyLPGLSNZaDbgJh2AIIWzLE= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -2222,8 +2214,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vertica/vertica-sql-go v1.3.0 h1:oZL8PgwrpALegtTFhImsaJvg5oQd2G7v7Uai97X9Xp8= -github.com/vertica/vertica-sql-go v1.3.0/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= @@ -2255,9 +2247,8 @@ github.com/wk8/go-ordered-map v0.2.0 h1:KlvGyHstD1kkGZkPtHCyCfRYS0cz84uk6rrW/Dnh github.com/wk8/go-ordered-map v0.2.0/go.mod h1:9ZIbRunKbuvfPKyBP1SIKLcXNlv74YCOZ3t3VTS6gRk= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod 
h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.1 h1:AmzO1SSWxw73zxFZPRwaMN1MohDw8UyHnmuxyceTEGo= -github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= @@ -2282,8 +2273,8 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xo/dburl v0.13.0 h1:kq+oD1j/m8DnJ/p6G/LQXRosVchs8q5/AszEUKkvYfo= -github.com/xo/dburl v0.13.0/go.mod h1:K6rSPgbVqP3ZFT0RHkdg/M3M5KhLeV2MaS/ZqaLd1kA= +github.com/xo/dburl v0.20.0 h1:v601OhM9J4Zh56R270ncM9HRgoxp39tf9+nt5ft9UD0= +github.com/xo/dburl v0.20.0/go.mod h1:B7/G9FGungw6ighV8xJNwWYQPMfn3gsi2sn5SE8Bzco= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -2466,8 +2457,6 @@ go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fR go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -2484,35 +2473,47 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.15.0 
h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa 
h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -2526,8 +2527,12 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -2535,9 +2540,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2585,7 +2588,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -2596,7 +2598,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v 
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= @@ -2607,16 +2608,16 @@ golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.18.0 
h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2665,7 +2666,6 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2687,10 +2687,10 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2738,14 +2738,12 @@ golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2763,7 +2761,6 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2774,6 +2771,7 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2781,8 +2779,8 @@ golang.org/x/sys 
v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.1-0.20231108175955-e4099bfacb8c h1:3kC/TjQ+xzIblQv39bCOyRk8fbEeJcDHwbyxPUU2BpA= -golang.org/x/sys v0.14.1-0.20231108175955-e4099bfacb8c/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2790,10 +2788,11 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2808,6 +2807,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -2830,6 +2830,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -2845,9 +2846,11 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2863,6 +2866,7 @@ golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2894,7 +2898,6 @@ golang.org/x/tools v0.1.3/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= @@ -2946,8 +2949,8 @@ google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00 google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM= -google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3021,12 +3024,12 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8= -google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -3081,8 +3084,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -3118,8 +3121,6 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLv gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -3157,7 +3158,6 @@ howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod 
h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.0.0-20190325185214-7544f9db76f6/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= @@ -3166,11 +3166,9 @@ k8s.io/apiextensions-apiserver v0.28.0/go.mod h1:uRdYiwIuu0SyqJKriKmqEN2jThIJPhV k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= @@ -3181,11 +3179,9 @@ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod 
h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= @@ -3204,8 +3200,6 @@ sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwl sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= diff --git a/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet b/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet index 32fe51506047..d9e0d8d170b4 100644 --- 
a/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet +++ b/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet @@ -290,7 +290,7 @@ local stackedPanelMixin = { received a sample for. Active series are garbage collected whenever a truncation of the WAL occurs. |||) + - panel.withPosition({ x: 0, y: 20, w: 12, h: 10 }) + + panel.withPosition({ x: 0, y: 20, w: 8, h: 10 }) + panel.withQueries([ panel.newQuery( expr=||| @@ -301,25 +301,48 @@ local stackedPanelMixin = { ]) ), + // Active series (by instance/component) + ( + panel.new(title='Active series (by instance/component)', type='timeseries') + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series which are currently being tracked by + prometheus.remote_write components, with separate lines for each agent instance. + + An "active series" is a series that prometheus.remote_write recently + received a sample for. Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 8, y: 20, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"} + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + // Active series (by component) ( panel.new(title='Active series (by component)', type='timeseries') + panel.withUnit('short') + panel.withDescription(||| Total number of active series which are currently being tracked by - prometheus.remote_write components. + prometheus.remote_write components, aggregated across all instances. An "active series" is a series that prometheus.remote_write recently received a sample for. Active series are garbage collected whenever a truncation of the WAL occurs. 
|||) + - panel.withPosition({ x: 12, y: 20, w: 12, h: 10 }) + + panel.withPosition({ x: 16, y: 20, w: 8, h: 10 }) + panel.withQueries([ panel.newQuery( expr=||| - agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + sum by (component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"}) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{component_id}}', ), ]) ), diff --git a/production/grafana-agent-mixin/alerts.libsonnet b/operations/agent-static-mixin/alerts.libsonnet similarity index 65% rename from production/grafana-agent-mixin/alerts.libsonnet rename to operations/agent-static-mixin/alerts.libsonnet index 51fe81e0e539..653be89ce471 100644 --- a/production/grafana-agent-mixin/alerts.libsonnet +++ b/operations/agent-static-mixin/alerts.libsonnet @@ -65,132 +65,6 @@ local _config = config._config; }, ], }, - { - name: 'GrafanaAgentSmokeChecks', - rules: [ - { - alert: 'GrafanaAgentDown', - expr: ||| - up{ - namespace="agent-smoke-test", - pod=~"grafana-agent-smoke-test-(0|cluster-0|cluster-1|cluster-2)", - } == 0 - |||, - 'for': '5m', - annotations: { - summary: '{{ $labels.job }} is down', - }, - }, - { - alert: 'GrafanaAgentFlapping', - expr: ||| - avg_over_time(up{ - namespace="agent-smoke-test", - pod=~"grafana-agent-smoke-test-(0|cluster-0|cluster-1|cluster-2)", - }[5m]) < 1 - |||, - 'for': '15m', - annotations: { - summary: '{{ $labels.job }} is flapping', - }, - }, - - // Checks that the CPU usage doesn't go too high. This was generated from internal usage where - // every 1,000 active series used roughly 0.0013441% of CPU. This alert only fires if there is a - // minimum load threshold of at least 1000 active series. 
- { - alert: 'GrafanaAgentCPUHigh', - expr: ||| - (sum by (pod) (rate(container_cpu_usage_seconds_total{cluster=~".+", namespace=~"agent-smoke-test", container=~".+", pod="grafana-agent-smoke-test-cluster-2"}[5m])) - / - (sum by (pod) (agent_wal_storage_active_series{cluster=~".+", namespace=~"agent-smoke-test", container=~".+", pod="grafana-agent-smoke-test-cluster-2"}) / 1000) - > 0.0013441) - and - sum by (pod) (agent_wal_storage_active_series{cluster=~".+", namespace=~"agent-smoke-test", container=~".+", pod="grafana-agent-smoke-test-cluster-2"}) > 1000 - |||, - 'for': '1h', - annotations: { - summary: '{{ $labels.pod }} is using more than 0.0013441 CPU per 1000 series over the last 5 minutes', - }, - }, - - // We assume roughly ~8KB per series. Check that each deployment - // doesn't go too far above this. - // - // We aggregate the memory of the scraping service together since an individual - // node with a really small number of active series will throw this metric off. - { - alert: 'GrafanaAgentMemHigh', - expr: ||| - sum without (pod, instance) (go_memstats_heap_inuse_bytes{job=~"agent-smoke-test/grafana-agent-smoke-test.*"}) / - sum without (pod, instance, instance_group_name) (agent_wal_storage_active_series{job=~"agent-smoke-test/grafana-agent-smoke-test.*"}) / 1e3 > 10 - |||, - 'for': '1h', - annotations: { - summary: '{{ $labels.job }} has used more than 10KB per series for more than 5 minutes', - }, - }, - { - alert: 'GrafanaAgentContainerRestarts', - expr: ||| - sum by (pod) (rate(kube_pod_container_status_restarts_total{namespace="agent-smoke-test"}[10m])) > 0 - |||, - annotations: { - summary: '{{ $labels.pod }} has a high rate of container restarts', - }, - }, - ], - }, - { - name: 'GrafanaAgentCrowChecks', - rules: [ - { - alert: 'CrowDown', - expr: ||| - up{job=~"agent-smoke-test/crow-.*"} == 0 - |||, - 'for': '5m', - annotations: { - summary: 'Crow {{ $labels.job }} is down.', - }, - }, - { - alert: 'CrowFlapping', - expr: ||| - 
avg_over_time(up{job=~"agent-smoke-test/crow-.*"}[5m]) < 1 - |||, - 'for': '15m', - annotations: { - summary: 'Crow {{ $labels.job }} is flapping.', - }, - }, - { - alert: 'CrowNotScraped', - expr: ||| - rate(crow_test_samples_total[5m]) == 0 - |||, - 'for': '15m', - annotations: { - summary: 'Crow {{ $labels.job }} is not being scraped.', - }, - }, - { - alert: 'CrowFailures', - expr: ||| - ( - rate(crow_test_sample_results_total{result="success"}[5m]) - / - ignoring(result) sum without (result) (rate(crow_test_sample_results_total[5m])) - ) - < 1 - |||, - 'for': '15m', - annotations: { - summary: 'Crow {{ $labels.job }} has had failures for at least 5m', - }, - }, - ], - }, { name: 'VultureChecks', rules: [ @@ -358,7 +232,7 @@ local _config = config._config; }, annotations: { message: ||| - Instance {{ $labels.instance }} failed to successfully reload the config. + Instance {{ $labels.instance }} failed to successfully reload the config. |||, }, }, @@ -373,7 +247,7 @@ local _config = config._config; }, annotations: { message: ||| - Instance {{ $labels.instance }} failed to successfully reload the config. + Instance {{ $labels.instance }} failed to successfully reload the config. |||, }, }, @@ -388,7 +262,7 @@ local _config = config._config; }, annotations: { message: ||| - Instance {{ $labels.instance }} fell back to empty configuration. + Instance {{ $labels.instance }} fell back to empty configuration. |||, }, }, @@ -403,12 +277,12 @@ local _config = config._config; }, annotations: { message: ||| - Instance {{ $labels.instance }} fell back to empty configuration. + Instance {{ $labels.instance }} fell back to empty configuration. 
|||, }, }, ], - }, + }, ], }, } diff --git a/production/grafana-agent-mixin/config.libsonnet b/operations/agent-static-mixin/config.libsonnet similarity index 100% rename from production/grafana-agent-mixin/config.libsonnet rename to operations/agent-static-mixin/config.libsonnet diff --git a/production/grafana-agent-mixin/dashboards.libsonnet b/operations/agent-static-mixin/dashboards.libsonnet similarity index 100% rename from production/grafana-agent-mixin/dashboards.libsonnet rename to operations/agent-static-mixin/dashboards.libsonnet diff --git a/production/grafana-agent-mixin/debugging.libsonnet b/operations/agent-static-mixin/debugging.libsonnet similarity index 100% rename from production/grafana-agent-mixin/debugging.libsonnet rename to operations/agent-static-mixin/debugging.libsonnet diff --git a/production/grafana-agent-mixin/jsonnetfile.json b/operations/agent-static-mixin/jsonnetfile.json similarity index 100% rename from production/grafana-agent-mixin/jsonnetfile.json rename to operations/agent-static-mixin/jsonnetfile.json diff --git a/production/grafana-agent-mixin/mixin.libsonnet b/operations/agent-static-mixin/mixin.libsonnet similarity index 100% rename from production/grafana-agent-mixin/mixin.libsonnet rename to operations/agent-static-mixin/mixin.libsonnet diff --git a/production/grafana-agent-mixin/utils.libsonnet b/operations/agent-static-mixin/utils.libsonnet similarity index 100% rename from production/grafana-agent-mixin/utils.libsonnet rename to operations/agent-static-mixin/utils.libsonnet diff --git a/production/operator/crds/monitoring.coreos.com_podmonitors.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml similarity index 100% rename from production/operator/crds/monitoring.coreos.com_podmonitors.yaml rename to operations/agent-static-operator/crds/monitoring.coreos.com_podmonitors.yaml diff --git a/production/operator/crds/monitoring.coreos.com_probes.yaml 
b/operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml similarity index 100% rename from production/operator/crds/monitoring.coreos.com_probes.yaml rename to operations/agent-static-operator/crds/monitoring.coreos.com_probes.yaml diff --git a/production/operator/crds/monitoring.coreos.com_servicemonitors.yaml b/operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml similarity index 100% rename from production/operator/crds/monitoring.coreos.com_servicemonitors.yaml rename to operations/agent-static-operator/crds/monitoring.coreos.com_servicemonitors.yaml diff --git a/production/operator/crds/monitoring.grafana.com_grafanaagents.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml similarity index 100% rename from production/operator/crds/monitoring.grafana.com_grafanaagents.yaml rename to operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml diff --git a/production/operator/crds/monitoring.grafana.com_integrations.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml similarity index 100% rename from production/operator/crds/monitoring.grafana.com_integrations.yaml rename to operations/agent-static-operator/crds/monitoring.grafana.com_integrations.yaml diff --git a/production/operator/crds/monitoring.grafana.com_logsinstances.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml similarity index 100% rename from production/operator/crds/monitoring.grafana.com_logsinstances.yaml rename to operations/agent-static-operator/crds/monitoring.grafana.com_logsinstances.yaml diff --git a/production/operator/crds/monitoring.grafana.com_metricsinstances.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml similarity index 100% rename from production/operator/crds/monitoring.grafana.com_metricsinstances.yaml rename to 
operations/agent-static-operator/crds/monitoring.grafana.com_metricsinstances.yaml diff --git a/production/operator/crds/monitoring.grafana.com_podlogs.yaml b/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml similarity index 100% rename from production/operator/crds/monitoring.grafana.com_podlogs.yaml rename to operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml diff --git a/production/operator/templates/agent-operator.yaml b/operations/agent-static-operator/templates/agent-operator.yaml similarity index 100% rename from production/operator/templates/agent-operator.yaml rename to operations/agent-static-operator/templates/agent-operator.yaml diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/grafana-agent/CHANGELOG.md index 70b612d74572..f281cc937317 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -10,10 +10,34 @@ internal API changes are not present. Unreleased ---------- +0.31.0 (2024-01-10) +------------------- + +### Enhancements + +- Update Grafana Agent version to v0.39.0. (@marctc) + +### Bugfixes + +- Configure namespace for service account when RBAC resources is created. (@hainenber) + +### Other changes + +- Change config reloader image to `ghcr.io/jimmidyson/configmap-reload:v0.12.0` to reflect change in repository and version. (@berendiwema) + +0.30.0 (2024-01-05) +------------------- + ### Enhancements - Update `rbac` to include necessary rules for the `otelcol.processor.k8sattributes` component. (@rlankfo) +- Add `serviceAccount.additionalLabels` to values.yaml to enable setting additional labels on the created service account. (@zopanix) + +### Bugfixes + +- Statefulset should use value `.controller.enableStatefulSetAutoDeletePVC` instead of just `.enableStatefulSetAutoDeletePVC`. 
(@captncraig) + 0.29.0 (2023-11-30) ------------------- diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/grafana-agent/Chart.yaml index 78630fed515d..992503befc2a 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/grafana-agent/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: grafana-agent description: 'Grafana Agent' type: application -version: 0.29.0 -appVersion: 'v0.38.1' +version: 0.31.0 +appVersion: 'v0.39.0' dependencies: - name: crds diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index 1839e56ed202..281fc208cf23 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -1,6 +1,6 @@ # Grafana Agent Helm chart -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.29.0](https://img.shields.io/badge/Version-0.29.0-informational?style=flat-square) ![AppVersion: v0.38.1](https://img.shields.io/badge/AppVersion-v0.38.1-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.31.0](https://img.shields.io/badge/Version-0.31.0-informational?style=flat-square) ![AppVersion: v0.39.0](https://img.shields.io/badge/AppVersion-v0.39.0-informational?style=flat-square) Helm chart for deploying [Grafana Agent][] to Kubernetes. @@ -64,9 +64,9 @@ use the older mode (called "static mode"), set the `agent.mode` value to | configReloader.customArgs | list | `[]` | Override the args passed to the container. | | configReloader.enabled | bool | `true` | Enables automatically reloading when the agent config changes. | | configReloader.image.digest | string | `""` | SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). 
When set, will override `configReloader.image.tag` | -| configReloader.image.registry | string | `"docker.io"` | Config reloader image registry (defaults to docker.io) | +| configReloader.image.registry | string | `"ghcr.io"` | Config reloader image registry (defaults to docker.io) | | configReloader.image.repository | string | `"jimmidyson/configmap-reload"` | Repository to get config reloader image from. | -| configReloader.image.tag | string | `"v0.8.0"` | Tag of image to use for config reloading. | +| configReloader.image.tag | string | `"v0.12.0"` | Tag of image to use for config reloading. | | configReloader.resources | object | `{"requests":{"cpu":"1m","memory":"5Mi"}}` | Resource requests and limits to apply to the config reloader container. | | configReloader.securityContext | object | `{}` | Security context to apply to the Grafana configReloader container. | | controller.affinity | object | `{}` | Affinity configuration for pods. | @@ -78,6 +78,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | controller.dnsPolicy | string | `"ClusterFirst"` | Configures the DNS policy for the pod. https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | controller.enableStatefulSetAutoDeletePVC | bool | `false` | Whether to enable automatic deletion of stale PVCs due to a scale down operation, when controller.type is 'statefulset'. | | controller.extraAnnotations | object | `{}` | Annotations to add to controller. | +| controller.extraContainers | list | `[]` | Additional containers to run alongside the agent container and initContainers. | | controller.hostNetwork | bool | `false` | Configures Pods to use the host network. When set to true, the ports that will be used must be specified. | | controller.hostPID | bool | `false` | Configures Pods to use the host PID namespace. 
| | controller.initContainers | list | `[]` | | @@ -118,6 +119,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | service.clusterIP | string | `""` | Cluster IP, can be set to None, empty "" or an IP address | | service.enabled | bool | `true` | Creates a Service for the controller's pods. | | service.type | string | `"ClusterIP"` | Service type | +| serviceAccount.additionalLabels | object | `{}` | Additional labels to add to the created service account. | | serviceAccount.annotations | object | `{}` | Annotations to add to the created service account. | | serviceAccount.create | bool | `true` | Whether to create a service account for the Grafana Agent deployment. | | serviceAccount.name | string | `nil` | The name of the existing service account to use when serviceAccount.create is false. | diff --git a/operations/helm/charts/grafana-agent/ci/additional-serviceaccount-label-values.yaml b/operations/helm/charts/grafana-agent/ci/additional-serviceaccount-label-values.yaml new file mode 100644 index 000000000000..91b7cbb7c258 --- /dev/null +++ b/operations/helm/charts/grafana-agent/ci/additional-serviceaccount-label-values.yaml @@ -0,0 +1,3 @@ +serviceAccount: + additionalLabels: + test: "true" diff --git a/operations/helm/charts/grafana-agent/ci/create-statefulset-autoscaling-values.yaml b/operations/helm/charts/grafana-agent/ci/create-statefulset-autoscaling-values.yaml index 89b51fc31c78..eedf5411b31c 100644 --- a/operations/helm/charts/grafana-agent/ci/create-statefulset-autoscaling-values.yaml +++ b/operations/helm/charts/grafana-agent/ci/create-statefulset-autoscaling-values.yaml @@ -3,6 +3,7 @@ controller: type: statefulset autoscaling: enabled: true + enableStatefulSetAutoDeletePVC: true agent: resources: requests: diff --git a/operations/helm/charts/grafana-agent/ci/sidecars-values.yaml b/operations/helm/charts/grafana-agent/ci/sidecars-values.yaml new file mode 100644 index 000000000000..a443c9a94974 --- /dev/null +++ 
b/operations/helm/charts/grafana-agent/ci/sidecars-values.yaml @@ -0,0 +1,29 @@ +controller: + extraContainers: + - name: geo-ip + image: ghcr.io/maxmind/geoipupdate:v6.0 + volumeMounts: + - name: geoip + mountPath: /etc/geoip + volumes: + - name: geoip + emptyDir: {} + env: + - name: GEOIPUPDATE_ACCOUNT_ID + value: "geoipupdate_account_id" + - name: GEOIPUPDATE_LICENSE_KEY + value: "geoipupdate_license_key" + - name: GEOIPUPDATE_EDITION_IDS + value: "GeoLite2-ASN GeoLite2-City GeoLite2-Country" + - name: GEOIPUPDATE_DB_DIR + value: "/etc/geoip" + volumes: + extra: + - name: geoip + mountPath: /etc/geoip + +agent: + mounts: + extra: + - name: geoip + mountPath: /etc/geoip diff --git a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml index 1fe0363c9232..907b5eb6bfc1 100644 --- a/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml +++ b/operations/helm/charts/grafana-agent/templates/controllers/_pod.yaml @@ -32,6 +32,9 @@ spec: containers: {{- include "grafana-agent.container" . | nindent 4 }} {{- include "grafana-agent.watch-container" . | nindent 4 }} + {{- with .Values.controller.extraContainers }} + {{- toYaml . | nindent 4 }} + {{- end}} {{- if .Values.controller.priorityClassName }} priorityClassName: {{ .Values.controller.priorityClassName }} {{- end }} diff --git a/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml index c4ef55024a22..a4965d14205d 100644 --- a/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/charts/grafana-agent/templates/controllers/statefulset.yaml @@ -1,4 +1,7 @@ {{- if eq .Values.controller.type "statefulset" }} +{{- if .Values.enableStatefulSetAutoDeletePVC }} +{{- fail "Value 'enableStatefulSetAutoDeletePVC' should be nested inside 'controller' options." 
}} +{{- end }} apiVersion: apps/v1 kind: StatefulSet metadata: @@ -35,8 +38,8 @@ spec: - {{ toYaml . | nindent 6 }} {{- end }} {{- end }} - {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.enableStatefulSetAutoDeletePVC) }} - {{/* + {{- if and (semverCompare ">= 1.23-0" .Capabilities.KubeVersion.Version) (.Values.controller.enableStatefulSetAutoDeletePVC) }} + {{- /* Data on the read nodes is easy to replace, so we want to always delete PVCs to make operation easier, and will rely on re-fetching data when needed. */}} diff --git a/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml b/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml index 8f4c8477a7d5..f2d2c90c6ec6 100644 --- a/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/charts/grafana-agent/templates/serviceaccount.yaml @@ -3,8 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "grafana-agent.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} labels: {{- include "grafana-agent.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} diff --git a/operations/helm/charts/grafana-agent/values.yaml b/operations/helm/charts/grafana-agent/values.yaml index 7b80e68618e6..0143cefe1962 100644 --- a/operations/helm/charts/grafana-agent/values.yaml +++ b/operations/helm/charts/grafana-agent/values.yaml @@ -115,6 +115,8 @@ rbac: serviceAccount: # -- Whether to create a service account for the Grafana Agent deployment. create: true + # -- Additional labels to add to the created service account. + additionalLabels: {} # -- Annotations to add to the created service account. 
annotations: {} # -- The name of the existing service account to use when @@ -127,11 +129,11 @@ configReloader: enabled: true image: # -- Config reloader image registry (defaults to docker.io) - registry: "docker.io" + registry: "ghcr.io" # -- Repository to get config reloader image from. repository: jimmidyson/configmap-reload # -- Tag of image to use for config reloading. - tag: v0.8.0 + tag: v0.12.0 # -- SHA256 digest of image to use for config reloading (either in format "sha256:XYZ" or "XYZ"). When set, will override `configReloader.image.tag` digest: "" # -- Override the args passed to the container. @@ -216,6 +218,9 @@ controller: ## initContainers: [] + # -- Additional containers to run alongside the agent container and initContainers. + extraContainers: [] + service: # -- Creates a Service for the controller's pods. enabled: true diff --git a/operations/helm/scripts/rebuild-tests.sh b/operations/helm/scripts/rebuild-tests.sh index af82eef8b12d..66ce2dfaaff6 100755 --- a/operations/helm/scripts/rebuild-tests.sh +++ b/operations/helm/scripts/rebuild-tests.sh @@ -17,7 +17,7 @@ for chart_file in $(find * -name Chart.yaml -print | sort); do FILENAME=$(basename ${FILE_PATH}) TESTNAME=${FILENAME%-values.yaml} # Render chart - helm template --namespace default --debug ${CHART_NAME} ${CHART_DIR} -f ${FILE_PATH} --output-dir ${TEST_DIR}/${TESTNAME} --set '$chart_tests=true' + helm template --namespace default --kube-version 1.26 --debug ${CHART_NAME} ${CHART_DIR} -f ${FILE_PATH} --output-dir ${TEST_DIR}/${TESTNAME} --set '$chart_tests=true' done fi done diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/configmap.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/configmap.yaml new file mode 100644 index 000000000000..2fdc6f011777 --- /dev/null +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: 
grafana-agent/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml new file mode 100644 index 000000000000..a99f2fd4cbf6 --- /dev/null +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml @@ -0,0 +1,73 @@ +--- +# Source: grafana-agent/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + template: + metadata: + labels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + spec: + serviceAccountName: grafana-agent + containers: + - name: grafana-agent + image: docker.io/grafana/agent:v0.39.0 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/agent/config.river + - --storage.path=/tmp/agent + 
- --server.http.listen-addr=0.0.0.0:80 + - --server.http.ui-path-prefix=/ + env: + - name: AGENT_MODE + value: flow + - name: AGENT_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 80 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/agent + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/agent + - --webhook-url=http://localhost:80/-/reload + volumeMounts: + - name: config + mountPath: /etc/agent + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + volumes: + - name: config + configMap: + name: grafana-agent diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/rbac.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/rbac.yaml new file mode 100644 index 000000000000..3765583fb64f --- /dev/null +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
+ - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: grafana-agent +subjects: + - kind: ServiceAccount + name: grafana-agent + namespace: default diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/service.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/service.yaml new file mode 100644 index 000000000000..04f6eeff3c4d --- /dev/null +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# 
Source: grafana-agent/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml new file mode 100644 index 000000000000..08eca9f756b2 --- /dev/null +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +--- +# Source: grafana-agent/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-agent + namespace: default + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm + test: "true" diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml index 92ad19130040..340ec8ca04fd 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -60,7 +60,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: 
docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml index 39aa9491fa20..01a82312a35c 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -58,7 +58,7 @@ spec: mountPath: /cache name: cache-volume - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- 
a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml index 74c228870934..0f6c1dc493bd 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git 
a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml index 7ac89ceb865a..a99f2fd4cbf6 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml index 478a47d32f56..b8d1f559d7a4 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml @@ -25,7 +25,7 @@ spec: 
serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -58,7 +58,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml index 8fbd88183c0f..7de807bd5238 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml @@ -26,7 +26,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -56,7 +56,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - 
--webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/create-deployment/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml index 65ce63ce8c00..6a127404aecb 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -60,7 +60,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload @@ -76,3 +76,6 @@ spec: - name: config configMap: name: grafana-agent + persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Delete diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 
100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml index cfaf3fa7aa65..c0b85e38db1d 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -58,7 +58,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git 
a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml index 7ac89ceb865a..a99f2fd4cbf6 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml index 7ac89ceb865a..a99f2fd4cbf6 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: 
docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/default-values/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml index 7ac89ceb865a..a99f2fd4cbf6 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml 
b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml index 2d5c5f77964a..e9b849f6ee3f 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -58,7 +58,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git 
a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml index 683d4c72adce..fbdb2752b3ed 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml index ff83778a94a9..c2ee2305fce5 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: 
docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -64,7 +64,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml index 4566af378b4c..8e51da6a86f5 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -58,7 +58,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml 
index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml index cb4f0c8964f6..d366cde166d3 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -58,7 +58,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml 
b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index e229da2bf950..808c1056ba2b 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -30,7 +30,7 @@ spec: - name: global-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -60,7 +60,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 09768a686b48..014f4b84925d 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: 
quay.io/grafana/agent:v0.38.1 + image: quay.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: quay.io/jimmidyson/configmap-reload:v0.8.0 + image: quay.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml index 512c32eece2e..70c184364e58 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml @@ -43,7 +43,7 @@ spec: name: geoip containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -76,7 +76,7 @@ spec: mountPath: /etc/geoip name: geoip - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml 
b/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index c3a0fb4cafe1..319e1cf07ad2 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: - name: local-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -57,7 +57,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: 
grafana-agent diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 09768a686b48..014f4b84925d 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.38.1 + image: quay.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: quay.io/jimmidyson/configmap-reload:v0.8.0 + image: quay.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/local-image-registry/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml index 6de1e5ad8593..21338cf3979f 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ 
b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - run @@ -55,7 +55,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - --volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml new file mode 100644 index 000000000000..2fdc6f011777 --- /dev/null +++ b/operations/helm/tests/sidecars/grafana-agent/templates/configmap.yaml @@ -0,0 +1,42 @@ +--- +# Source: grafana-agent/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +data: + config.river: |- + logging { + level = "info" + format = "logfmt" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + 
discovery.kubernetes "nodes" { + role = "node" + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.kubernetes "endpoints" { + role = "endpoints" + } + + discovery.kubernetes "endpointslices" { + role = "endpointslice" + } + + discovery.kubernetes "ingresses" { + role = "ingress" + } diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml new file mode 100644 index 000000000000..b98de68b1306 --- /dev/null +++ b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml @@ -0,0 +1,95 @@ +--- +# Source: grafana-agent/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + template: + metadata: + labels: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + spec: + serviceAccountName: grafana-agent + containers: + - name: grafana-agent + image: docker.io/grafana/agent:v0.39.0 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/agent/config.river + - --storage.path=/tmp/agent + - --server.http.listen-addr=0.0.0.0:80 + - --server.http.ui-path-prefix=/ + env: + - name: AGENT_MODE + value: flow + - name: AGENT_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 80 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 80 + initialDelaySeconds: 10 + timeoutSeconds: 1 + volumeMounts: + - name: config + mountPath: /etc/agent + - + mountPath: /etc/geoip + name: geoip + - name: 
config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/agent + - --webhook-url=http://localhost:80/-/reload + volumeMounts: + - name: config + mountPath: /etc/agent + resources: + requests: + cpu: 1m + memory: 5Mi + - env: + - name: GEOIPUPDATE_ACCOUNT_ID + value: geoipupdate_account_id + - name: GEOIPUPDATE_LICENSE_KEY + value: geoipupdate_license_key + - name: GEOIPUPDATE_EDITION_IDS + value: GeoLite2-ASN GeoLite2-City GeoLite2-Country + - name: GEOIPUPDATE_DB_DIR + value: /etc/geoip + image: ghcr.io/maxmind/geoipupdate:v6.0 + name: geo-ip + volumeMounts: + - mountPath: /etc/geoip + name: geoip + volumes: + - emptyDir: {} + name: geoip + dnsPolicy: ClusterFirst + volumes: + - name: config + configMap: + name: grafana-agent + - mountPath: /etc/geoip + name: geoip diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml new file mode 100644 index 000000000000..3765583fb64f --- /dev/null +++ b/operations/helm/tests/sidecars/grafana-agent/templates/rbac.yaml @@ -0,0 +1,117 @@ +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. 
+ - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: grafana-agent/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: grafana-agent +subjects: + - kind: ServiceAccount + name: grafana-agent + namespace: default diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/service.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/service.yaml new file mode 100644 index 000000000000..04f6eeff3c4d --- /dev/null +++ b/operations/helm/tests/sidecars/grafana-agent/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# Source: grafana-agent/templates/service.yaml +apiVersion: v1 +kind: 
Service +metadata: + name: grafana-agent + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + ports: + - name: http-metrics + port: 80 + targetPort: 80 + protocol: "TCP" diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml new file mode 100644 index 000000000000..65d7e0df383f --- /dev/null +++ b/operations/helm/tests/sidecars/grafana-agent/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: grafana-agent/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: grafana-agent + namespace: default + labels: + helm.sh/chart: grafana-agent + app.kubernetes.io/name: grafana-agent + app.kubernetes.io/instance: grafana-agent + app.kubernetes.io/version: "vX.Y.Z" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml index aedb6a176381..1f9e23a47bf0 100644 --- a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.38.1 + image: docker.io/grafana/agent:v0.39.0 imagePullPolicy: IfNotPresent args: - -config.file=/etc/agent/config.yaml @@ -52,7 +52,7 @@ spec: - name: config mountPath: /etc/agent - name: config-reloader - image: docker.io/jimmidyson/configmap-reload:v0.8.0 + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 args: - 
--volume-dir=/etc/agent - --webhook-url=http://localhost:80/-/reload diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml index 1dfd6fff9bdf..65d7e0df383f 100644 --- a/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml +++ b/operations/helm/tests/static-mode/grafana-agent/templates/serviceaccount.yaml @@ -4,6 +4,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: grafana-agent + namespace: default labels: helm.sh/chart: grafana-agent app.kubernetes.io/name: grafana-agent diff --git a/packaging/grafana-agent/windows/install_script.nsis b/packaging/grafana-agent/windows/install_script.nsis index 07f8986026da..b08a8216c691 100644 --- a/packaging/grafana-agent/windows/install_script.nsis +++ b/packaging/grafana-agent/windows/install_script.nsis @@ -24,17 +24,19 @@ outFile "${OUT}" LicenseData LICENSE # Everything must be global Vars +Var EnableOptionsDialog +Var PassedInParameters Var EnableExporterCheck Var EnableExporterValue -Var EnableExporterDialog -Var PassedInParameters Var Url Var Username Var Password +Var ExpandEnvCheck +Var ExpandEnvValue Page license Page directory -Page custom enableWindowsExporter enableWindowsExporterLeave +Page custom enableOptions enableOptionsLeave Page instfiles # Annoyingly macros need to be defined before use @@ -61,17 +63,18 @@ Section "install" ${GetOptions} $PassedInParameters "/Url" $Url ${GetOptions} $PassedInParameters "/Username" $Username ${GetOptions} $PassedInParameters "/Password" $Password + ${GetOptions} $PassedInParameters "/ExpandEnv" $ExpandEnvValue Call Install Return RunInstaller: Call Install SectionEnd -Function enableWindowsExporter +Function enableOptions nsDialogs::Create 1018 - Pop $EnableExporterDialog + Pop $EnableOptionsDialog - ${If} $EnableExporterDialog == error + ${If} $EnableOptionsDialog == error Abort ${EndIf} @@ -81,16 +84,29 @@ Function 
enableWindowsExporter ${NSD_CreateCheckBox} 0 13u 100% 12u "" Pop $EnableExporterCheck + ${NSD_CreateLabel} 0 26u 100% 12u "Expand Environment Variables" + Pop $0 + + ${NSD_CreateCheckBox} 0 39u 100% 12u "" + Pop $ExpandEnvCheck + nsDialogs::Show FunctionEnd -Function enableWindowsExporterLeave +Function enableOptionsLeave ${NSD_GetState} $EnableExporterCheck $EnableExporterValue ${If} $EnableExporterValue == ${BST_CHECKED} StrCpy $EnableExporterValue "true" ${Else} StrCpy $EnableExporterValue "false" ${EndIf} + + ${NSD_GetState} $ExpandEnvCheck $ExpandEnvValue + ${If} $ExpandEnvValue == ${BST_CHECKED} + StrCpy $ExpandEnvValue "true" + ${Else} + StrCpy $ExpandEnvValue "false" + ${EndIf} FunctionEnd Function Install @@ -128,7 +144,11 @@ Function Install nsExec::ExecToLog 'sc create "Grafana Agent" binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\""' Pop $0 # These separate create and config commands are needed, on the config the binpath is required - nsExec::ExecToLog 'sc config "Grafana Agent" start= auto binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\" -config.file=\"$INSTDIR\agent-config.yaml\""' + ${If} $ExpandEnvValue == "true" + nsExec::ExecToLog 'sc config "Grafana Agent" start= auto binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\" -config.expand-env -config.file=\"$INSTDIR\agent-config.yaml\""' + ${Else} + nsExec::ExecToLog 'sc config "Grafana Agent" start= auto binpath= "\"$INSTDIR\grafana-agent-windows-amd64.exe\" -config.file=\"$INSTDIR\agent-config.yaml\""' + ${EndIf} Pop $0 nsExec::ExecToLog `sc start "Grafana Agent"` Pop $0 diff --git a/pkg/crow/crow.go b/pkg/crow/crow.go deleted file mode 100644 index 236a53b133cd..000000000000 --- a/pkg/crow/crow.go +++ /dev/null @@ -1,362 +0,0 @@ -// Package crow implements a correctness checker tool similar to Loki Canary. -// Inspired by Cortex test-exporter. 
-package crow - -import ( - "context" - "errors" - "flag" - "fmt" - "math" - "math/rand" - "net/http" - "strings" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/grafana/dskit/user" - "github.com/opentracing-contrib/go-stdlib/nethttp" - "github.com/prometheus/client_golang/api" - promapi "github.com/prometheus/client_golang/api/prometheus/v1" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - commonCfg "github.com/prometheus/common/config" - "github.com/prometheus/common/model" -) - -// Config for the Crow metrics checker. -type Config struct { - PrometheusAddr string // Base URL of Prometheus server - NumSamples int // Number of samples to generate - UserID string // User ID to use for auth when querying. - PasswordFile string // Password File for auth when querying. - ExtraSelectors string // Extra selectors for queries, i.e., cluster="prod" - OrgID string // Org ID to inject in X-Org-ScopeID header when querying. - - // Querying Params - - QueryTimeout time.Duration // Timeout for querying - QueryDuration time.Duration // Time before and after sample to search - QueryStep time.Duration // Step between samples in search - - // Validation Params - - MaxValidations int // Maximum amount of times to search for a sample - MaxTimestampDelta time.Duration // Maximum timestamp delta to use for validating. - ValueEpsilon float64 // Maximum epsilon to use for validating. - - // Logger to use. If nil, logs will be discarded. - Log log.Logger -} - -// RegisterFlags registers flags for the config to the given FlagSet. -func (c *Config) RegisterFlags(f *flag.FlagSet) { - c.RegisterFlagsWithPrefix(f, "") -} - -// RegisterFlagsWithPrefix registers flags for the config to the given FlagSet and -// prefixing each flag with the given prefix. prefix, if non-empty, should end -// in `.`. 
-func (c *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { - f.StringVar(&c.PrometheusAddr, prefix+"prometheus-addr", DefaultConfig.PrometheusAddr, "Root URL of the Prometheus API to query against") - f.IntVar(&c.NumSamples, prefix+"generate-samples", DefaultConfig.NumSamples, "Number of samples to generate when being scraped") - f.StringVar(&c.UserID, prefix+"user-id", DefaultConfig.UserID, "UserID to use with basic auth.") - f.StringVar(&c.PasswordFile, prefix+"password-file", DefaultConfig.PasswordFile, "Password file to use with basic auth.") - f.StringVar(&c.ExtraSelectors, prefix+"extra-selectors", DefaultConfig.ExtraSelectors, "Extra selectors to include in queries, useful for identifying different instances of this job.") - f.StringVar(&c.OrgID, prefix+"org-id", DefaultConfig.OrgID, "Org ID to inject in X-Org-ScopeID header when querying. Useful for querying multi-tenated Cortex directly.") - - f.DurationVar(&c.QueryTimeout, prefix+"query-timeout", DefaultConfig.QueryTimeout, "timeout for querying") - f.DurationVar(&c.QueryDuration, prefix+"query-duration", DefaultConfig.QueryDuration, "time before and after sample to search") - f.DurationVar(&c.QueryStep, prefix+"query-step", DefaultConfig.QueryStep, "step between samples when searching") - - f.IntVar(&c.MaxValidations, prefix+"max-validations", DefaultConfig.MaxValidations, "Maximum number of times to try validating a sample") - f.DurationVar(&c.MaxTimestampDelta, prefix+"max-timestamp-delta", DefaultConfig.MaxTimestampDelta, "maximum difference from the stored timestamp from the validating sample to allow") - f.Float64Var(&c.ValueEpsilon, prefix+"sample-epsilon", DefaultConfig.ValueEpsilon, "maximum difference from the stored value from the validating sample to allow") -} - -// DefaultConfig holds defaults for Crow settings. 
-var DefaultConfig = Config{ - MaxValidations: 5, - NumSamples: 10, - - QueryTimeout: 150 * time.Millisecond, - QueryDuration: 2 * time.Second, - QueryStep: 100 * time.Millisecond, - - // MaxTimestampDelta is set to 750ms to allow some buffer for a slow network - // before the scrape goes through. - MaxTimestampDelta: 750 * time.Millisecond, - ValueEpsilon: 0.0001, -} - -// Crow is a correctness checker that validates scraped metrics reach a -// Prometheus-compatible server with the same values and roughly the same -// timestamp. -// -// Crow exposes two sets of metrics: -// -// 1. Test metrics, where each scrape generates a validation job. -// 2. State metrics, exposing state of the Crow checker itself. -// -// These two metrics should be exposed via different endpoints, and only state -// metrics are safe to be manually collected from. -// -// Collecting from the set of test metrics generates a validation job, where -// Crow will query the Prometheus API to ensure the metrics that were scraped -// were written with (approximately) the same timestamp as the scrape time -// and with (approximately) the same floating point values exposed in the -// scrape. -// -// If a set of test metrics were not found and retries have been exhausted, -// or if the metrics were found but the values did not match, the error -// counter will increase. -type Crow struct { - cfg Config - m *metrics - - promClient promapi.API - - wg sync.WaitGroup - quit chan struct{} - - pendingMtx sync.Mutex - pending []*sample - sampleCh chan []*sample -} - -// New creates a new Crow. 
-func New(cfg Config) (*Crow, error) { - c, err := newCrow(cfg) - if err != nil { - return nil, err - } - - c.wg.Add(1) - go c.runLoop() - return c, nil -} - -func newCrow(cfg Config) (*Crow, error) { - if cfg.Log == nil { - cfg.Log = log.NewNopLogger() - } - - if cfg.PrometheusAddr == "" { - return nil, fmt.Errorf("Crow must be configured with a URL to use for querying Prometheus") - } - - apiCfg := api.Config{ - Address: cfg.PrometheusAddr, - RoundTripper: api.DefaultRoundTripper, - } - if cfg.UserID != "" && cfg.PasswordFile != "" { - apiCfg.RoundTripper = commonCfg.NewBasicAuthRoundTripper(cfg.UserID, "", "", cfg.PasswordFile, api.DefaultRoundTripper) - } - if cfg.OrgID != "" { - apiCfg.RoundTripper = &nethttp.Transport{ - RoundTripper: promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { - _ = user.InjectOrgIDIntoHTTPRequest(user.InjectOrgID(context.Background(), cfg.OrgID), req) - return apiCfg.RoundTripper.RoundTrip(req) - }), - } - } - - cli, err := api.NewClient(apiCfg) - if err != nil { - return nil, fmt.Errorf("failed to create prometheus client: %w", err) - } - - c := &Crow{ - cfg: cfg, - m: newMetrics(), - promClient: promapi.NewAPI(cli), - - quit: make(chan struct{}), - - sampleCh: make(chan []*sample), - } - return c, nil -} - -func (c *Crow) runLoop() { - defer c.wg.Done() - - ticker := time.NewTicker(250 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-c.quit: - return - case samples := <-c.sampleCh: - c.m.totalScrapes.Inc() - c.m.totalSamples.Add(float64(len(samples))) - - c.appendSamples(samples) - case <-ticker.C: - c.checkPending() - } - } -} - -// appendSamples queues samples to be checked. -func (c *Crow) appendSamples(samples []*sample) { - c.pendingMtx.Lock() - defer c.pendingMtx.Unlock() - c.pending = append(c.pending, samples...) - c.m.pendingSets.Set(float64(len(c.pending))) -} - -// checkPending iterates over all pending samples. Samples that are ready -// are immediately validated. 
Samples are requeued if they're not ready or -// not found during validation. -func (c *Crow) checkPending() { - c.pendingMtx.Lock() - defer c.pendingMtx.Unlock() - - now := time.Now().UTC() - - requeued := []*sample{} - for _, s := range c.pending { - if !s.Ready(now) { - requeued = append(requeued, s) - continue - } - - err := c.validate(s) - if err == nil { - c.m.totalResults.WithLabelValues("success").Inc() - continue - } - - s.ValidationAttempt++ - if s.ValidationAttempt < c.cfg.MaxValidations { - requeued = append(requeued, s) - continue - } - - var vf errValidationFailed - if errors.As(err, &vf) { - switch { - case vf.mismatch: - c.m.totalResults.WithLabelValues("mismatch").Inc() - case vf.missing: - c.m.totalResults.WithLabelValues("missing").Inc() - default: - c.m.totalResults.WithLabelValues("unknown").Inc() - } - } - } - c.pending = requeued - c.m.pendingSets.Set(float64(len(c.pending))) -} - -type errValidationFailed struct { - missing bool - mismatch bool -} - -func (e errValidationFailed) Error() string { - switch { - case e.missing: - return "validation failed: sample missing" - case e.mismatch: - return "validation failed: sample does not match" - default: - return "validation failed" - } -} - -// validate validates a sample. If the sample should be requeued (i.e., -// couldn't be found), returns true. 
-func (c *Crow) validate(b *sample) error { - ctx, cancel := context.WithTimeout(context.Background(), c.cfg.QueryTimeout) - defer cancel() - - labels := make([]string, 0, len(b.Labels)) - for k, v := range b.Labels { - labels = append(labels, fmt.Sprintf(`%s="%s"`, k, v)) - } - if c.cfg.ExtraSelectors != "" { - labels = append(labels, c.cfg.ExtraSelectors) - } - - query := fmt.Sprintf("%s{%s}", validationSampleName, strings.Join(labels, ",")) - level.Debug(c.cfg.Log).Log("msg", "querying for sample", "query", query) - - val, _, err := c.promClient.QueryRange(ctx, query, promapi.Range{ - Start: b.ScrapeTime.UTC().Add(-c.cfg.QueryDuration), - End: b.ScrapeTime.UTC().Add(+c.cfg.QueryDuration), - Step: c.cfg.QueryStep, - }) - - if err != nil { - level.Error(c.cfg.Log).Log("msg", "failed to query for sample", "query", query, "err", err) - } else if m, ok := val.(model.Matrix); ok { - return c.validateInMatrix(query, b, m) - } - - return errValidationFailed{missing: true} -} - -func (c *Crow) validateInMatrix(query string, b *sample, m model.Matrix) error { - var found, matches bool - - for _, ss := range m { - for _, sp := range ss.Values { - ts := time.Unix(0, sp.Timestamp.UnixNano()) - dist := b.ScrapeTime.Sub(ts) - if dist < 0 { - dist = -dist - } - - if dist <= c.cfg.MaxTimestampDelta { - found = true - matches = math.Abs(float64(sp.Value)-b.Value) <= c.cfg.ValueEpsilon - } - - level.Debug(c.cfg.Log).Log( - "msg", "compared query to stored sample", - "query", query, - "sample", ss.Metric, - "ts", sp.Timestamp, "expect_ts", b.ScrapeTime, - "value", sp.Value, "expect_value", b.Value, - ) - - if found && matches { - break - } - } - } - - if !found || !matches { - return errValidationFailed{ - missing: !found, - mismatch: found && !matches, - } - } - return nil -} - -// TestMetrics exposes a collector of test metrics. Each collection will -// schedule a validation job. 
-func (c *Crow) TestMetrics() prometheus.Collector { - return &sampleGenerator{ - numSamples: c.cfg.NumSamples, - sendCh: c.sampleCh, - - r: rand.New(rand.NewSource(time.Now().Unix())), - } -} - -// StateMetrics exposes metrics of Crow itself. These metrics are not validated -// for presence in the remote system. -func (c *Crow) StateMetrics() prometheus.Collector { return c.m } - -// Stop stops crow. Panics if Stop is called more than once. -func (c *Crow) Stop() { - close(c.quit) - c.wg.Wait() -} diff --git a/pkg/crow/metrics.go b/pkg/crow/metrics.go deleted file mode 100644 index 6550cc74c596..000000000000 --- a/pkg/crow/metrics.go +++ /dev/null @@ -1,62 +0,0 @@ -package crow - -import "github.com/prometheus/client_golang/prometheus" - -type metrics struct { - totalScrapes prometheus.Counter - totalSamples prometheus.Counter - totalResults *prometheus.CounterVec - pendingSets prometheus.Gauge - - cachedCollectors []prometheus.Collector -} - -func newMetrics() *metrics { - var m metrics - - m.totalScrapes = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "crow_test_scrapes_total", - Help: "Total number of generated test sample sets", - }) - - m.totalSamples = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "crow_test_samples_total", - Help: "Total number of generated test samples", - }) - - m.totalResults = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "crow_test_sample_results_total", - Help: "Total validation results of test samples", - }, []string{"result"}) // result is either "success", "missing", "mismatch", or "unknown" - - m.pendingSets = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "crow_test_pending_validations", - Help: "Total number of pending validations to perform", - }) - - return &m -} - -func (m *metrics) collectors() []prometheus.Collector { - if m.cachedCollectors == nil { - m.cachedCollectors = []prometheus.Collector{ - m.totalScrapes, - m.totalSamples, - m.totalResults, - m.pendingSets, - } - } - return 
m.cachedCollectors -} - -func (m *metrics) Describe(ch chan<- *prometheus.Desc) { - for _, c := range m.collectors() { - c.Describe(ch) - } -} - -func (m *metrics) Collect(ch chan<- prometheus.Metric) { - for _, c := range m.collectors() { - c.Collect(ch) - } -} diff --git a/pkg/crow/samples.go b/pkg/crow/samples.go deleted file mode 100644 index 066b21008af4..000000000000 --- a/pkg/crow/samples.go +++ /dev/null @@ -1,85 +0,0 @@ -package crow - -import ( - "fmt" - "math/rand" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -type sample struct { - ScrapeTime time.Time - Labels prometheus.Labels - Value float64 - - // How many times this sample has attempted to be validated. Starts at 0. - ValidationAttempt int -} - -// Ready checks if this sample is ready to be validated. -func (s *sample) Ready(now time.Time) bool { - backoff := sampleBackoff(s.ValidationAttempt) - return now.After(s.ScrapeTime.Add(backoff)) -} - -func sampleBackoff(attempt int) time.Duration { - // Exponential backoff from 1s up to 1s + (250ms * 2^attempt). 
- return time.Second + (250 * time.Millisecond * 1 << attempt) -} - -type sampleGenerator struct { - numSamples int - sendCh chan<- []*sample - r *rand.Rand -} - -const validationSampleName = "crow_validation_sample" - -func (sg *sampleGenerator) Describe(ch chan<- *prometheus.Desc) { - ch <- prometheus.NewDesc( - validationSampleName, "Sample to validate", - []string{"sample_num"}, - prometheus.Labels{}, - ) -} - -func (sg *sampleGenerator) Collect(ch chan<- prometheus.Metric) { - var ( - scrapeTime = time.Now() - - sampleLabel = "sample_num" - desc = prometheus.NewDesc( - validationSampleName, "Sample to validate", - []string{sampleLabel}, - prometheus.Labels{}, - ) - - usedLabels = map[string]struct{}{} - samples = make([]*sample, sg.numSamples) - ) - - for s := 0; s < sg.numSamples; s++ { - GenLabel: - labelSuffix := make([]byte, 1) - _, _ = sg.r.Read(labelSuffix) - label := fmt.Sprintf("sample_%x", labelSuffix) - if _, exist := usedLabels[label]; exist { - goto GenLabel - } - usedLabels[label] = struct{}{} - - samples[s] = &sample{ - ScrapeTime: scrapeTime, - Labels: prometheus.Labels{sampleLabel: label}, - Value: float64(sg.r.Int63n(1_000_000)), - } - ch <- prometheus.MustNewConstMetric( - desc, - prometheus.GaugeValue, - samples[s].Value, samples[s].Labels[sampleLabel], - ) - } - - sg.sendCh <- samples -} diff --git a/pkg/crow/samples_test.go b/pkg/crow/samples_test.go deleted file mode 100644 index 523c4c2a8cda..000000000000 --- a/pkg/crow/samples_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package crow - -import ( - "fmt" - "math/rand" - "strings" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/expfmt" - "github.com/stretchr/testify/require" -) - -func Test_sample_Ready(t *testing.T) { - tt := []struct { - sample sample - now time.Time - expect bool - }{ - { - sample: sample{ - ScrapeTime: time.Unix(100, 0).UTC(), - ValidationAttempt: 0, - }, - now: time.Unix(100, 0).UTC(), - expect: false, - }, - { - 
sample: sample{ - ScrapeTime: time.Unix(100, 0).UTC(), - ValidationAttempt: 0, - }, - now: time.Unix(500, 0).UTC(), - expect: true, - }, - } - - for _, tc := range tt { - ready := tc.sample.Ready(tc.now) - require.Equal(t, tc.expect, ready) - } -} - -func Test_sampleBackoff(t *testing.T) { - tt := []struct { - attempt int - expect time.Duration - }{ - {attempt: 0, expect: 1250 * time.Millisecond}, - {attempt: 1, expect: 1500 * time.Millisecond}, - {attempt: 2, expect: 2000 * time.Millisecond}, - {attempt: 3, expect: 3000 * time.Millisecond}, - {attempt: 4, expect: 5000 * time.Millisecond}, - {attempt: 5, expect: 9000 * time.Millisecond}, - } - - for _, tc := range tt { - t.Run(fmt.Sprintf("%d", tc.attempt), func(t *testing.T) { - actual := sampleBackoff(tc.attempt) - require.Equal(t, tc.expect, actual) - }) - } -} - -func Test_sampleGenerator(t *testing.T) { - var ( - reg = prometheus.NewRegistry() - ) - - gen := sampleGenerator{ - numSamples: 10, - sendCh: make(chan<- []*sample, 10), - r: rand.New(rand.NewSource(0)), - } - reg.MustRegister(&gen) - - mfs, err := reg.Gather() - require.NoError(t, err) - - var sb strings.Builder - enc := expfmt.NewEncoder(&sb, expfmt.FmtText) - for _, mf := range mfs { - require.NoError(t, enc.Encode(mf)) - } - - expect := `# HELP crow_validation_sample Sample to validate -# TYPE crow_validation_sample gauge -crow_validation_sample{sample_num="sample_01"} 393152 -crow_validation_sample{sample_num="sample_14"} 943416 -crow_validation_sample{sample_num="sample_2f"} 980153 -crow_validation_sample{sample_num="sample_51"} 637646 -crow_validation_sample{sample_num="sample_55"} 976708 -crow_validation_sample{sample_num="sample_94"} 995827 -crow_validation_sample{sample_num="sample_c2"} 376202 -crow_validation_sample{sample_num="sample_fa"} 126063 -crow_validation_sample{sample_num="sample_fc"} 422456 -crow_validation_sample{sample_num="sample_fd"} 197794 -` - require.Equal(t, expect, sb.String()) -} diff --git 
a/pkg/integrations/kafka_exporter/kafka_exporter.go b/pkg/integrations/kafka_exporter/kafka_exporter.go index b8cc491b120a..5d50fe1452ea 100644 --- a/pkg/integrations/kafka_exporter/kafka_exporter.go +++ b/pkg/integrations/kafka_exporter/kafka_exporter.go @@ -28,6 +28,9 @@ var DefaultConfig = Config{ // Config controls kafka_exporter type Config struct { + // The instance label for metrics. + Instance string `yaml:"instance,omitempty"` + // Address array (host:port) of Kafka server KafkaURIs []string `yaml:"kafka_uris,omitempty"` @@ -109,11 +112,14 @@ func (c *Config) Name() string { // there is not exactly one Kafka node, the user must manually provide // their own value for instance key in the common config. func (c *Config) InstanceKey(agentKey string) (string, error) { - if len(c.KafkaURIs) != 1 { + if len(c.KafkaURIs) == 1 { + return c.KafkaURIs[0], nil + } + if c.Instance == "" && len(c.KafkaURIs) > 1 { return "", fmt.Errorf("an automatic value for `instance` cannot be determined from %d kafka servers, manually provide one for this integration", len(c.KafkaURIs)) } - return c.KafkaURIs[0], nil + return c.Instance, nil } // NewIntegration creates a new elasticsearch_exporter diff --git a/pkg/integrations/mssql/sql_exporter.go b/pkg/integrations/mssql/sql_exporter.go index fd9af2278be0..84d930d35a3e 100644 --- a/pkg/integrations/mssql/sql_exporter.go +++ b/pkg/integrations/mssql/sql_exporter.go @@ -114,6 +114,9 @@ func (c *Config) NewIntegration(l log.Logger) (integrations.Integration, error) collectorConfig = *customCollectorConfig } + // TODO(hainenber): expose below attr as config + enablePing := false + t, err := sql_exporter.NewTarget( "mssqlintegration", "", @@ -128,6 +131,7 @@ func (c *Config) NewIntegration(l log.Logger) (integrations.Integration, error) MaxConns: c.MaxOpenConnections, MaxIdleConns: c.MaxIdleConnections, }, + &enablePing, ) if err != nil { diff --git a/pkg/integrations/windows_exporter/config.go 
b/pkg/integrations/windows_exporter/config.go index 006bc5426d72..a8bdba73174c 100644 --- a/pkg/integrations/windows_exporter/config.go +++ b/pkg/integrations/windows_exporter/config.go @@ -23,6 +23,7 @@ type Config struct { TextFile TextFileConfig `yaml:"text_file,omitempty"` SMTP SMTPConfig `yaml:"smtp,omitempty"` Service ServiceConfig `yaml:"service,omitempty"` + PhysicalDisk PhysicalDiskConfig `yaml:"physical_disk,omitempty"` Process ProcessConfig `yaml:"process,omitempty"` Network NetworkConfig `yaml:"network,omitempty"` MSSQL MSSQLConfig `yaml:"mssql,omitempty"` @@ -126,3 +127,9 @@ type ScheduledTaskConfig struct { Include string `yaml:"include,omitempty"` Exclude string `yaml:"exclude,omitempty"` } + +// PhysicalDiskConfig handles settings for the windows_exporter physical disk collector +type PhysicalDiskConfig struct { + Include string `yaml:"include,omitempty"` + Exclude string `yaml:"exclude,omitempty"` +} diff --git a/pkg/integrations/windows_exporter/config_windows.go b/pkg/integrations/windows_exporter/config_windows.go index 17fd03d4f80c..657ff5861de4 100644 --- a/pkg/integrations/windows_exporter/config_windows.go +++ b/pkg/integrations/windows_exporter/config_windows.go @@ -22,6 +22,9 @@ func (c *Config) ToWindowsExporterConfig() collector.Config { cfg.Textfile.TextFileDirectories = c.TextFile.TextFileDirectory + cfg.PhysicalDisk.DiskInclude = c.PhysicalDisk.Include + cfg.PhysicalDisk.DiskExclude = c.PhysicalDisk.Exclude + cfg.Process.ProcessExclude = coalesceString(c.Process.Exclude, c.Process.BlackList) cfg.Process.ProcessInclude = coalesceString(c.Process.Include, c.Process.WhiteList) @@ -87,6 +90,10 @@ var DefaultConfig = Config{ Include: collector.ConfigDefaults.Net.NicInclude, Exclude: collector.ConfigDefaults.Net.NicExclude, }, + PhysicalDisk: PhysicalDiskConfig{ + Include: collector.ConfigDefaults.PhysicalDisk.DiskInclude, + Exclude: collector.ConfigDefaults.PhysicalDisk.DiskExclude, + }, Process: ProcessConfig{ BlackList: 
collector.ConfigDefaults.Process.ProcessExclude, WhiteList: collector.ConfigDefaults.Process.ProcessInclude, diff --git a/pkg/integrations/windows_exporter/windows_exporter_windows.go b/pkg/integrations/windows_exporter/windows_exporter_windows.go index fd0a7effe023..94e05f1121a6 100644 --- a/pkg/integrations/windows_exporter/windows_exporter_windows.go +++ b/pkg/integrations/windows_exporter/windows_exporter_windows.go @@ -23,6 +23,11 @@ func New(logger log.Logger, c *Config) (integrations.Integration, error) { if err != nil { return nil, err } + err = winCol.SetPerfCounterQuery() + if err != nil { + return nil, err + } + return integrations.NewCollectorIntegration(c.Name(), integrations.WithCollectors( // Hard-coded 4m timeout to represent the time a series goes stale. // TODO: Make configurable if useful. diff --git a/pkg/metrics/agent_test.go b/pkg/metrics/agent_test.go index 0d003d96474c..05f547c55310 100644 --- a/pkg/metrics/agent_test.go +++ b/pkg/metrics/agent_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "net/http" "sync" "testing" "time" @@ -285,6 +286,10 @@ func (i *fakeInstance) StorageDirectory() string { return "" } +func (i *fakeInstance) WriteHandler() http.Handler { + return nil +} + func (i *fakeInstance) Appender(ctx context.Context) storage.Appender { return nil } diff --git a/pkg/metrics/http.go b/pkg/metrics/http.go index 6f7f673066ad..01d185f968d8 100644 --- a/pkg/metrics/http.go +++ b/pkg/metrics/http.go @@ -13,7 +13,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/scrape" - "github.com/prometheus/prometheus/storage/remote" ) // WireAPI adds API routes to the provided mux router. 
@@ -151,8 +150,7 @@ func (a *Agent) PushMetricsHandler(w http.ResponseWriter, r *http.Request) { return } - handler := remote.NewWriteHandler(a.logger, a.reg, managedInstance) - handler.ServeHTTP(w, r) + managedInstance.WriteHandler().ServeHTTP(w, r) } // getInstanceName uses gorilla/mux's route variables to extract the diff --git a/pkg/metrics/instance/configstore/api_test.go b/pkg/metrics/instance/configstore/api_test.go index 15a111520301..84c0198dee00 100644 --- a/pkg/metrics/instance/configstore/api_test.go +++ b/pkg/metrics/instance/configstore/api_test.go @@ -139,6 +139,7 @@ scrape_configs: honor_timestamps: true metrics_path: /metrics scheme: http + track_timestamps_staleness: true static_configs: - targets: - 127.0.0.1:12345 diff --git a/pkg/metrics/instance/host_filter_test.go b/pkg/metrics/instance/host_filter_test.go index aa53bd25b727..8a24373594c0 100644 --- a/pkg/metrics/instance/host_filter_test.go +++ b/pkg/metrics/instance/host_filter_test.go @@ -172,6 +172,7 @@ func TestHostFilter_PatchSD(t *testing.T) { honor_timestamps: true metrics_path: /metrics scheme: http + track_timestamps_staleness: false follow_redirects: true enable_http2: true kubernetes_sd_configs: diff --git a/pkg/metrics/instance/instance.go b/pkg/metrics/instance/instance.go index eae93936be2f..c81c8e620622 100644 --- a/pkg/metrics/instance/instance.go +++ b/pkg/metrics/instance/instance.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "math" + "net/http" "os" "path/filepath" "sync" @@ -243,8 +244,9 @@ type Instance struct { logger log.Logger - reg prometheus.Registerer - newWal walStorageFactory + reg prometheus.Registerer + newWal walStorageFactory + writeHandler http.Handler } // New creates a new Instance with a directory for storing the WAL. 
The instance @@ -407,6 +409,8 @@ func (i *Instance) initialize(ctx context.Context, reg prometheus.Registerer, cf return fmt.Errorf("error creating WAL: %w", err) } + i.writeHandler = remote.NewWriteHandler(i.logger, i.reg, i.wal) + i.discovery, err = i.newDiscoveryManager(ctx, cfg) if err != nil { return fmt.Errorf("error creating discovery manager: %w", err) @@ -582,6 +586,12 @@ func (i *Instance) StorageDirectory() string { return i.wal.Directory() } +// WriteHandler returns an HTTP handler for pushing metrics directly into the +// instance's WAL. +func (i *Instance) WriteHandler() http.Handler { + return i.writeHandler +} + // Appender returns a storage.Appender from the instance's WAL func (i *Instance) Appender(ctx context.Context) storage.Appender { return i.wal.Appender(ctx) diff --git a/pkg/metrics/instance/instance_test.go b/pkg/metrics/instance/instance_test.go index e82117e797df..5b9ee503abb7 100644 --- a/pkg/metrics/instance/instance_test.go +++ b/pkg/metrics/instance/instance_test.go @@ -416,5 +416,6 @@ func runInstance(t *testing.T, i *Instance) { t.Cleanup(func() { cancel() }) go require.NotPanics(t, func() { _ = i.Run(ctx) + require.NotNil(t, i.WriteHandler()) }) } diff --git a/pkg/metrics/instance/manager.go b/pkg/metrics/instance/manager.go index d3351d4d006f..6bb90324fdd6 100644 --- a/pkg/metrics/instance/manager.go +++ b/pkg/metrics/instance/manager.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "net/http" "sync" "time" @@ -66,6 +67,7 @@ type ManagedInstance interface { TargetsActive() map[string][]*scrape.Target StorageDirectory() string Appender(ctx context.Context) storage.Appender + WriteHandler() http.Handler } // BasicManagerConfig controls the operations of a BasicManager. 
diff --git a/pkg/metrics/instance/manager_test.go b/pkg/metrics/instance/manager_test.go index e524592e02fe..6afed2673218 100644 --- a/pkg/metrics/instance/manager_test.go +++ b/pkg/metrics/instance/manager_test.go @@ -3,6 +3,7 @@ package instance import ( "context" "fmt" + "net/http" "os" "testing" @@ -104,6 +105,7 @@ type mockInstance struct { TargetsActiveFunc func() map[string][]*scrape.Target StorageDirectoryFunc func() string AppenderFunc func() storage.Appender + WriteHandlerFunc func() http.Handler } func (m mockInstance) Run(ctx context.Context) error { @@ -141,6 +143,13 @@ func (m mockInstance) StorageDirectory() string { panic("StorageDirectoryFunc not provided") } +func (m mockInstance) WriteHandler() http.Handler { + if m.WriteHandlerFunc != nil { + return m.WriteHandlerFunc() + } + panic("GetWriteHandlerFunc not provided") +} + func (m mockInstance) Appender(_ context.Context) storage.Appender { if m.AppenderFunc != nil { return m.AppenderFunc() diff --git a/pkg/metrics/instance/marshal_test.go b/pkg/metrics/instance/marshal_test.go index 5d2a68b870da..b102c3d635d2 100644 --- a/pkg/metrics/instance/marshal_test.go +++ b/pkg/metrics/instance/marshal_test.go @@ -36,6 +36,7 @@ scrape_configs: honor_timestamps: true metrics_path: /metrics scheme: http + track_timestamps_staleness: true static_configs: - targets: - 127.0.0.1:12345 @@ -92,6 +93,7 @@ scrape_configs: honor_timestamps: true metrics_path: /metrics scheme: http + track_timestamps_staleness: true static_configs: - targets: - 127.0.0.1:12345 diff --git a/pkg/metrics/instance/noop.go b/pkg/metrics/instance/noop.go index 76d459e19507..f9f86b871320 100644 --- a/pkg/metrics/instance/noop.go +++ b/pkg/metrics/instance/noop.go @@ -2,6 +2,7 @@ package instance import ( "context" + "net/http" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" @@ -37,6 +38,11 @@ func (NoOpInstance) StorageDirectory() string { return "" } +// WriteHandler implements Instance. 
+func (NoOpInstance) WriteHandler() http.Handler { + return nil +} + // Appender implements Instance func (NoOpInstance) Appender(_ context.Context) storage.Appender { return nil diff --git a/pkg/operator/defaults.go b/pkg/operator/defaults.go index b66cfb52289b..bc9cff6ab04e 100644 --- a/pkg/operator/defaults.go +++ b/pkg/operator/defaults.go @@ -2,7 +2,7 @@ package operator // Supported versions of the Grafana Agent. var ( - DefaultAgentVersion = "v0.38.1" + DefaultAgentVersion = "v0.39.0" DefaultAgentBaseImage = "grafana/agent" DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion ) diff --git a/pkg/operator/kubelet.go b/pkg/operator/kubelet.go index c04c6887f746..9ecff4f9bc8a 100644 --- a/pkg/operator/kubelet.go +++ b/pkg/operator/kubelet.go @@ -3,6 +3,7 @@ package operator import ( "context" "fmt" + "sort" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -126,6 +127,12 @@ func getNodeAddrs(l log.Logger, nodes *core_v1.NodeList) (addrs []core_v1.Endpoi if failed { return nil, fmt.Errorf("failed to get the address from one or more nodes") } + + // Sort endpoints to reduce performance cost on endpoint watchers + sort.SliceStable(addrs, func(i, j int) bool { + return addrs[i].IP < addrs[j].IP + }) + return } diff --git a/production/kubernetes/README.md b/production/kubernetes/README.md deleted file mode 100644 index 6a98bd3057b6..000000000000 --- a/production/kubernetes/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# Kubernetes Config - -This directory contains Kubernetes manifest templates for rolling out the Agent. - -Manifests: - -- Metric collection (StatefulSet): [`agent-bare.yaml`](./agent-bare.yaml) -- Log collection (DaemonSet): [`agent-loki.yaml`](./agent-loki.yaml) -- Trace collection (Deployment): [`agent-traces.yaml`](./agent-traces.yaml) - -⚠️ **These manifests do not include the Agent's configuration (ConfigMaps)**, -which are necessary to run the Agent. 
- -For sample configurations and detailed installation instructions, please head to: - -- [Grafana Agent Metrics Kubernetes Quickstart](https://grafana.com/docs/grafana-cloud/quickstart/agent-k8s/k8s_agent_metrics/) -- [Grafana Agent Logs Kubernetes Quickstart](https://grafana.com/docs/grafana-cloud/quickstart/agent-k8s/k8s_agent_logs/) -- [Grafana Agent Traces Kubernetes Quickstart](https://grafana.com/docs/grafana-cloud/quickstart/agent-k8s/k8s_agent_traces/) - -## Manually Applying - -Since the manifest files are just templates, note that they are *not* ready for -applying out of the box and you will have to manually perform the following steps: - -1. Download the manifest as `manifest.yaml` - -2. Modify your copy of the manifest, replacing relevant variables with the appropriate values - -3. Apply the modified manifest file: `kubectl -n default apply -f manifest.yaml`. - -This directory also contains an `install-bare.sh` script that is used inside of -Grafana Cloud instructions. If using the Grafana Agent outside of Grafana Cloud, -it is recommended to follow the steps above instead of calling this script -directly. - -## Rebuilding the manifests - -The manifests provided are created using Grafana Labs' production -[Tanka configs](../tanka/grafana-agent) with some default values. If you want to -build the YAML file with some custom values, you will need the following pieces -of software installed: - -1. [Tanka](https://github.com/grafana/tanka) >= v0.8 -2. [`jsonnet-bundler`](https://github.com/jsonnet-bundler/jsonnet-bundler) >= v0.2.1 - -See the [`template` Tanka environment](./build/templates) for the current -settings that initialize the Grafana Agent Tanka configs. - -To build the YAML files, execute the `./build/build.sh` script or run `make example-kubernetes` -from the project's root directory. 
diff --git a/production/kubernetes/agent-bare.yaml b/production/kubernetes/agent-bare.yaml deleted file mode 100644 index 0ca2da3bc58f..000000000000 --- a/production/kubernetes/agent-bare.yaml +++ /dev/null @@ -1,115 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent - namespace: ${NAMESPACE} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - services - - endpoints - - pods - - events - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent -subjects: -- kind: ServiceAccount - name: grafana-agent - namespace: ${NAMESPACE} ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: grafana-agent - name: grafana-agent - namespace: ${NAMESPACE} -spec: - clusterIP: None - ports: - - name: grafana-agent-http-metrics - port: 80 - targetPort: 80 - selector: - name: grafana-agent ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: grafana-agent - namespace: ${NAMESPACE} -spec: - replicas: 1 - selector: - matchLabels: - name: grafana-agent - serviceName: grafana-agent - template: - metadata: - labels: - name: grafana-agent - spec: - containers: - - args: - - -config.expand-env=true - - -config.file=/etc/agent/agent.yaml - - -enable-features=integrations-next - - -server.http.address=0.0.0.0:80 - env: - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - image: grafana/agent:v0.37.4 - imagePullPolicy: IfNotPresent - name: grafana-agent - ports: - - containerPort: 80 - name: http-metrics - volumeMounts: - - mountPath: /var/lib/agent - name: agent-wal - - mountPath: /etc/agent - name: grafana-agent - serviceAccountName: grafana-agent - volumes: - - configMap: - name: 
grafana-agent - name: grafana-agent - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: agent-wal - namespace: ${NAMESPACE} - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi diff --git a/production/kubernetes/agent-loki.yaml b/production/kubernetes/agent-loki.yaml deleted file mode 100644 index 4eeb798a00d8..000000000000 --- a/production/kubernetes/agent-loki.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-logs - namespace: ${NAMESPACE} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent-logs -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - services - - endpoints - - pods - - events - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent-logs -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent-logs -subjects: -- kind: ServiceAccount - name: grafana-agent-logs - namespace: ${NAMESPACE} ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: grafana-agent-logs - namespace: ${NAMESPACE} -spec: - minReadySeconds: 10 - selector: - matchLabels: - name: grafana-agent-logs - template: - metadata: - labels: - name: grafana-agent-logs - spec: - containers: - - args: - - -config.expand-env=true - - -config.file=/etc/agent/agent.yaml - - -server.http.address=0.0.0.0:80 - env: - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - image: grafana/agent:v0.37.4 - imagePullPolicy: IfNotPresent - name: grafana-agent-logs - ports: - - containerPort: 80 - name: http-metrics - securityContext: - privileged: true - runAsUser: 0 - volumeMounts: - - mountPath: /etc/agent - name: grafana-agent-logs - - mountPath: /var/log - name: varlog - - mountPath: 
/var/lib/docker/containers - name: varlibdockercontainers - readOnly: true - serviceAccountName: grafana-agent-logs - tolerations: - - effect: NoSchedule - operator: Exists - volumes: - - configMap: - name: grafana-agent-logs - name: grafana-agent-logs - - hostPath: - path: /var/log - name: varlog - - hostPath: - path: /var/lib/docker/containers - name: varlibdockercontainers - updateStrategy: - type: RollingUpdate diff --git a/production/kubernetes/agent-traces.yaml b/production/kubernetes/agent-traces.yaml deleted file mode 100644 index 7c2835846b0a..000000000000 --- a/production/kubernetes/agent-traces.yaml +++ /dev/null @@ -1,154 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: grafana-agent-traces - namespace: ${NAMESPACE} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: grafana-agent-traces -rules: -- apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - services - - endpoints - - pods - - events - verbs: - - get - - list - - watch -- nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: grafana-agent-traces -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: grafana-agent-traces -subjects: -- kind: ServiceAccount - name: grafana-agent-traces - namespace: ${NAMESPACE} ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: grafana-agent-traces - name: grafana-agent-traces - namespace: ${NAMESPACE} -spec: - ports: - - name: grafana-agent-traces-http-metrics - port: 80 - targetPort: 80 - - name: grafana-agent-traces-thrift-compact - port: 6831 - protocol: UDP - targetPort: 6831 - - name: grafana-agent-traces-thrift-binary - port: 6832 - protocol: UDP - targetPort: 6832 - - name: grafana-agent-traces-thrift-http - port: 14268 - protocol: TCP - targetPort: 14268 - - name: grafana-agent-traces-thrift-grpc - port: 14250 - protocol: TCP - targetPort: 14250 - - name: 
grafana-agent-traces-zipkin - port: 9411 - protocol: TCP - targetPort: 9411 - - name: grafana-agent-traces-otlp-grpc - port: 4317 - protocol: TCP - targetPort: 4317 - - name: grafana-agent-traces-otlp-http - port: 4318 - protocol: TCP - targetPort: 4318 - - name: grafana-agent-traces-opencensus - port: 55678 - protocol: TCP - targetPort: 55678 - selector: - name: grafana-agent-traces ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: grafana-agent-traces - namespace: ${NAMESPACE} -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: grafana-agent-traces - template: - metadata: - labels: - name: grafana-agent-traces - spec: - containers: - - args: - - -config.expand-env=true - - -config.file=/etc/agent/agent.yaml - - -server.http.address=0.0.0.0:80 - env: - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - image: grafana/agent:v0.37.4 - imagePullPolicy: IfNotPresent - name: grafana-agent-traces - ports: - - containerPort: 80 - name: http-metrics - - containerPort: 6831 - name: thrift-compact - protocol: UDP - - containerPort: 6832 - name: thrift-binary - protocol: UDP - - containerPort: 14268 - name: thrift-http - protocol: TCP - - containerPort: 14250 - name: thrift-grpc - protocol: TCP - - containerPort: 9411 - name: zipkin - protocol: TCP - - containerPort: 4317 - name: otlp-grpc - protocol: TCP - - containerPort: 4318 - name: otlp-http - protocol: TCP - - containerPort: 55678 - name: opencensus - protocol: TCP - volumeMounts: - - mountPath: /etc/agent - name: grafana-agent-traces - serviceAccountName: grafana-agent-traces - volumes: - - configMap: - name: grafana-agent-traces - name: grafana-agent-traces diff --git a/production/kubernetes/build/build.sh b/production/kubernetes/build/build.sh deleted file mode 100755 index 474afb2f1235..000000000000 --- a/production/kubernetes/build/build.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=bash - -set +e - 
-DIRNAME=$(dirname "$0") - -pushd "${DIRNAME}" || exit 1 -# Make sure dependencies are up to date -jb install -tk show --dangerous-allow-redirect ./templates/bare > "${PWD}/../agent-bare.yaml" -tk show --dangerous-allow-redirect ./templates/loki > "${PWD}/../agent-loki.yaml" -tk show --dangerous-allow-redirect ./templates/traces > "${PWD}/../agent-traces.yaml" -tk show --dangerous-allow-redirect ./templates/operator > "${PWD}/../../operator/templates/agent-operator.yaml" -popd || exit 1 diff --git a/production/kubernetes/build/jsonnetfile.json b/production/kubernetes/build/jsonnetfile.json deleted file mode 100644 index 392936f6804d..000000000000 --- a/production/kubernetes/build/jsonnetfile.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "version": 1, - "dependencies": [ - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "ksonnet-util" - } - }, - "version": "master" - }, - { - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/k8s-libsonnet.git", - "subdir": "1.21" - } - }, - "version": "main" - }, - { - "source": { - "git": { - "remote": "https://github.com/kubernetes/kube-state-metrics.git", - "subdir": "jsonnet/kube-state-metrics" - } - }, - "version": "v2.5.0" - }, - { - "source": { - "local": { - "directory": "../../tanka/grafana-agent" - } - }, - "version": "" - }, - { - "source": { - "local": { - "directory": "../../tanka/grafana-agent-operator" - } - }, - "version": "" - } - ], - "legacyImports": true -} diff --git a/production/kubernetes/build/jsonnetfile.lock.json b/production/kubernetes/build/jsonnetfile.lock.json deleted file mode 100644 index 3ed712040c13..000000000000 --- a/production/kubernetes/build/jsonnetfile.lock.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "version": 1, - "dependencies": [ - { - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs.git", - "subdir": "ksonnet-util" - } - }, - "version": "28a9c400acbc02994ea8b08494571c7b476096b6", - "sum": 
"OxgtIWL4hjvG0xkMwUzZ7Yjs52zUhLhaVQpwHCbqf8A=" - }, - { - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/grafana-agent-libsonnet.git", - "subdir": "0.26" - } - }, - "version": "4763fb9dd69acd7c32ea34a708328ad7d1984100", - "sum": "AcBuxWZhGRgcfHFUxYRUOhAnQ9FnEP37fVl68jAQNc8=", - "name": "agent-operator-gen" - }, - { - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/k8s-libsonnet.git", - "subdir": "1.21" - } - }, - "version": "f8efa81cf15257bd151b97e31599e20b2ba5311b", - "sum": "FYub7WxElJkqjjXA++DemsKHwsPqUFW945BTgpVop6Q=" - }, - { - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/prometheus-operator-libsonnet.git", - "subdir": "0.57" - } - }, - "version": "daddbdd13374107f78a2489301f7c23ae1eb0b16", - "sum": "8+yZ7FalORuq5ZGpqSnSa+/4YQcPa7x9rClXcjgGCq0=", - "name": "prom-operator-gen" - }, - { - "source": { - "git": { - "remote": "https://github.com/kubernetes/kube-state-metrics.git", - "subdir": "jsonnet/kube-state-metrics" - } - }, - "version": "0567e1e1b981755e563d2244fa1659563f2cddbc", - "sum": "P0dCnbzyPScQGNXwXRcwiPkMLeTq0IPNbSTysDbySnM=" - }, - { - "source": { - "local": { - "directory": "../../tanka/grafana-agent" - } - }, - "version": "" - }, - { - "source": { - "local": { - "directory": "../../tanka/grafana-agent-operator" - } - }, - "version": "" - } - ], - "legacyImports": false -} diff --git a/production/kubernetes/build/lib/k.libsonnet b/production/kubernetes/build/lib/k.libsonnet deleted file mode 100644 index 3004bc3cf935..000000000000 --- a/production/kubernetes/build/lib/k.libsonnet +++ /dev/null @@ -1 +0,0 @@ -(import 'github.com/jsonnet-libs/k8s-libsonnet/1.21/main.libsonnet') diff --git a/production/kubernetes/build/lib/version.libsonnet b/production/kubernetes/build/lib/version.libsonnet deleted file mode 100644 index be3865408074..000000000000 --- a/production/kubernetes/build/lib/version.libsonnet +++ /dev/null @@ -1 +0,0 @@ -'grafana/agent:v0.37.4' diff --git 
a/production/kubernetes/build/templates/bare/main.jsonnet b/production/kubernetes/build/templates/bare/main.jsonnet deleted file mode 100644 index fda8512f0291..000000000000 --- a/production/kubernetes/build/templates/bare/main.jsonnet +++ /dev/null @@ -1,41 +0,0 @@ -local agent = import 'grafana-agent/v2/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -local pvc = k.core.v1.persistentVolumeClaim; -local volumeMount = k.core.v1.volumeMount; -local containerPort = k.core.v1.containerPort; - -{ - agent: - agent.new(name='grafana-agent', namespace='${NAMESPACE}') + - agent.withStatefulSetController( - replicas=1, - volumeClaims=[ - pvc.new() + - pvc.mixin.metadata.withName('agent-wal') + - pvc.mixin.metadata.withNamespace('${NAMESPACE}') + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') + - pvc.mixin.spec.resources.withRequests({ storage: '5Gi' }), - ], - ) + - agent.withConfigHash(false) + - agent.withArgsMixin({ - 'enable-features': 'integrations-next' - },) + - // add dummy config or else will fail - agent.withAgentConfig({ - server: { log_level: 'error' }, - }) + - agent.withVolumeMountsMixin([volumeMount.new('agent-wal', '/var/lib/agent')]) + - // headless svc needed by statefulset - agent.withService() + - { - controller_service+: { - spec+: { - clusterIP: 'None', - }, - }, - } + - // hack to disable ConfigMap - { configMap:: super.configMap }, -} diff --git a/production/kubernetes/build/templates/bare/spec.json b/production/kubernetes/build/templates/bare/spec.json deleted file mode 100644 index d97d07154983..000000000000 --- a/production/kubernetes/build/templates/bare/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "template" - }, - "spec": { - "apiServer": "", - "namespace": "" - } -} diff --git a/production/kubernetes/build/templates/base-sigv4/main.jsonnet b/production/kubernetes/build/templates/base-sigv4/main.jsonnet deleted file mode 100644 index 
5297475e61d9..000000000000 --- a/production/kubernetes/build/templates/base-sigv4/main.jsonnet +++ /dev/null @@ -1,31 +0,0 @@ -local agent = import 'grafana-agent/grafana-agent.libsonnet'; - -local k = import 'ksonnet-util/kausal.libsonnet'; -local serviceAccount = k.core.v1.serviceAccount; - -agent { - _images+:: { - agent: (import 'version.libsonnet'), - }, - - _config+:: { - namespace: '${NAMESPACE}', - agent_remote_write: [{ - url: '${REMOTE_WRITE_URL}', - sigv4: { - region: '${REGION}', - }, - }], - - // Since the config map isn't managed by Tanka, we don't want to - // add the configmap's hash as an annotation for the Kubernetes - // YAML manifest. - agent_config_hash_annotation: false, - }, - - agent_rbac+: { - service_account+: serviceAccount.mixin.metadata.withAnnotationsMixin({ - 'eks.amazonaws.com/role-arn': '${ROLE_ARN}', - }), - }, -} diff --git a/production/kubernetes/build/templates/base-sigv4/spec.json b/production/kubernetes/build/templates/base-sigv4/spec.json deleted file mode 100644 index d97d07154983..000000000000 --- a/production/kubernetes/build/templates/base-sigv4/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "template" - }, - "spec": { - "apiServer": "", - "namespace": "" - } -} diff --git a/production/kubernetes/build/templates/base/main.jsonnet b/production/kubernetes/build/templates/base/main.jsonnet deleted file mode 100644 index 687b6ad74ddd..000000000000 --- a/production/kubernetes/build/templates/base/main.jsonnet +++ /dev/null @@ -1,23 +0,0 @@ -local agent = import 'grafana-agent/grafana-agent.libsonnet'; - -agent { - _images+:: { - agent: (import 'version.libsonnet'), - }, - - _config+:: { - namespace: '${NAMESPACE}', - agent_remote_write: [{ - url: '${REMOTE_WRITE_URL}', - basic_auth: { - username: '${REMOTE_WRITE_USERNAME}', - password: '${REMOTE_WRITE_PASSWORD}', - }, - }], - - // Since the config map isn't managed by Tanka, we don't want to 
- // add the configmap's hash as an annotation for the Kubernetes - // YAML manifest. - agent_config_hash_annotation: false, - }, -} diff --git a/production/kubernetes/build/templates/base/spec.json b/production/kubernetes/build/templates/base/spec.json deleted file mode 100644 index d97d07154983..000000000000 --- a/production/kubernetes/build/templates/base/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "template" - }, - "spec": { - "apiServer": "", - "namespace": "" - } -} diff --git a/production/kubernetes/build/templates/loki/main.jsonnet b/production/kubernetes/build/templates/loki/main.jsonnet deleted file mode 100644 index 1ec9540e62e8..000000000000 --- a/production/kubernetes/build/templates/loki/main.jsonnet +++ /dev/null @@ -1,17 +0,0 @@ -local agent = import 'grafana-agent/v2/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -{ - agent: - agent.new(name='grafana-agent-logs', namespace='${NAMESPACE}') + - agent.withDaemonSetController() + - agent.withConfigHash(false) + - // add dummy config or else will fail - agent.withAgentConfig({ - server: { log_level: 'error' }, - }) + - agent.withLogVolumeMounts() + - agent.withLogPermissions() + - // hack to disable configmap - { configMap:: super.configMap } -} diff --git a/production/kubernetes/build/templates/loki/spec.json b/production/kubernetes/build/templates/loki/spec.json deleted file mode 100644 index d97d07154983..000000000000 --- a/production/kubernetes/build/templates/loki/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "template" - }, - "spec": { - "apiServer": "", - "namespace": "" - } -} diff --git a/production/kubernetes/build/templates/operator/main.jsonnet b/production/kubernetes/build/templates/operator/main.jsonnet deleted file mode 100644 index fe49426c4fa4..000000000000 --- 
a/production/kubernetes/build/templates/operator/main.jsonnet +++ /dev/null @@ -1,160 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; -local secret = k.core.v1.secret; -local pvc = k.core.v1.persistentVolumeClaim; - -local gen = import 'agent-operator-gen/main.libsonnet'; -local ga = gen.monitoring.v1alpha1.grafanaAgent; -local mi = gen.monitoring.v1alpha1.metricsInstance; -local li = gen.monitoring.v1alpha1.logsInstance; -local pl = gen.monitoring.v1alpha1.podLogs; -local int = gen.monitoring.v1alpha1.integration; - -local op = import 'grafana-agent-operator/operator.libsonnet'; -local ga_util = import 'grafana-agent-operator/util/grafana-agent.libsonnet'; -local mi_util = import 'grafana-agent-operator/util/metricsinstance.libsonnet'; -local li_util = import 'grafana-agent-operator/util/logsinstance.libsonnet'; -local pl_util = import 'grafana-agent-operator/util/k8slogs.libsonnet'; -local mon_util = import 'grafana-agent-operator/util/k8smonitors.libsonnet'; -local int_util = import 'grafana-agent-operator/util/integrations.libsonnet'; - -local ksm = import 'kube-state-metrics/kube-state-metrics.libsonnet'; - -{ - local this = self, - - _images:: { - agent: 'grafana/agent:v0.37.4', - agent_operator: 'grafana/agent-operator:v0.37.4', - ksm: 'registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.5.0', - }, - - _config:: { - namespace: '${NAMESPACE}', - metrics_url: '${METRICS_URL}', - metrics_user: '${METRICS_USER}', - metrics_key: '${METRICS_KEY}', - logs_url: '${LOGS_URL}', - logs_user: '${LOGS_USER}', - logs_key: '${LOGS_KEY}', - cluster_label: { cluster: '${CLUSTER}' }, - kubelet_job: 'kubelet', - cadvisor_job: 'cadvisor', - ksm_job: 'kube-state-metrics', - ksm_version: '2.5.0', - }, - - operator: - op.new(name='grafana-agent-operator', namespace=this._config.namespace, image=this._images.agent_operator, serviceAccount='grafana-agent-operator') + - op.withRbac(name='grafana-agent-operator', namespace=this._config.namespace), - - grafana_agent: - 
ga.new(name='grafana-agent') + - ga.metadata.withNamespace(this._config.namespace) + - ga.spec.withServiceAccountName('grafana-agent') + - ga.spec.withImage(this._images.agent) + - ga.spec.metrics.instanceSelector.withMatchLabels({ agent: 'grafana-agent' }) + - ga.spec.logs.instanceSelector.withMatchLabels({ agent: 'grafana-agent' }) + - ga.spec.integrations.selector.withMatchLabels({ agent: 'grafana-agent' }) + - ga.spec.metrics.withExternalLabels(this._config.cluster_label), - rbac: - ga_util.withRbac(name='grafana-agent', namespace=this._config.namespace), - - metrics_instance: - mi.new(name='grafana-agent-metrics') + - mi.metadata.withNamespace(this._config.namespace) + - mi.metadata.withLabels({ agent: 'grafana-agent' }) + - mi.spec.serviceMonitorSelector.withMatchLabels({ instance: 'primary' }) + - mi_util.withRemoteWrite(secretName='metrics-secret', metricsUrl=this._config.metrics_url) + - mi_util.withNilServiceMonitorNamespace(), - metrics_secret: - secret.new('metrics-secret', {}) + - secret.withStringData({ - username: this._config.metrics_user, - password: this._config.metrics_key, - }) + secret.mixin.metadata.withNamespace(this._config.namespace), - - logs_instance: - li.new(name='grafana-agent-logs') + - li.metadata.withNamespace(this._config.namespace) + - li.metadata.withLabels({ agent: 'grafana-agent' }) + - li.spec.podLogsSelector.withMatchLabels({ instance: 'primary' }) + - li_util.withLogsClient(secretName='logs-secret', logsUrl=this._config.logs_url, externalLabels=this._config.cluster_label) + - li_util.withNilPodLogsNamespace(), - logs_secret: - secret.new('logs-secret', {}) + - secret.withStringData({ - username: this._config.logs_user, - password: this._config.logs_key, - }) + secret.mixin.metadata.withNamespace(this._config.namespace), - - pod_logs: - pl.new('kubernetes-logs') + - pl.metadata.withNamespace(this._config.namespace) + - pl.metadata.withLabels({ instance: 'primary' }) + - 
pl.spec.withPipelineStages(pl.spec.pipelineStages.withCri({})) + - pl.spec.namespaceSelector.withAny(true) + - pl.spec.selector.withMatchLabels({}) + - pl.spec.withRelabelings(pl_util.withK8sLogsRelabeling()), - - k8s_monitors: [ - mon_util.newKubernetesMonitor( - name='kubelet-monitor', - namespace=this._config.namespace, - monitorLabels={ instance: 'primary' }, - targetNamespace='default', - targetLabels={ 'app.kubernetes.io/name': 'kubelet' }, - jobLabel=this._config.kubelet_job, - metricsPath='/metrics', - allowlist=false, - allowlistMetrics=[] - ), - mon_util.newKubernetesMonitor( - name='cadvisor-monitor', - namespace=this._config.namespace, - monitorLabels={ instance: 'primary' }, - targetNamespace='default', - targetLabels={ 'app.kubernetes.io/name': 'kubelet' }, - jobLabel=this._config.cadvisor_job, - metricsPath='/metrics/cadvisor', - allowlist=false, - allowlistMetrics=[] - ), - mon_util.newServiceMonitor( - name='ksm-monitor', - namespace=this._config.namespace, - monitorLabels={ instance: 'primary' }, - targetNamespace=this._config.namespace, - targetLabels={ 'app.kubernetes.io/name': 'kube-state-metrics' }, - jobLabel=this._config.ksm_job, - metricsPath='/metrics', - allowlist=false, - allowlistMetrics=[] - ), - ], - - kube_state_metrics: - ksm { - name:: 'kube-state-metrics', - namespace:: this._config.namespace, - version:: this._config.ksm_version, - image:: this._images.ksm, - }, - - events: - int.new('agent-eventhandler') + - int.metadata.withNamespace(this._config.namespace) + - int.metadata.withLabels({ agent: 'grafana-agent' }) + - int.spec.withName('eventhandler') + - int.spec.type.withUnique(true) + - int.spec.withConfig({ - logs_instance: this._config.namespace + '/' + 'grafana-agent-logs', - cache_path: '/etc/eventhandler/eventhandler.cache', - }) + - int_util.withPVC('agent-eventhandler'), - pvc: - pvc.new('agent-eventhandler') + - pvc.mixin.metadata.withNamespace(this._config.namespace) + - pvc.mixin.spec.withAccessModes('ReadWriteOnce') 
+ - pvc.mixin.spec.resources.withRequests({ storage: '1Gi' }), - -} diff --git a/production/kubernetes/build/templates/operator/spec.json b/production/kubernetes/build/templates/operator/spec.json deleted file mode 100644 index d97d07154983..000000000000 --- a/production/kubernetes/build/templates/operator/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "template" - }, - "spec": { - "apiServer": "", - "namespace": "" - } -} diff --git a/production/kubernetes/build/templates/traces/main.jsonnet b/production/kubernetes/build/templates/traces/main.jsonnet deleted file mode 100644 index 4868b6829aaf..000000000000 --- a/production/kubernetes/build/templates/traces/main.jsonnet +++ /dev/null @@ -1,41 +0,0 @@ -local agent = import 'grafana-agent/v2/main.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -local containerPort = k.core.v1.containerPort; - -local newPort(name, portNumber, protocol='TCP') = - // Port names for pods cannot be longer than 15 characters. 
- if std.length(name) > 15 then - error 'port name cannot be longer than 15 characters' - else containerPort.new(name, portNumber) + containerPort.withProtocol(protocol); - -{ - agent: - agent.new(name='grafana-agent-traces', namespace='${NAMESPACE}') + - agent.withDeploymentController(replicas=1) + - agent.withConfigHash(false) + - agent.withPortsMixin([ - // Jaeger receiver - newPort('thrift-compact', 6831, 'UDP'), - newPort('thrift-binary', 6832, 'UDP'), - newPort('thrift-http', 14268, 'TCP'), - newPort('thrift-grpc', 14250, 'TCP'), - - // Zipkin - newPort('zipkin', 9411, 'TCP'), - - // OTLP - newPort('otlp-grpc', 4317, 'TCP'), - newPort('otlp-http', 4318, 'TCP'), - - // Opencensus - newPort('opencensus', 55678, 'TCP'), - ]) + - agent.withService() + - // add dummy config or will fail - agent.withAgentConfig({ - server: { log_level: 'error' }, - }) + - // remove configMap for generated manifests - { configMap:: super.configMap } -} diff --git a/production/kubernetes/build/templates/traces/spec.json b/production/kubernetes/build/templates/traces/spec.json deleted file mode 100644 index d97d07154983..000000000000 --- a/production/kubernetes/build/templates/traces/spec.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "apiVersion": "tanka.dev/v1alpha1", - "kind": "Environment", - "metadata": { - "name": "template" - }, - "spec": { - "apiServer": "", - "namespace": "" - } -} diff --git a/production/kubernetes/install-bare.sh b/production/kubernetes/install-bare.sh deleted file mode 100644 index 93cc7120af23..000000000000 --- a/production/kubernetes/install-bare.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash -# shellcheck shell=bash - -# -# install-bare.sh is an installer for the Agent without a ConfigMap. It is -# used during the Grafana Cloud integrations wizard and is not recommended -# to be used directly. Instead of calling this script directly, please -# make a copy of ./agent-bare.yaml and modify it for your needs. 
-# -# Note that agent-bare.yaml does not have a ConfigMap, so the Grafana Agent -# will not launch until one is created. For more information on setting up -# a ConfigMap, please refer to: -# -# Metrics quickstart: https://grafana.com/docs/grafana-cloud/quickstart/agent-k8s/k8s_agent_metrics/ -# Logs quickstart: https://grafana.com/docs/grafana-cloud/quickstart/agent-k8s/k8s_agent_logs/ -# - -check_installed() { - if ! type "$1" >/dev/null 2>&1; then - echo "error: $1 not installed" >&2 - exit 1 - fi -} - -check_installed curl -check_installed envsubst - -MANIFEST_BRANCH=v0.37.4 -MANIFEST_URL=${MANIFEST_URL:-https://raw.githubusercontent.com/grafana/agent/${MANIFEST_BRANCH}/production/kubernetes/agent-bare.yaml} -NAMESPACE=${NAMESPACE:-default} - -export NAMESPACE - -curl -fsSL "$MANIFEST_URL" | envsubst diff --git a/production/tanka/grafana-agent-operator/jsonnetfile.json b/production/tanka/grafana-agent-operator/jsonnetfile.json deleted file mode 100644 index bb15a4133cc2..000000000000 --- a/production/tanka/grafana-agent-operator/jsonnetfile.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "dependencies": [ - { - "name": "ksonnet-util", - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs", - "subdir": "ksonnet-util" - } - }, - "version": "master" - }, - { - "name": "agent-operator-gen", - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/grafana-agent-libsonnet.git", - "subdir": "0.26" - } - }, - "version": "main" - }, - { - "name": "prom-operator-gen", - "source": { - "git": { - "remote": "https://github.com/jsonnet-libs/prometheus-operator-libsonnet.git", - "subdir": "0.57" - } - }, - "version": "main" - } - ] -} diff --git a/production/tanka/grafana-agent-operator/operator.libsonnet b/production/tanka/grafana-agent-operator/operator.libsonnet deleted file mode 100644 index 71c2fb8264c9..000000000000 --- a/production/tanka/grafana-agent-operator/operator.libsonnet +++ /dev/null @@ -1,60 +0,0 @@ -{ - 
new(name='grafana-agent-operator', namespace='', image='grafana/agent-operator:v0.26.0-rc.0', serviceAccount=''):: { - local k = (import 'ksonnet-util/kausal.libsonnet'), - - local container = k.core.v1.container, - local deployment = k.apps.v1.deployment, - - local this = self, - - container:: - container.new(name, image) + - container.withArgsMixin(k.util.mapToFlags({'-kubelet-service': 'default/kubelet'})), - - controller: - deployment.new(name, 1, [this.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.spec.withServiceAccount(name), - - }, - - withRbac(name, namespace):: { - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: { namespace: namespace } }, - local policyRule = k.rbac.v1.policyRule, - local serviceAccount = k.core.v1.serviceAccount, - - rbac: - k.util.rbac(name, [ - policyRule.withApiGroups(['monitoring.grafana.com']) + - policyRule.withResources(['grafanaagents', 'metricsinstances', 'logsinstances', 'podlogs', 'integrations']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withApiGroups(['monitoring.grafana.com']) + - policyRule.withResources(['grafanaagents/finalizers', 'metricsinstances/finalizers', 'logsinstances/finalizers', 'podlogs/finalizers', 'integrations/finalizers']) + - policyRule.withVerbs(['get', 'list', 'watch', 'update']), - - policyRule.withApiGroups(['monitoring.coreos.com']) + - policyRule.withResources(['podmonitors', 'probes', 'servicemonitors']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withApiGroups(['monitoring.coreos.com']) + - policyRule.withResources(['podmonitors/finalizers', 'probes/finalizers', 'servicemonitors/finalizers']) + - policyRule.withVerbs(['get', 'list', 'watch', 'update']), - - policyRule.withApiGroups(['']) + - policyRule.withResources(['namespaces', 'nodes']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withApiGroups(['']) + - policyRule.withResources(['secrets', 'services', 
'configmaps', 'endpoints']) + - policyRule.withVerbs(['get', 'list', 'watch', 'create', 'update', 'patch', 'delete']), - - policyRule.withApiGroups(['apps']) + - policyRule.withResources(['statefulsets', 'daemonsets', 'deployments']) + - policyRule.withVerbs(['get', 'list', 'watch', 'create', 'update', 'patch', 'delete']), - - ]) { - service_account+: serviceAccount.mixin.metadata.withNamespace(namespace), - }, - } -} diff --git a/production/tanka/grafana-agent-operator/util/grafana-agent.libsonnet b/production/tanka/grafana-agent-operator/util/grafana-agent.libsonnet deleted file mode 100644 index d5edf3ac7bd5..000000000000 --- a/production/tanka/grafana-agent-operator/util/grafana-agent.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -{ - withRbac(name, namespace):: { - local k = (import 'ksonnet-util/kausal.libsonnet') + { _config+:: { namespace: namespace } }, - local policyRule = k.rbac.v1.policyRule, - local serviceAccount = k.core.v1.serviceAccount, - - rbac: - k.util.rbac(name, [ - policyRule.withApiGroups(['']) + - policyRule.withResources(['nodes', 'nodes/proxy', 'nodes/metrics', 'services', 'endpoints', 'pods', 'events']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withApiGroups(['networking.k8s.io']) + - policyRule.withResources(['ingresses']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withNonResourceURLs(['/metrics', '/metrics/cadvisor']) + - policyRule.withVerbs(['get']), - ]) { - service_account+: serviceAccount.mixin.metadata.withNamespace(namespace), - }, - } -} diff --git a/production/tanka/grafana-agent-operator/util/integrations.libsonnet b/production/tanka/grafana-agent-operator/util/integrations.libsonnet deleted file mode 100644 index 04161e8a29ec..000000000000 --- a/production/tanka/grafana-agent-operator/util/integrations.libsonnet +++ /dev/null @@ -1,17 +0,0 @@ -local gen = import 'agent-operator-gen/main.libsonnet'; -local int = gen.monitoring.v1alpha1.integration; - -{ - withPVC(name):: { - spec+: 
{ - volumeMounts: [ - int.spec.volumeMounts.withName(name) + - int.spec.volumeMounts.withMountPath('/etc/eventhandler') - ], - volumes: [ - int.spec.volumes.withName(name) + - int.spec.volumes.persistentVolumeClaim.withClaimName(name) - ] - } - } -} diff --git a/production/tanka/grafana-agent-operator/util/k8slogs.libsonnet b/production/tanka/grafana-agent-operator/util/k8slogs.libsonnet deleted file mode 100644 index 93d4b9e2104c..000000000000 --- a/production/tanka/grafana-agent-operator/util/k8slogs.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -local gen = import 'agent-operator-gen/main.libsonnet'; -local pl = gen.monitoring.v1alpha1.podLogs; -local r = pl.spec.relabelings; - -{ - withK8sLogsRelabeling():: [ - r.withSourceLabels(['__meta_kubernetes_pod_node_name']) + - r.withTargetLabel('__host__'), - - // r.withAction('replace') + - // r.withReplacement('$1') + - // r.withSeparator('/') + - // r.withSourceLabels(['__meta_kubernetes_namespace', '__meta_kubernetes_pod_name']) + - // r.withTargetLabel('job'), - - r.withAction('replace') + - r.withSourceLabels('__meta_kubernetes_namespace') + - r.withTargetLabel('namespace'), - - r.withAction('replace') + - r.withSourceLabels('__meta_kubernetes_pod_name') + - r.withTargetLabel('pod'), - - r.withAction('replace') + - r.withSourceLabels('__meta_kubernetes_pod_container_name') + - r.withTargetLabel('container'), - - r.withReplacement('/var/log/pods/*$1/*.log') + - r.withSeparator('/') + - r.withSourceLabels(['__meta_kubernetes_pod_uid', '__meta_kubernetes_pod_container_name']) + - r.withTargetLabel('__path__') - ] -} diff --git a/production/tanka/grafana-agent-operator/util/k8smonitors.libsonnet b/production/tanka/grafana-agent-operator/util/k8smonitors.libsonnet deleted file mode 100644 index 90de24a88962..000000000000 --- a/production/tanka/grafana-agent-operator/util/k8smonitors.libsonnet +++ /dev/null @@ -1,56 +0,0 @@ -local prom_gen = import 'prom-operator-gen/main.libsonnet'; -local sm = 
prom_gen.monitoring.v1.serviceMonitor; -local e = sm.spec.endpoints; -local mr = e.metricRelabelings; -local r = e.relabelings; - -{ - local metricArrayToString(arr) = std.join("|", arr), - - local withJobReplace(job_label) = - r.withAction('replace') + - r.withTargetLabel('job') + - r.withReplacement(job_label), - - local withAllowList(metrics) = - mr.withAction('keep') + - mr.withSourceLabels(['__name__']) + - mr.withRegex(metricArrayToString(metrics)), - - local withMetricsPath() = - r.withSourceLabels(['__metrics_path__']) + - r.withTargetLabel('metrics_path'), - - local withDefaultEndpoint(jobLabel, port, allowlist, allowlistMetrics, path) = - e.withHonorLabels(true) + - e.withInterval('60s') + - (if allowlist then e.withMetricRelabelings(withAllowList(allowlistMetrics)) else {}) + - e.withPort(port) + - e.withPath(path), - - - newKubernetesMonitor(name, namespace, monitorLabels, targetNamespace, targetLabels, jobLabel, metricsPath, allowlist=false, allowlistMetrics=[]):: - sm.new(name) + - sm.metadata.withNamespace(namespace) + - sm.metadata.withLabels(monitorLabels) + - sm.spec.namespaceSelector.withMatchNames(targetNamespace) + - sm.spec.selector.withMatchLabels(targetLabels) + - sm.spec.withEndpoints([ - withDefaultEndpoint(jobLabel, 'https-metrics', allowlist, allowlistMetrics, metricsPath) + - e.withBearerTokenFile('/var/run/secrets/kubernetes.io/serviceaccount/token') + - e.tlsConfig.withInsecureSkipVerify(true) + - e.withRelabelings([withMetricsPath(), withJobReplace(jobLabel)]) + - e.withScheme('https') - ]), - - newServiceMonitor(name, namespace, monitorLabels, targetNamespace, targetLabels, jobLabel, metricsPath, allowlist=false, allowlistMetrics=[]):: - sm.new(name) + - sm.metadata.withNamespace(namespace) + - sm.metadata.withLabels(monitorLabels) + - sm.spec.namespaceSelector.withMatchNames(targetNamespace) + - sm.spec.selector.withMatchLabels(targetLabels) + - sm.spec.withEndpoints([ - withDefaultEndpoint(jobLabel, 'http-metrics', allowlist, 
allowlistMetrics, metricsPath) + - e.withRelabelings([withJobReplace(jobLabel)]) - ]), -} diff --git a/production/tanka/grafana-agent-operator/util/logsinstance.libsonnet b/production/tanka/grafana-agent-operator/util/logsinstance.libsonnet deleted file mode 100644 index 4f667c14f5f0..000000000000 --- a/production/tanka/grafana-agent-operator/util/logsinstance.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -local gen = import 'agent-operator-gen/main.libsonnet'; -local li = gen.monitoring.v1alpha1.logsInstance; -local clients = li.spec.clients; - -{ - withLogsClient(secretName, logsUrl, externalLabels={}):: - li.spec.withClients( - clients.withUrl(logsUrl) + - clients.basicAuth.username.withKey('username') + - clients.basicAuth.username.withName(secretName) + - clients.basicAuth.password.withKey('password') + - clients.basicAuth.password.withName(secretName) + - if externalLabels != {} then clients.withExternalLabels(externalLabels) else {} - ), - - withNilPodLogsNamespace():: { - spec+: { - podLogsNamespaceSelector: {} - } - }, -} diff --git a/production/tanka/grafana-agent-operator/util/metricsinstance.libsonnet b/production/tanka/grafana-agent-operator/util/metricsinstance.libsonnet deleted file mode 100644 index af0da101fd46..000000000000 --- a/production/tanka/grafana-agent-operator/util/metricsinstance.libsonnet +++ /dev/null @@ -1,20 +0,0 @@ -local gen = import 'agent-operator-gen/main.libsonnet'; -local mi = gen.monitoring.v1alpha1.metricsInstance; -local rw = mi.spec.remoteWrite; - -{ - withRemoteWrite(secretName, metricsUrl):: - mi.spec.withRemoteWrite( - rw.withUrl(metricsUrl) + - rw.basicAuth.username.withKey('username') + - rw.basicAuth.username.withName(secretName) + - rw.basicAuth.password.withKey('password') + - rw.basicAuth.password.withName(secretName) - ), - - withNilServiceMonitorNamespace():: { - spec+: { - serviceMonitorNamespaceSelector: {} - } - } -} diff --git a/production/tanka/grafana-agent/config.libsonnet 
b/production/tanka/grafana-agent/config.libsonnet deleted file mode 100644 index 7c1e7a2c8c80..000000000000 --- a/production/tanka/grafana-agent/config.libsonnet +++ /dev/null @@ -1,117 +0,0 @@ -local k8s_v2 = import './v2/internal/helpers/k8s.libsonnet'; - -{ - _images+:: { - agent: 'grafana/agent:latest', - agentctl: 'grafana/agentctl:latest', - }, - - _config+:: { - // - // Deployment options - // - agent_cluster_role_name: 'grafana-agent', - agent_configmap_name: 'grafana-agent', - agent_deployment_configmap_name: self.agent_configmap_name + '-deployment', - agent_pod_name: 'grafana-agent', - agent_deployment_pod_name: self.agent_pod_name + '-deployment', - - cluster_dns_tld: 'local', - cluster_dns_suffix: 'cluster.' + self.cluster_dns_tld, - cluster_name: error 'must specify cluster name', - namespace: error 'must specify namespace', - - agent_config_hash_annotation: true, - - // - // Prometheus instance options - // - - // Enabling this causes the agent to only scrape metrics on the same node - // on which it is currently running. - // - // Take CAUTION when disabling this! If the agent is deployed - // as a DaemonSet (like it is here by default), then disabling this will - // scrape all metrics multiple times, once per node, leading to - // duplicate samples being rejected and might hit limits. - agent_host_filter: true, - - // The directory where the WAL is stored for all instances. - agent_wal_dir: '/var/lib/agent/data', - - prometheus_kubernetes_api_server_address: 'kubernetes.default.svc.%(cluster_dns_suffix)s:443' % self, - prometheus_insecure_skip_verify: false, - scrape_api_server_endpoints: true, - - // - // Config passed to the agent - // - // agent_config is rendered as a YAML and is the configuration file used - // to control the agent. A single instance is hard-coded and its - // scrape_configs are defined below. 
- // - // deployment_agent_config is a copy of `agent_config` that is used by the - // single-replica deployment to scrape jobs that don't work in host - // filtering mode. - agent_config: { - server: { - log_level: 'info', - }, - - metrics: { - global: { - scrape_interval: '1m', - }, - - wal_directory: $._config.agent_wal_dir, - - configs: [{ - name: 'agent', - - host_filter: $._config.agent_host_filter, - - scrape_configs: - if $._config.agent_host_filter then - $._config.kubernetes_scrape_configs - else - $._config.kubernetes_scrape_configs + $._config.deployment_scrape_configs, - remote_write: $._config.agent_remote_write, - }], - }, - }, - deployment_agent_config: self.agent_config { - prometheus+: { - configs: [{ - name: 'agent', - - host_filter: false, - - scrape_configs: $._config.deployment_scrape_configs, - remote_write: $._config.agent_remote_write, - }], - }, - - }, - - local all_scrape_configs = k8s_v2.metrics({ - scrape_api_server_endpoints: $._config.scrape_api_server_endpoints, - insecure_skip_verify: $._config.prometheus_insecure_skip_verify, - kubernetes_api_server_address: $._config.prometheus_kubernetes_api_server_address, - ksm_namespace: $._config.namespace, - node_exporter_namespace: $._config.namespace, - }), - - // We have two optional extension points for scrape config. One for the - // statefulset that holds all the agents attached to a node - // (kubernetes_scrape_configs) and One for the single replica deployment - // that is used to scrape jobs that don't work with host filtering mode - // (deployment_scrape_configs) the later is only used when host_filter = - // true. 
- deployment_scrape_configs: - std.filter(function(job) job.job_name == 'default/kubernetes', all_scrape_configs), - kubernetes_scrape_configs: - std.filter(function(job) job.job_name != 'default/kubernetes', all_scrape_configs), - - agent_remote_write: [], - }, -} diff --git a/production/tanka/grafana-agent/grafana-agent.libsonnet b/production/tanka/grafana-agent/grafana-agent.libsonnet deleted file mode 100644 index f280da77fcf9..000000000000 --- a/production/tanka/grafana-agent/grafana-agent.libsonnet +++ /dev/null @@ -1,85 +0,0 @@ -local config = import 'config.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -k + config { - local configMap = $.core.v1.configMap, - local container = $.core.v1.container, - local daemonSet = $.apps.v1.daemonSet, - local deployment = $.apps.v1.deployment, - local policyRule = $.rbac.v1.policyRule, - local serviceAccount = $.core.v1.serviceAccount, - - agent_rbac: - $.util.rbac($._config.agent_cluster_role_name, [ - policyRule.withApiGroups(['']) + - policyRule.withResources(['nodes', 'nodes/proxy', 'services', 'endpoints', 'pods']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withNonResourceUrls('/metrics') + - policyRule.withVerbs(['get']), - ]) { - service_account+: - serviceAccount.mixin.metadata.withNamespace($._config.namespace), - }, - - agent_config_map: - configMap.new($._config.agent_configmap_name) + - configMap.mixin.metadata.withNamespace($._config.namespace) + - configMap.withData({ - 'agent.yml': $.util.manifestYaml($._config.agent_config), - }), - - agent_args:: { - 'config.file': '/etc/agent/agent.yml', - 'metrics.wal-directory': '/tmp/agent/data', - }, - - agent_container:: - container.new('agent', $._images.agent) + - container.withPorts($.core.v1.containerPort.new('http-metrics', 80)) + - container.withArgsMixin($.util.mapToFlags($.agent_args)) + - container.withEnv([ - $.core.v1.envVar.fromFieldPath('HOSTNAME', 'spec.nodeName'), - ]) + - 
container.mixin.securityContext.withPrivileged(true) + - container.mixin.securityContext.withRunAsUser(0), - - config_hash_mixin:: { - local hash(config) = { config_hash: std.md5(std.toString(config)) }, - daemonSet: - if $._config.agent_config_hash_annotation then - daemonSet.mixin.spec.template.metadata.withAnnotationsMixin(hash($._config.agent_config)) - else {}, - deployment: - if $._config.agent_config_hash_annotation then - deployment.mixin.spec.template.metadata.withAnnotationsMixin(hash($._config.deployment_agent_config)) - else {}, - }, - - // TODO(rfratto): persistent storage for the WAL here is missing. hostVolume? - agent_daemonset: - daemonSet.new($._config.agent_pod_name, [$.agent_container]) + - daemonSet.mixin.metadata.withNamespace($._config.namespace) + - daemonSet.mixin.spec.template.spec.withServiceAccount($._config.agent_cluster_role_name) + - self.config_hash_mixin.daemonSet + - $.util.configVolumeMount($._config.agent_configmap_name, '/etc/agent'), - - agent_deployment_config_map: - if $._config.agent_host_filter then - configMap.new($._config.agent_deployment_configmap_name) + - configMap.mixin.metadata.withNamespace($._config.namespace) + - configMap.withData({ - 'agent.yml': $.util.manifestYaml($._config.deployment_agent_config), - }) - else {}, - - agent_deployment: - if $._config.agent_host_filter then - deployment.new($._config.agent_deployment_pod_name, 1, [$.agent_container]) + - deployment.mixin.metadata.withNamespace($._config.namespace) + - deployment.mixin.spec.template.spec.withServiceAccount($._config.agent_cluster_role_name) + - deployment.mixin.spec.withReplicas(1) + - self.config_hash_mixin.deployment + - $.util.configVolumeMount($._config.agent_deployment_configmap_name, '/etc/agent') - else {}, -} diff --git a/production/tanka/grafana-agent/jsonnetfile.json b/production/tanka/grafana-agent/jsonnetfile.json deleted file mode 100644 index c903ac17c07c..000000000000 --- a/production/tanka/grafana-agent/jsonnetfile.json +++ 
/dev/null @@ -1,14 +0,0 @@ -{ - "dependencies": [ - { - "name": "ksonnet-util", - "source": { - "git": { - "remote": "https://github.com/grafana/jsonnet-libs", - "subdir": "ksonnet-util" - } - }, - "version": "master" - } - ] -} diff --git a/production/tanka/grafana-agent/scraping-svc/main.libsonnet b/production/tanka/grafana-agent/scraping-svc/main.libsonnet deleted file mode 100644 index 7e414a0e816d..000000000000 --- a/production/tanka/grafana-agent/scraping-svc/main.libsonnet +++ /dev/null @@ -1,107 +0,0 @@ -local config = import '../config.libsonnet'; -local syncer = import './syncer.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -local containerPort = k.core.v1.containerPort; -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local deployment = k.apps.v1.deployment; -local policyRule = k.rbac.v1.policyRule; - -{ - new(namespace='default', kube_namespace='kube-system'):: config { - local this = self, - - // Use the default config from the non-scraping-service mode - // but change some of the defaults. - _config+:: { - agent_cluster_role_name: 'grafana-agent-cluster', - agent_configmap_name: 'grafana-agent-cluster', - agent_pod_name: 'grafana-agent-cluster', - agent_replicas: 3, - - namespace: namespace, - kube_namespace: kube_namespace, - - // Scraping service should not be using host filtering - agent_host_filter: false, - - // - // KVStore options - // - agent_config_kvstore: error 'must configure config kvstore', - agent_ring_kvstore: error 'must configure ring kvstore', - - agent_config+: { - metrics+: { - // No configs are used in the scraping service mode. 
- configs:: [], - - scraping_service: { - enabled: true, - kvstore: this._config.agent_config_kvstore, - lifecycler: { - ring: { - kvstore: this._config.agent_ring_kvstore, - }, - }, - }, - }, - }, - }, - - rbac: - // Need to do a hack here so ksonnet util has our configs :( - (k { _config+: this._config }).util.rbac(this._config.agent_cluster_role_name, [ - policyRule.withApiGroups(['']) + - policyRule.withResources(['nodes', 'nodes/proxy', 'services', 'endpoints', 'pods']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withNonResourceUrls('/metrics') + - policyRule.withVerbs(['get']), - ]), - - configMap: - configMap.new(this._config.agent_configmap_name) + - configMap.withData({ - 'agent.yml': k.util.manifestYaml(this._config.agent_config), - }), - - container:: - container.new('agent-cluster', this._images.agent) + - container.withPorts(containerPort.new(name='http-metrics', port=80)) + - container.withArgsMixin(k.util.mapToFlags({ - 'config.file': '/etc/agent/agent.yml', - 'metrics.wal-directory': '/tmp/agent/data', - })) + - container.withEnv([ - k.core.v1.envVar.fromFieldPath('HOSTNAME', 'spec.nodeName'), - ]) + - container.mixin.securityContext.withPrivileged(true) + - container.mixin.securityContext.withRunAsUser(0), - - deployment: - deployment.new(this._config.agent_pod_name, this._config.agent_replicas, [this.container]) + - deployment.mixin.spec.template.spec.withServiceAccount(this._config.agent_cluster_role_name) + - deployment.mixin.spec.withMinReadySeconds(60) + - deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(0) + - deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1) + - deployment.mixin.spec.template.spec.withTerminationGracePeriodSeconds(4800) + - k.util.configVolumeMount(this._config.agent_configmap_name, '/etc/agent'), - - service: - k.util.serviceFor(this.deployment), - - // Create the cronjob that syncs configs to the API - syncer: - syncer.new(this._images.agentctl, this._config), - }, - - 
withImagesMixin(images):: { _images+: images }, - - // withConfig overrides the config used for the agent. - withConfig(config):: { _config: config }, - - // withConfigMixin merges the provided config with the existing config. - withConfigMixin(config):: { _config+: config }, -} diff --git a/production/tanka/grafana-agent/scraping-svc/syncer.libsonnet b/production/tanka/grafana-agent/scraping-svc/syncer.libsonnet deleted file mode 100644 index d24373e164f0..000000000000 --- a/production/tanka/grafana-agent/scraping-svc/syncer.libsonnet +++ /dev/null @@ -1,61 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local cronJob = k.batch.v1.cronJob; -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local deployment = k.apps.v1.deployment; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(agentctl_image, config):: { - local this = self, - - local configs = std.foldl( - function(agg, cfg) - // Sanitize the name and remove / so every file goes into the same - // folder. 
- local name = std.strReplace(cfg.job_name, '/', '_'); - - agg { - ['%s.yml' % name]: k.util.manifestYaml( - { - scrape_configs: [cfg], - remote_write: config.agent_remote_write, - }, - ), - }, - config.kubernetes_scrape_configs, - {}, - ), - - configMap: - configMap.new('agent-syncer') + - configMap.withData(configs), - - container:: - container.new('agent-syncer', agentctl_image) + - container.withArgsMixin([ - 'config-sync', - '--addr=http://%(agent_pod_name)s.%(namespace)s.svc.cluster.local:80' % config, - '/etc/configs', - ]) + - container.withVolumeMounts([ - volumeMount.new('agent-syncer', '/etc/configs'), - ]), - - syncer_job: - cronJob.new('agent-syncer', '*/5 * * * *', this.container) + - cronJob.mixin.spec.withSuccessfulJobsHistoryLimit(1) + - cronJob.mixin.spec.withFailedJobsHistoryLimit(3) + - cronJob.mixin.spec.jobTemplate.spec.template.spec.withRestartPolicy('OnFailure') + - cronJob.mixin.spec.jobTemplate.spec.template.spec.withActiveDeadlineSeconds(600) + - cronJob.mixin.spec.jobTemplate.spec.withTtlSecondsAfterFinished(120) + - cronJob.mixin.spec.jobTemplate.spec.template.spec.withVolumes([ - volume.fromConfigMap( - name='agent-syncer', - configMapName=this.configMap.metadata.name, - ), - ]), - }, -} diff --git a/production/tanka/grafana-agent/smoke/avalanche/main.libsonnet b/production/tanka/grafana-agent/smoke/avalanche/main.libsonnet deleted file mode 100644 index 6887337ce2a9..000000000000 --- a/production/tanka/grafana-agent/smoke/avalanche/main.libsonnet +++ /dev/null @@ -1,49 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local pvc = k.core.v1.persistentVolumeClaim; -local service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(name='avalanche', replicas=1, namespace='', config={}):: { - local this 
= self, - - _config+:: { - image: 'quay.io/freshtracks.io/avalanche:latest', - - metric_count: 500, - label_count: 10, - series_count: 10, - metricname_length: 5, - labelname_length: 5, - value_interval: 30, - series_interval: 30, - metric_interval: 120, - } + config, - - container:: - container.new(name, this._config.image) + - container.withPorts([ - containerPort.newNamed(name='http', containerPort=9001), - ]) + - container.withArgsMixin([ - '--metric-count=%d' % this._config.metric_count, - '--label-count=%d' % this._config.label_count, - '--series-count=%d' % this._config.series_count, - '--metricname-length=%d' % this._config.metricname_length, - '--labelname-length=%d' % this._config.labelname_length, - '--value-interval=%d' % this._config.value_interval, - '--series-interval=%d' % this._config.series_interval, - '--metric-interval=%d' % this._config.metric_interval, - ]), - - deployment: - deployment.new(name, replicas, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/production/tanka/grafana-agent/smoke/crow/main.libsonnet b/production/tanka/grafana-agent/smoke/crow/main.libsonnet deleted file mode 100644 index a159e6c16657..000000000000 --- a/production/tanka/grafana-agent/smoke/crow/main.libsonnet +++ /dev/null @@ -1,36 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local pvc = k.core.v1.persistentVolumeClaim; -local service = k.core.v1.service; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -{ - new(name='crow', namespace='', config={}):: { - local this = self, - - _config+:: { - image: 'us.gcr.io/kubernetes-dev/grafana/agent-crow:main', - args: { - 'server.http.address': '0.0.0.0:80', - }, - pull_secret: '', - } + config, - - container:: - container.new(name, this._config.image) + - 
container.withPorts([ - containerPort.newNamed(name='http-metrics', containerPort=80), - ]) + - container.withArgsMixin(k.util.mapToFlags(this._config.args)), - - deployment: - deployment.new(name, 1, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.spec.template.spec.withImagePullSecrets({ name: this._config.pull_secret }), - }, -} diff --git a/production/tanka/grafana-agent/smoke/etcd/main.libsonnet b/production/tanka/grafana-agent/smoke/etcd/main.libsonnet deleted file mode 100644 index 46598632a545..000000000000 --- a/production/tanka/grafana-agent/smoke/etcd/main.libsonnet +++ /dev/null @@ -1,30 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = k.apps.v1.deployment; -local service = k.core.v1.service; - -{ - new(namespace=''):: { - container:: - container.new('etcd', 'gcr.io/etcd-development/etcd:v3.4.7') + - container.withPorts([ - containerPort.newNamed(name='etcd', containerPort=2379), - ]) + - container.withArgsMixin([ - '/usr/local/bin/etcd', - '--listen-client-urls=http://0.0.0.0:2379', - '--advertise-client-urls=http://0.0.0.0:2379', - '--log-level=info', - ]), - - deployment: - deployment.new('etcd', 1, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace), - - service: - k.util.serviceFor(self.deployment) + - service.mixin.metadata.withNamespace(namespace), - }, -} diff --git a/production/tanka/grafana-agent/smoke/main.libsonnet b/production/tanka/grafana-agent/smoke/main.libsonnet deleted file mode 100644 index d8cbac4dc142..000000000000 --- a/production/tanka/grafana-agent/smoke/main.libsonnet +++ /dev/null @@ -1,69 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; -local policyRule = k.rbac.v1.policyRule; -local serviceAccount = k.core.v1.serviceAccount; -local container = k.core.v1.container; -local containerPort = k.core.v1.containerPort; -local deployment = 
k.apps.v1.deployment; -local service = k.core.v1.service; -local util = k.util; - -{ - new(name='grafana-agent-smoke', namespace='default', config={}):: { - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: { namespace: namespace } }, - - local this = self, - - _images:: { - agentsmoke: 'us.gcr.io/kubernetes-dev/grafana/agent-smoke:main', - }, - - _config:: { - mutationFrequency: '5m', - chaosFrequency: '30m', - image: this._images.agentsmoke, - pull_secret: '', - podPrefix: 'grafana-agent', - simulateErrors: true, - } + config, - - rbac: - k.util.rbac(name, [ - policyRule.withApiGroups(['apps']) + - policyRule.withResources(['deployments/scale']) + - policyRule.withVerbs(['get', 'update']), - policyRule.withApiGroups(['']) + - policyRule.withResources(['pods']) + - policyRule.withVerbs(['list', 'delete']), - ]) { - service_account+: - serviceAccount.mixin.metadata.withNamespace(namespace), - }, - - container:: - container.new('agent-smoke', this._config.image) + - container.withPorts([ - containerPort.newNamed(name='remote-write', containerPort=19090), - ]) + - container.withArgsMixin(k.util.mapToFlags({ - 'log.level': 'debug', - namespace: namespace, - 'mutation-frequency': this._config.mutationFrequency, - 'chaos-frequency': this._config.chaosFrequency, - 'pod-prefix': this._config.podPrefix, - 'fake-remote-write': true, - 'simulate-errors': this._config.simulateErrors, - })), - - agentsmoke_deployment: - deployment.new(name, 1, [self.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.spec.withServiceAccount(name) + - deployment.spec.template.spec.withImagePullSecrets({ name: this._config.pull_secret }), - - service: - util.serviceFor(self.agentsmoke_deployment) + - service.mixin.metadata.withNamespace(namespace), - }, - - monitoring: (import './prometheus_monitoring.libsonnet'), -} diff --git a/production/tanka/grafana-agent/v1/README.md b/production/tanka/grafana-agent/v1/README.md deleted file 
mode 100644 index 5eb6f0513722..000000000000 --- a/production/tanka/grafana-agent/v1/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Tanka Configs - -**STATUS**: Abandoned. Use v0 (parent directory) or v2 instead. - -This directory contains the Tanka configs that we use to deploy the Grafana -Agent. It is marked as `v1` and is incompatible with the `v0` configs -found in the [parent directory](../). - -This library is currently a work in progress and backwards-incompatible changes -may occur. Once the library is considered complete, no further backwards -incompatible changes will be made. - -## Capabilities - -This library is significantly more flexible than its `v0` counterpart. It tries -to allow to deploy and configure the Agent in a feature matrix: - -| Mechanism | Metrics | Logs | Traces | Integrations | -| ---------------- | ------- | --------- | ------ | ------------ | -| DaemonSet | Yes | Yes | Yes | Yes | -| Deployment | Yes | No | No | No | -| Scraping Service | Yes | No | No | No | - -The library can be invoked multiple times to get full coverage. For example, you -may wish to deploy a scraping service for scalable metrics collection, and a -DaemonSet with just Loki Logs for log collection. - -Trying to use the library in incompatible ways will generate errors. For -example, you may not deploy a scraping service with Loki logs collection. - -## API - -## Generate Agent Deployment - -- `new(name, namespace)`: Create a new DaemonSet. This is the default mode to - deploy the Agent. Enables host filtering. -- `newDeployment(name, namespace)`: Create a new single-replica Deployment. - Disables host filtering. -- `newScrapingService(name, namespace, replicas)`: (Not yet available). Create a - scalable deployment of clustered Agents. Requires being given a KV store such as Redis or ETCD. - -## Configure Metrics - -- `withMetricsConfig(config)`: Creates a metrics config block. -- `defaultMetricsConfig`: Default metrics config block. 
-- `withMetricsInstances(instances)`: Creates a metrics instance config to - tell the Agent what to scrape. -- `withRemoteWrite(remote_writes)`: Configures locations to remote write metrics - to. Controls remote writes globally. -- `scrapeInstanceKubernetes`: Default metrics instance config to scrape from - Kubernetes. - -## Configure Logs - -- `withLogsConfig(config)`: Creates a Logs config block to pass to the Agent. -- `newLogsClient(client_config)`: Creates a new client configuration to pass - to `withLogsClients`. -- `withLogsClients(clients)`: Add a set of clients to a Logs config block. -- `scrapeKubernetesLogs`: Default Logs config that collects logs from Kubernetes - pods. - -## Configure Traces - -- `withTracesConfig(config)`: Creates a Traces config block to pass to the Agent. -- `withTracesRemoteWrite(remote_write)`: Configures one or multiple locations to push spans to. -- `withTracesSamplingStrategies(strategies)`: Configures strategies for trace collection. -- `withTracesScrapeConfigs(scrape_configs)`: Configures scrape configs to attach - labels to incoming spans. -- `tracesScrapeKubernetes`: Default scrape configs to collect meta information - from pods. Aligns with the labels from `scrapeInstanceKubernetes` and - `scrapeKubernetesLogs` so logs, metrics, and traces all use the same set of - labels. - -## General - -- `withImages(images)`: Use custom images. -- `withConfigHash(include=true)`: Whether to include a config hash annotation. -- `withPortsMixin(ports)`: Mixin ports from `k.core.v1.containerPort` against - the container and service. 
diff --git a/production/tanka/grafana-agent/v1/internal/agent.libsonnet b/production/tanka/grafana-agent/v1/internal/agent.libsonnet deleted file mode 100644 index d6af77dd0bd7..000000000000 --- a/production/tanka/grafana-agent/v1/internal/agent.libsonnet +++ /dev/null @@ -1,72 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local daemonSet = k.apps.v1.daemonSet; -local deployment = k.apps.v1.deployment; -local policyRule = k.rbac.v1.policyRule; -local serviceAccount = k.core.v1.serviceAccount; - -{ - newAgent(name='grafana-agent', namespace='default', image, config, use_daemonset=true):: { - local controller = if use_daemonset then daemonSet else deployment, - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: { namespace: namespace } }, - local this = self, - - _controller:: controller, - _config_hash:: true, - - listen_port:: 8080, - - rbac: - k.util.rbac(name, [ - // Need for k8s SD on Loki/Prometheus subsystems - policyRule.withApiGroups(['']) + - policyRule.withResources(['nodes', 'nodes/proxy', 'services', 'endpoints', 'pods']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - // Needed for Prometheus subsystem to scrape k8s API - policyRule.withNonResourceUrls('/metrics') + - policyRule.withVerbs(['get']), - ]) { - service_account+: - serviceAccount.mixin.metadata.withNamespace(namespace), - }, - - config_map: - configMap.new(name) + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'agent.yaml': k.util.manifestYaml(config), - }), - - container:: - container.new('agent', image) + - container.withPorts(k.core.v1.containerPort.new('http-metrics', self.listen_port)) + - container.withArgsMixin(k.util.mapToFlags({ - 'config.file': '/etc/agent/agent.yaml', - 'server.http.address': '0.0.0.0:' + this.listen_port, - })) + - container.withEnvMixin([ - k.core.v1.envVar.fromFieldPath('HOSTNAME', 'spec.nodeName'), - ]), - - 
agent: - ( - if use_daemonset then daemonSet.new(name, [self.container]) - else deployment.new(name, 1, [self.container]) - ) + - controller.mixin.metadata.withNamespace(namespace) + - controller.mixin.spec.template.spec.withServiceAccount(name) + - ( - if self._config_hash - then controller.mixin.spec.template.metadata.withAnnotationsMixin({ - config_hash: std.md5(std.toString(config)), - }) - else {} - ) + - k.util.configVolumeMount(name, '/etc/agent'), - }, - - withConfigHash(include):: { _config_hash:: include }, -} diff --git a/production/tanka/grafana-agent/v1/internal/kubernetes_instance.libsonnet b/production/tanka/grafana-agent/v1/internal/kubernetes_instance.libsonnet deleted file mode 100644 index a4749c9797fc..000000000000 --- a/production/tanka/grafana-agent/v1/internal/kubernetes_instance.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -local k8s_v2 = import '../../v2/internal/helpers/k8s.libsonnet'; - -{ - kubernetesScrapeInstanceConfig:: { - scrape_api_server_endpoints: false, - insecure_skip_verify: false, - - cluster_dns_tld: 'local', - cluster_dns_suffix: 'cluster.' 
+ self.cluster_dns_tld, - kubernetes_api_server_address: 'kubernetes.default.svc.%(cluster_dns_suffix)s:443' % self, - }, - - newKubernetesScrapeInstance(config, namespace='default'):: { - local _config = $.kubernetesScrapeInstanceConfig + config, - - name: 'kubernetes', - scrape_configs: k8s_v2.metrics({ - scrape_api_server_endpoints: _config.scrape_api_server_endpoints, - insecure_skip_verify: _config.insecure_skip_verify, - cluster_dns_tld: _config.cluster_dns_tld, - cluster_dns_suffix: _config.cluster_dns_suffix, - kubernetes_api_server_address: _config.kubernetes_api_server_address, - ksm_namespace: namespace, - node_exporter_namespace: namespace, - }), - }, -} diff --git a/production/tanka/grafana-agent/v1/internal/kubernetes_logs.libsonnet b/production/tanka/grafana-agent/v1/internal/kubernetes_logs.libsonnet deleted file mode 100644 index 8ef2d4f40200..000000000000 --- a/production/tanka/grafana-agent/v1/internal/kubernetes_logs.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local k8s_v2 = import '../../v2/internal/helpers/k8s.libsonnet'; - -{ - newKubernetesLogsCollector():: { - scrape_configs: k8s_v2.logs(), - }, -} diff --git a/production/tanka/grafana-agent/v1/internal/utils.libsonnet b/production/tanka/grafana-agent/v1/internal/utils.libsonnet deleted file mode 100644 index 35ab5834a782..000000000000 --- a/production/tanka/grafana-agent/v1/internal/utils.libsonnet +++ /dev/null @@ -1,36 +0,0 @@ -{ - // Returns true if the scrape_config only contains a service_discovery for - // Kubernetes (via kubernetes_sd_configs) that has role: pod - isOnlyK8sPodDiscovery(scrape_config):: - // Get all the *_sd_configs and filter that down to the sd_configs that aren't - // kubernetes_sd_configs. It should be 0. 
- std.length(std.filter( - function(key) key != 'kubernetes_sd_configs', - std.filter( - function(key) std.endsWith(key, '_sd_configs'), - std.objectFields(scrape_config), - ), - )) == 0 && - // Make sure there are 0 kubernetes_sd_configs whose role is not pod - std.length(std.filter( - function(kube_sd_config) kube_sd_config.role != 'pod', - std.flatMap( - function(key) scrape_config[key], - std.filter( - function(key) key == 'kubernetes_sd_configs', - std.objectFields(scrape_config) - ) - ) - )) == 0, - - // host_filter_compatible instances are ones that: - // - only use kubernetes_sd_configs - // - only use kubernetes_sd_configs with role = 'pod' - transformInstances(instances=[], host_filter_compatible=true):: - std.map(function(instance) instance { - scrape_configs: std.filter( - function(cfg) $.isOnlyK8sPodDiscovery(cfg) == host_filter_compatible, - super.scrape_configs, - ), - }, instances), -} diff --git a/production/tanka/grafana-agent/v1/lib/deployment.libsonnet b/production/tanka/grafana-agent/v1/lib/deployment.libsonnet deleted file mode 100644 index e557a0739516..000000000000 --- a/production/tanka/grafana-agent/v1/lib/deployment.libsonnet +++ /dev/null @@ -1,83 +0,0 @@ -local agent = import '../internal/agent.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -local configMap = k.core.v1.configMap; -local service = k.core.v1.service; -local container = k.core.v1.container; - -{ - // newDeployment creates a new single-replicated Deployment of the - // grafana-agent. By default, this deployment will do no collection. You must - // merge the result of this function with the following: - // - // - withMetricsConfig - // - withMetricsInstances - // - optionally withRemoteWrite - // - // newDeployment does not support log collection. - newDeployment(name='grafana-agent', namespace='default'):: { - assert !std.objectHas(self, '_logs_config') : ||| - Log collection is not supported with newDeployment. 
- |||, - assert !std.objectHas(self, '_integrations') : ||| - Integrations are not supported with newDeployment. - |||, - - local this = self, - - _mode:: 'deployment', - _images:: $._images, - _config_hash:: true, - - local has_metrics_config = std.objectHasAll(self, '_metrics_config'), - local has_metrics_instances = std.objectHasAll(self, '_metrics_instances'), - local has_trace_config = std.objectHasAll(self, '_trace_config'), - local has_sampling_strategies = std.objectHasAll(self, '_traces_sampling_strategies'), - - config:: { - server: { - log_level: 'info', - }, - } + ( - if has_metrics_config - then { - metrics: - this._metrics_config { - configs: - if has_metrics_instances - then this._metrics_instances - else [], - }, - } - else {} - ) + ( - if has_trace_config then { - traces: { - configs: [this._trace_config { - name: 'default', - }], - }, - } - else {} - ), - - agent: - agent.newAgent(name, namespace, self._images.agent, self.config, use_daemonset=false) + - agent.withConfigHash(self._config_hash) + { - // If sampling strategies were defined, we need to mount them as a JSON - // file. - config_map+: - if has_sampling_strategies - then configMap.withDataMixin({ - 'strategies.json': std.toString(this._traces_sampling_strategies), - }) - else {}, - // If we're deploying for tracing, applications will want to write to - // a service for load balancing span delivery. 
- service: - if has_trace_config - then k.util.serviceFor(self.agent) + service.mixin.metadata.withNamespace(namespace) - else {}, - }, - }, -} diff --git a/production/tanka/grafana-agent/v1/lib/integrations.libsonnet b/production/tanka/grafana-agent/v1/lib/integrations.libsonnet deleted file mode 100644 index 6b1482816cf9..000000000000 --- a/production/tanka/grafana-agent/v1/lib/integrations.libsonnet +++ /dev/null @@ -1,33 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local container = k.core.v1.container; - -{ - // withIntegrations controls the integrations component of the Agent. - // - // For the full list of options, refer to the configuration reference: - // https://github.com/grafana/agent/blob/main/docs/configuration-reference.md#integrations_config - withIntegrations(integrations):: { - assert std.objectHasAll(self, '_mode') : ||| - withIntegrations must be merged with the result of calling new. - |||, - _integrations:: integrations, - }, - - integrationsMixin:: { - container+:: - container.mixin.securityContext.withPrivileged(true) + - container.mixin.securityContext.withRunAsUser(0), - - local controller = self._controller, - agent+: - // procfs, sysfs, rotfs - k.util.hostVolumeMount('proc', '/proc', '/host/proc', readOnly=true) + - k.util.hostVolumeMount('sys', '/sys', '/host/sys', readOnly=true) + - k.util.hostVolumeMount('root', '/', '/host/root', readOnly=true) + - - controller.mixin.spec.template.spec.withHostPID(true) + - controller.mixin.spec.template.spec.withHostNetwork(true) + - controller.mixin.spec.template.spec.withDnsPolicy('ClusterFirstWithHostNet'), - }, -} diff --git a/production/tanka/grafana-agent/v1/lib/logs.libsonnet b/production/tanka/grafana-agent/v1/lib/logs.libsonnet deleted file mode 100644 index babb85d5ecca..000000000000 --- a/production/tanka/grafana-agent/v1/lib/logs.libsonnet +++ /dev/null @@ -1,82 +0,0 @@ -local scrape_k8s_logs = import '../internal/kubernetes_logs.libsonnet'; -local k = import 
'ksonnet-util/kausal.libsonnet'; - -local container = k.core.v1.container; - -{ - // withLogsConfig adds a Logs config to collect logs. - // - // For the full list of options, refer to the configuration reference: - // https://grafana.com/docs/agent/latest/configuration/logs-config/ - withLogsConfig(config):: { - assert std.objectHasAll(self, '_mode') : ||| - withLogsConfig must be merged with the result of calling new. - |||, - _logs_config:: config, - }, - - // newLogsClient creates a new client object. Results from this can be passed into - // withLogsClients. - // - // client_config should be an object of the following shape: - // - // { - // scheme: 'https', // or http - // hostname: 'logs-us-central1.grafana.net', // replace with hostname to use - // username: '', // OPTIONAL username for Loki API connection - // password: '', // OPTIONAL password for Loki API connection - // external_labels: {}, // OPTIONAL labels to set for connection - // } - newLogsClient(client_config):: - { - url: ( - if std.objectHasAll(client_config, 'username') then - '%(scheme)s://%(username)s:%(password)s@%(hostname)s/loki/api/v1/push' % client_config - else - '%(scheme)s://%(hostname)s/loki/api/v1/push' % client_config - ), - } + ( - if std.objectHasAll(client_config, 'external_labels') - then { external_labels: client_config.external_labels } - else {} - ), - - // withLogsClients adds clients to send logs to. At least one client must be - // present. Clients can be created by calling newLogsClient or by creating - // an object that conforms to the Promtail client_config schema specified - // here: - // - // https://grafana.com/docs/loki/latest/clients/promtail/configuration/#client_config - // - // withLogsClients should be merged with the result of withLogsConfig. - withLogsClients(clients):: { - assert std.objectHasAll(self, '_logs_config') : ||| - withLogsClients must be merged with the result of calling withLogsConfig. 
- |||, - - _logs_config+:: { - clients: if std.isArray(clients) then clients else [clients], - }, - }, - - // logsPermissionsMixin mutates the container and deployment to work with - // reading Docker container logs. - logsPermissionsMixin:: { - container+:: - container.mixin.securityContext.withPrivileged(true) + - container.mixin.securityContext.withRunAsUser(0), - - agent+: - // For reading docker containers. /var/log is used for the positions file - // and shouldn't be set to readonly. - k.util.hostVolumeMount('varlog', '/var/log', '/var/log') + - k.util.hostVolumeMount('varlibdockercontainers', '/var/lib/docker/containers', '/var/lib/docker/containers', readOnly=true) + - - // For reading journald - k.util.hostVolumeMount('etcmachineid', '/etc/machine-id', '/etc/machine-id', readOnly=true), - }, - - // scrapeKubernetesLogs defines a Logs config that can collect logs from - // Kubernetes pods. - scrapeKubernetesLogs: scrape_k8s_logs.newKubernetesLogsCollector(), -} diff --git a/production/tanka/grafana-agent/v1/lib/metrics.libsonnet b/production/tanka/grafana-agent/v1/lib/metrics.libsonnet deleted file mode 100644 index 3cad8e867ab8..000000000000 --- a/production/tanka/grafana-agent/v1/lib/metrics.libsonnet +++ /dev/null @@ -1,116 +0,0 @@ -local scrape_k8s = import '../internal/kubernetes_instance.libsonnet'; - -{ - // defaultMetricsConfig holds the default Metrics Config with all - // options that the Agent supports. It is better to use this object as a - // reference rather than extending it; since all fields are defined here, if - // the Agent changes a default value in the future, the default change will - // be overridden by the values here. - // - // Required fields will be marked as REQUIRED. - defaultMetricsConfig:: { - // Settings that apply to all launched Metrics instances by default. - // These settings may be overridden on a per-instance basis. - global: { - // How frequently to scrape for metrics. 
- scrape_interval: '1m', - - // How long to wait before timing out from scraping a target. - scrape_timeout: '10s', - - // Extra labels to apply to all scraped targets. - external_labels: { - /* foo: 'bar', */ - }, - }, - - // Where to store the WAL for metrics before they are sent to remote_write. - // REQUIRED. The value here is preconfigured to work with the Tanka configs. - wal_directory: '/var/lib/agent/data', - - // If an instance crashes abnormally, wait this long before restarting it. - // 0s disables the backoff period and restarts the instance immediately. - instance_restart_backoff: '5s', - - // How to spawn instances based on compatible fields. Supported values: - // "shared" (default), "distinct". - instance_mode: 'shared', - }, - - // withMetricsConfig controls the Metrics engine settings for the Agent. - // defaultMetricsConfig explicitly defines all supported values that can be - // provided within config. - withMetricsConfig(config):: { _metrics_config:: config }, - - // withMetricsInstances controls the Metrics instances the Agent will - // launch. Instances may be a single object or an array of objects. Each - // object must have a name key that is unique to that object. - // - // scrapeInstanceKubernetes defines an example set of instances and the - // ones Grafana Labs uses in production. It does not demonstrate all available - // values for scrape configs and remote_write. For detailed information on - // instance config settings, consult the Agent documentation: - // - // https://github.com/grafana/agent/blob/main/docs/configuration-reference.md#metrics_instance_config - // - // host_filter does not need to be applied here; the library will apply it - // automatically based on how the Agent is being deployed. - // - // remote_write rules may be defined in the instance object. Optionally, - // remove_write rules may be applied to every instance object by using the - // withRemoteWrite function. 
- withMetricsInstances(instances):: { - assert std.objectHasAll(self, '_mode') : ||| - withMetricsInstances must be merged with the result of calling new, - newDeployment, or newScrapingService. - |||, - - local list = if std.isArray(instances) then instances else [instances], - - // If the library was invoked in daemonset mode, we want to use - // host_filtering mode so each Agent only scrapes stuff from its local - // machine. - local host_filter = super._mode == 'daemonset', - - // Apply host_filtering over our list of instances. - _metrics_instances:: std.map(function(inst) inst { - host_filter: host_filter, - - // Make sure remote_write is an empty array if it doesn't exist. - remote_write: - if !std.objectHas(inst, 'remote_write') || !std.isArray(inst.remote_write) - then [] - else inst.remote_write, - }, list), - }, - - // withRemoteWrite overwrites all the remote_write configs provided in - // withMetricsInstances with the specified remote_writes. This is - // useful when there are multiple instances and you just want everything - // to remote_write to the same place. - // - // Refer to the remote_write specification for all available fields: - // https://github.com/grafana/agent/blob/main/docs/configuration-reference.md#remote_write - withRemoteWrite(remote_writes):: { - assert std.objectHasAll(self, '_mode') : ||| - withMetricsInstances must be merged with the result of calling new, - newDeployment, or newScrapingService. - |||, - - local list = if std.isArray(remote_writes) then remote_writes else [remote_writes], - _metrics_config+:: { global+: { remote_write: list } }, - }, - - // scrapeInstanceKubernetes defines an instance config Grafana Labs uses to - // scrape Kubernetes metrics. - // - // Pods will be scraped if: - // - // 1. They have a port ending in -metrics - // 2. They do not have a prometheus.io/scrape=false annotation - // 3. 
They have a name label - scrapeInstanceKubernetes: scrape_k8s.newKubernetesScrapeInstance( - config=scrape_k8s.kubernetesScrapeInstanceConfig, - namespace='default', - ), -} diff --git a/production/tanka/grafana-agent/v1/lib/scraping_service.libsonnet b/production/tanka/grafana-agent/v1/lib/scraping_service.libsonnet deleted file mode 100644 index 6962ad3b8d30..000000000000 --- a/production/tanka/grafana-agent/v1/lib/scraping_service.libsonnet +++ /dev/null @@ -1,4 +0,0 @@ -{ - // TODO(rfratto): port scraping service code and expose as newScrapingService - // here. -} diff --git a/production/tanka/grafana-agent/v1/lib/traces.libsonnet b/production/tanka/grafana-agent/v1/lib/traces.libsonnet deleted file mode 100644 index 98a91fcb4191..000000000000 --- a/production/tanka/grafana-agent/v1/lib/traces.libsonnet +++ /dev/null @@ -1,121 +0,0 @@ -{ - // withTracesConfig adds a Traces config to collect traces. - // - // For the full list of options, refer to the configuration reference: - // - withTracesConfig(config):: { - assert std.objectHasAll(self, '_mode') : ||| - withTracesConfig must be merged with the result of calling new. - |||, - _trace_config:: config, - }, - - // withTracesRemoteWrite configures one or multiple backends to write traces to. - // - // Available options can be found in the configuration reference: - // https://github.com/grafana/agent/blob/main/docs/configuration-reference.md#traces_config - withTracesRemoteWrite(remote_write):: { - assert std.objectHasAll(self, '_trace_config') : ||| - withTracesRemoteWrite must be merged with the result of calling - withTracesConfig. - |||, - _trace_config+:: { remote_write: remote_write }, - }, - - // withTracesSamplingStrategies accepts an object for trace sampling strategies. 
- // - // Refer to Jaeger's documentation for available fields: - // https://www.jaegertracing.io/docs/1.17/sampling/#collector-sampling-configuration - // - // Creating a file isn't necessary; just provide the object and a ConfigMap - // will be created for you and added to the tempo config. - withTracesSamplingStrategies(strategies):: { - assert std.objectHasAll(self, '_trace_config') : ||| - withTracesPushConfig must be merged with the result of calling - withTracesConfig. - |||, - - assert - std.objectHasAll(self._trace_config, 'receivers') && - std.objectHasAll(self._trace_config.receivers, 'jaeger') : ||| - withStrategies can only be used if the traces config is configured for - receiving Jaeger spans and traces. - |||, - - // The main library should detect the presence of _traces_sampling_strategies - // and create a ConfigMap bound to /etc/agent/strategies.json. - _traces_sampling_strategies:: strategies, - _trace_config+:: { - receivers+: { - jaeger+: { - remote_sampling: { - strategy_file: '/etc/agent/strategies.json', - insecure: true, - }, - }, - }, - }, - }, - - // Configures scrape_configs for discovering meta labels that will be attached - // to incoming metrics and spans whose IP matches the __address__ of the - // target. - withTracesScrapeConfigs(scrape_configs):: { - assert std.objectHasAll(self, '_trace_config') : ||| - withTracesScrapeConfigs must be merged with the result of calling - withTracesConfig. - |||, - _trace_config+: { scrape_configs: scrape_configs }, - }, - - // Provides a default set of scrape_configs to use for discovering labels from - // Pods. Labels will be attached to any traces sent from the discovered pods. 
- tracesScrapeKubernetes:: [ - { - bearer_token_file: '/var/run/secrets/kubernetes.io/serviceaccount/token', - job_name: 'kubernetes-pods', - kubernetes_sd_configs: [{ role: 'pod' }], - relabel_configs: [ - { - action: 'replace', - source_labels: ['__meta_kubernetes_namespace'], - target_label: 'namespace', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_name'], - target_label: 'pod', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_container_name'], - target_label: 'container', - }, - ], - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - insecure_skip_verify: false, - }, - }, - ], - - // withTracesTailSamplingConfig tail-based sampling for traces. - // - // Available options can be found in the configuration reference: - // https://github.com/grafana/agent/blob/main/docs/configuration-reference.md#traces_config - withTracesTailSamplingConfig(tail_sampling):: { - assert std.objectHasAll(self, '_trace_config') : ||| - withTracesTailSamplingConfig must be merged with the result of calling - withTracesConfig. - |||, - _trace_config+:: { tail_sampling: tail_sampling }, - }, - - withTracesLoadBalancingConfig(load_balancing):: { - assert std.objectHasAll(self, '_trace_config') : ||| - withTracesLoadBalancingConfig must be merged with the result of calling - withTracesConfig. 
- |||, - _trace_config+:: { load_balancing: load_balancing }, - }, -} diff --git a/production/tanka/grafana-agent/v1/main.libsonnet b/production/tanka/grafana-agent/v1/main.libsonnet deleted file mode 100644 index 8ff06ab1bf5b..000000000000 --- a/production/tanka/grafana-agent/v1/main.libsonnet +++ /dev/null @@ -1,142 +0,0 @@ -local agent = import './internal/agent.libsonnet'; -local utils = import './internal/utils.libsonnet'; -local k = import 'ksonnet-util/kausal.libsonnet'; - -local container = k.core.v1.container; -local configMap = k.core.v1.configMap; -local service = k.core.v1.service; - -// Merge all of our libraries to create the final exposed library. -(import './lib/deployment.libsonnet') + -(import './lib/integrations.libsonnet') + -(import './lib/metrics.libsonnet') + -(import './lib/scraping_service.libsonnet') + -(import './lib/logs.libsonnet') + -(import './lib/traces.libsonnet') + -{ - _images:: { - agent: 'grafana/agent:v0.37.4', - agentctl: 'grafana/agentctl:v0.37.4', - }, - - // new creates a new DaemonSet deployment of the grafana-agent. By default, - // the deployment will do no collection. You must merge the result of this - // function with one or more of the following: - // - // - withMetricsConfig, withMetricsInstances (and optionally withRemoteWrite) - // - withLogsConfig - // - // When using withMetricsInstances, a [name]-etc deployment - // with one replica will be created alongside the DaemonSet. This deployment - // is responsible for handling scrape configs that will not work on the host - // machine. - // - // For example, if a scrape_config scrapes the Kubernetes API, that must be - // handled by the [name]-etc deployment as the Kubernetes API does not run - // on any node in the cluster. - // - // scrapeInstanceKubernetes provides the default - // MetricsInstanceConfig Grafana Labs uses in production. 
- new(name='grafana-agent', namespace='default'):: { - local this = self, - - _mode:: 'daemonset', - _images:: $._images, - _config_hash:: true, - - local has_logs_config = std.objectHasAll(self, '_logs_config'), - local has_trace_config = std.objectHasAll(self, '_trace_config'), - local has_metrics_config = std.objectHasAll(self, '_metrics_config'), - local has_metrics_instances = std.objectHasAll(self, '_metrics_instances'), - local has_integrations = std.objectHasAll(self, '_integrations'), - local has_sampling_strategies = std.objectHasAll(self, '_traces_sampling_strategies'), - - local metrics_instances = - if has_metrics_instances then this._metrics_instances else [], - local host_filter_instances = utils.transformInstances(metrics_instances, true), - local etc_instances = utils.transformInstances(metrics_instances, false), - - config:: { - server: { - log_level: 'info', - }, - } + ( - if has_metrics_config - then { metrics: this._metrics_config { configs: host_filter_instances } } - else {} - ) + ( - if has_logs_config then { - logs: { - positions_directory: '/tmp/positions', - configs: [this._logs_config { - name: 'default', - }], - }, - } else {} - ) + ( - if has_trace_config then { - traces: { - configs: [this._trace_config { - name: 'default', - }], - }, - } - else {} - ) + ( - if has_integrations then { integrations: this._integrations } else {} - ), - - etc_config:: if has_metrics_config then this.config { - // Hide logs and integrations from our extra configs, we just want the - // scrape configs that wouldn't work for the DaemonSet. - metrics+: { - configs: std.map(function(cfg) cfg { host_filter: false }, etc_instances), - }, - logs:: {}, - traces:: {}, - integrations:: {}, - }, - - agent: - agent.newAgent(name, namespace, self._images.agent, self.config, use_daemonset=true) + - agent.withConfigHash(self._config_hash) + { - // If sampling strategies were defined, we need to mount them as a JSON - // file. 
- config_map+: - if has_sampling_strategies - then configMap.withDataMixin({ - 'strategies.json': std.toString(this._traces_sampling_strategies), - }) - else {}, - - // If we're deploying for tracing, applications will want to write to - // a service for load balancing span delivery. - service: - if has_trace_config - then k.util.serviceFor(self.agent) + service.mixin.metadata.withNamespace(namespace) - else {}, - } + ( - if has_logs_config then $.logsPermissionsMixin else {} - ) + ( - if has_integrations && std.objectHas(this._integrations, 'node_exporter') then $.integrationsMixin else {} - ), - - agent_etc: if std.length(etc_instances) > 0 then - agent.newAgent(name + '-etc', namespace, self._images.agent, self.etc_config, use_daemonset=false) + - agent.withConfigHash(self._config_hash), - }, - - // withImages sets the images used for launching the Agent. - // Keys supported: agent, agentctl - withImages(images):: { _images+: images }, - - // Includes or excludes the config hash annotation. - withConfigHash(include=true):: { _config_hash:: include }, - - // withPortsMixin adds extra ports to expose. - withPortsMixin(ports=[]):: { - agent+: { - container+:: container.withPortsMixin(ports), - }, - }, -} diff --git a/production/tanka/grafana-agent/v2/README.md b/production/tanka/grafana-agent/v2/README.md deleted file mode 100644 index 33c18cadeac7..000000000000 --- a/production/tanka/grafana-agent/v2/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# Tanka Configs - -**STATUS**: Work in progress, use of these configs is not recommended for production. - -This directory contains the Tanka configs that we use to deploy the Grafana -Agent. It is marked as `v2` and is incompatible previous versions of the library -located in other directories. - -This library is currently a work in progress and backwards-incompatible changes -may occur. Once the library is considered complete, no further backwards -incompatible changes will be made. 
- -## Capabilities - -This library is significantly simplified over the `v0` and `v1` counterparts. -Since there are many ways to combine the various functionalities of the Grafana -Agent, the `v2` library aims to stay out of your way and provide optional composible -helpers that may be useful for some people. - -Users of the library will pick a controller for their deployment. They are -expected to know what feature are compatible with which controller: - -| Controller | Metrics | Logs | Traces | Integrations | -| ---------------- | ------------------- | --------- | ------ | ------------ | -| DaemonSet | If host filtering | Yes | Yes | No | -| Deployment | Yes | No | No | Yes | -| StatefulSet | Yes | No | No | Yes | - -Creating an incompatible deployment will cause runtime issues when running the -Agent (for example, if configuring Logs with a StatefulSet, you will only get -logs from the node the pods are running on). - -To get full coverage of features, you must create multiple deployments of the -library. You may wish to combine a StatefulSet for metrics and integrations, a -Deployment for Traces, and a DaemonSet for logs. - -## API - -## Generate Agent Deployment - -- `new(name='grafana-agent', namespace='')`: Create a new Agent without a - controller. -- `withDeploymentController(replicas=1)`: Attach a Deployment as the Agent - controller. Number of replicas may optionally be given. -- `withDaemonSetController()`: Attach a DaemonSet as the Agent controller. -- `withStatefulSetController(replicas=1, volumeClaims=[])`: Attach a StatefulSet - as the Agent controller. Number of replicas and a set of volume claim - templates may be given. - -## Generate Scraping Service Syncer - -The Scraping Service Syncer is used to sync metrics instance configs against the -scraping service config management API. - -- `newSyncer(name='grafana-agent-sycner', namespace='', config={})` - -## General - -- `withAgentConfig(config)`: Provide a custom Agent config. 
-- `withArgsMixin(config)`: Pass a map of additional flags to set. -- `withMetricsPort(port)`: Value for the `http-metrics` port (default 80) -- `withImagesMixin(images)`: Use custom images instead of the defaults. -- `withConfigHash(include=true)`: Whether to include a config hash annotation. -- `withPortsMixin(ports=[])`: Mixin ports from `k.core.v1.containerPort` against - the container and service. -- `withVolumesMixin(volumes=[])`: Volume to attach to the pod. -- `withVolumeMountsMixin(mounts=[])`: Volume mounts to attach to the container. - -## Helpers - -- `newKubernetesMetrics(config={})`: Creates a set of metrics scrape_configs for - collecting metrics from Kubernetes pods. -- `newKubernetesLogs(config={})`: Creates a set of logs scrape_configs for - collecting logs from Kubernetes pods. -- `newKubernetesTraces(config={})`: Creates a set of traces scrape_configs for - associating spans with metadata from discovered Kubernetes pods. -- `withLogVolumeMounts(config={})`: Adds volume mounts to the controller for collecting - logs. -- `withLogPermissions(config={})`: Runs the container as privileged and as the root user - so logs can be collected properly. -- `withService(config)`: Add a service for the deployment, statefulset, or daemonset. - Note that this must be called after any ports are added via `withPortsMixin`. 
- - diff --git a/production/tanka/grafana-agent/v2/internal/base.libsonnet b/production/tanka/grafana-agent/v2/internal/base.libsonnet deleted file mode 100644 index 53921bf64aaf..000000000000 --- a/production/tanka/grafana-agent/v2/internal/base.libsonnet +++ /dev/null @@ -1,56 +0,0 @@ -function(name='grafana-agent', namespace='') { - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: { namespace: namespace } }, - - local container = k.core.v1.container, - local configMap = k.core.v1.configMap, - local containerPort = k.core.v1.containerPort, - local policyRule = k.rbac.v1.policyRule, - local serviceAccount = k.core.v1.serviceAccount, - local envVar = k.core.v1.envVar, - - local this = self, - - _images:: { - agent: 'grafana/agent:v0.37.4', - agentctl: 'grafana/agentctl:v0.37.4', - }, - _config:: { - name: name, - namespace: namespace, - config_hash: true, - agent_config: '', - agent_port: 80, - agent_args: { - 'config.file': '/etc/agent/agent.yaml', - 'server.http.address': '0.0.0.0:80', - 'config.expand-env': 'true', - }, - }, - - rbac: k.util.rbac(name, [ - policyRule.withApiGroups(['']) + - policyRule.withResources(['nodes', 'nodes/proxy', 'services', 'endpoints', 'pods', 'events']) + - policyRule.withVerbs(['get', 'list', 'watch']), - - policyRule.withNonResourceUrls('/metrics') + - policyRule.withVerbs(['get']), - ]) { - service_account+: serviceAccount.mixin.metadata.withNamespace(namespace), - }, - - configMap: - configMap.new(name) + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData({ - 'agent.yaml': k.util.manifestYaml(this._config.agent_config), - }), - - container:: - container.new(name, this._images.agent) + - container.withPorts(containerPort.new('http-metrics', this._config.agent_port)) + - container.withArgsMixin(k.util.mapToFlags(this._config.agent_args)) + - // `HOSTNAME` is required for promtail (logs) otherwise it will silently do nothing - container.withEnvMixin([ - envVar.fromFieldPath('HOSTNAME', 
'spec.nodeName'), - ]), -} diff --git a/production/tanka/grafana-agent/v2/internal/controllers/daemonset.libsonnet b/production/tanka/grafana-agent/v2/internal/controllers/daemonset.libsonnet deleted file mode 100644 index 5e5f8880a2a4..000000000000 --- a/production/tanka/grafana-agent/v2/internal/controllers/daemonset.libsonnet +++ /dev/null @@ -1,22 +0,0 @@ -function() { - local this = self, - local _config = this._config, - local name = _config.name, - local namespace = _config.namespace, - - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: this._config }, - local daemonSet = k.apps.v1.daemonSet, - - controller: - daemonSet.new(name, [this.container]) + - daemonSet.mixin.metadata.withNamespace(namespace) + - daemonSet.mixin.spec.template.spec.withServiceAccountName(name) + - ( - if _config.config_hash - then daemonSet.mixin.spec.template.metadata.withAnnotationsMixin({ - config_hash: std.md5(std.toString(_config.agent_config)), - }) - else {} - ) + - k.util.configVolumeMount(name, '/etc/agent'), -} diff --git a/production/tanka/grafana-agent/v2/internal/controllers/deployment.libsonnet b/production/tanka/grafana-agent/v2/internal/controllers/deployment.libsonnet deleted file mode 100644 index 5afbe9923118..000000000000 --- a/production/tanka/grafana-agent/v2/internal/controllers/deployment.libsonnet +++ /dev/null @@ -1,22 +0,0 @@ -function(replicas=1) { - local this = self, - local _config = this._config, - local name = _config.name, - local namespace = _config.namespace, - - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: this._config }, - local deployment = k.apps.v1.deployment, - - controller: - deployment.new(name, replicas, [this.container]) + - deployment.mixin.metadata.withNamespace(namespace) + - deployment.mixin.spec.template.spec.withServiceAccountName(name) + - ( - if _config.config_hash - then deployment.mixin.spec.template.metadata.withAnnotationsMixin({ - config_hash: std.md5(std.toString(_config.agent_config)), - }) 
- else {} - ) + - k.util.configVolumeMount(name, '/etc/agent'), -} diff --git a/production/tanka/grafana-agent/v2/internal/controllers/statefulset.libsonnet b/production/tanka/grafana-agent/v2/internal/controllers/statefulset.libsonnet deleted file mode 100644 index d80cab383bc5..000000000000 --- a/production/tanka/grafana-agent/v2/internal/controllers/statefulset.libsonnet +++ /dev/null @@ -1,23 +0,0 @@ -function(replicas=1, volumeClaims=[]) { - local this = self, - local _config = this._config, - local name = _config.name, - local namespace = _config.namespace, - - local k = (import 'ksonnet-util/kausal.libsonnet') { _config+:: this._config }, - local statefulSet = k.apps.v1.statefulSet, - - controller: - statefulSet.new(name, replicas, [this.container], volumeClaims) + - statefulSet.mixin.metadata.withNamespace(namespace) + - statefulSet.mixin.spec.withServiceName(name) + - statefulSet.mixin.spec.template.spec.withServiceAccountName(name) + - ( - if _config.config_hash - then statefulSet.mixin.spec.template.metadata.withAnnotationsMixin({ - config_hash: std.md5(std.toString(_config.agent_config)), - }) - else {} - ) + - k.util.configVolumeMount(name, '/etc/agent'), -} diff --git a/production/tanka/grafana-agent/v2/internal/helpers/k8s.libsonnet b/production/tanka/grafana-agent/v2/internal/helpers/k8s.libsonnet deleted file mode 100644 index 5ae43c901a6c..000000000000 --- a/production/tanka/grafana-agent/v2/internal/helpers/k8s.libsonnet +++ /dev/null @@ -1,523 +0,0 @@ -local k8s_tls_config(config) = { - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - insecure_skip_verify: config.insecure_skip_verify, - }, - bearer_token_file: '/var/run/secrets/kubernetes.io/serviceaccount/token', -}; - -local gen_scrape_config(job_name, pod_uid) = { - job_name: job_name, - pipeline_stages: [{ - docker: {}, - }], - kubernetes_sd_configs: [{ - role: 'pod', - }], - - relabel_configs: self.prelabel_config + [ - // Only scrape local pods; Promtail 
will drop targets with a __host__ label - // that does not match the current host name. - { - source_labels: ['__meta_kubernetes_pod_node_name'], - target_label: '__host__', - }, - - // Drop pods without a __service__ label. - { - source_labels: ['__service__'], - action: 'drop', - regex: '', - }, - - // Include all the other labels on the pod. - // Perform this mapping before applying additional label replacement rules - // to prevent a supplied label from overwriting any of the following labels. - { - action: 'labelmap', - regex: '__meta_kubernetes_pod_label_(.+)', - }, - - // Rename jobs to be /. - { - source_labels: ['__meta_kubernetes_namespace', '__service__'], - action: 'replace', - separator: '/', - target_label: 'job', - replacement: '$1', - }, - - // But also include the namespace, pod, container as separate - // labels. They uniquely identify a container. They are also - // identical to the target labels configured in Prometheus - // (but note that Loki does not use an instance label). - { - source_labels: ['__meta_kubernetes_namespace'], - action: 'replace', - target_label: 'namespace', - }, - { - source_labels: ['__meta_kubernetes_pod_name'], - action: 'replace', - target_label: 'pod', // Not 'pod_name', which disappeared in K8s 1.16. - }, - { - source_labels: ['__meta_kubernetes_pod_container_name'], - action: 'replace', - target_label: 'container', // Not 'container_name', which disappeared in K8s 1.16. - }, - - // Kubernetes puts logs under subdirectories keyed pod UID and container_name. - { - source_labels: [pod_uid, '__meta_kubernetes_pod_container_name'], - target_label: '__path__', - separator: '/', - replacement: '/var/log/pods/*$1/*.log', - }, - ], -}; - -{ - metrics(config):: - local _config = { - scrape_api_server_endpoints: false, - insecure_skip_verify: false, - - cluster_dns_tld: 'local', - cluster_dns_suffix: 'cluster.' 
+ self.cluster_dns_tld, - kubernetes_api_server_address: 'kubernetes.default.svc.%(cluster_dns_suffix)s:443' % self, - - ksm_namespace: 'kube-system', - node_exporter_namespace: 'kube-system', - } + config; - - [ - k8s_tls_config(_config) { - job_name: 'default/kubernetes', - kubernetes_sd_configs: [{ - role: if _config.scrape_api_server_endpoints then 'endpoints' else 'service', - }], - scheme: 'https', - tls_config+: { - server_name: 'kubernetes', - }, - - relabel_configs: [{ - source_labels: ['__meta_kubernetes_service_label_component'], - regex: 'apiserver', - action: 'keep', - }], - - // Keep limited set of metrics to reduce default usage, drop all others - metric_relabel_configs: [ - { - source_labels: ['__name__'], - regex: 'workqueue_queue_duration_seconds_bucket|process_cpu_seconds_total|process_resident_memory_bytes|workqueue_depth|rest_client_request_duration_seconds_bucket|workqueue_adds_total|up|rest_client_requests_total|apiserver_request_total|go_goroutines', - action: 'keep', - }, - ], - }, - - { - job_name: 'kubernetes-pods', - kubernetes_sd_configs: [{ - role: 'pod', - }], - - // You can specify the following annotations (on pods): - // prometheus.io/scrape: false - don't scrape this pod - // prometheus.io/scheme: https - use https for scraping - // prometheus.io/port - scrape this port - // prometheus.io/path - scrape this path - // prometheus.io/param- - send ?parameter=value with the scrape - relabel_configs: [ - // Drop anything annotated with prometheus.io/scrape=false - { - source_labels: ['__meta_kubernetes_pod_annotation_prometheus_io_scrape'], - action: 'drop', - regex: 'false', - }, - - // Drop any endpoint whose pod port name does not end with metrics - { - source_labels: ['__meta_kubernetes_pod_container_port_name'], - action: 'keep', - regex: '.*-metrics', - }, - - // Allow pods to override the scrape scheme with prometheus.io/scheme=https - { - source_labels: ['__meta_kubernetes_pod_annotation_prometheus_io_scheme'], - action: 
'replace', - target_label: '__scheme__', - regex: '(https?)', - replacement: '$1', - }, - - // Allow service to override the scrape path with prometheus.io/path=/other_metrics_path - { - source_labels: ['__meta_kubernetes_pod_annotation_prometheus_io_path'], - action: 'replace', - target_label: '__metrics_path__', - regex: '(.+)', - replacement: '$1', - }, - - // Allow services to override the scrape port with prometheus.io/port=1234 - { - source_labels: ['__address__', '__meta_kubernetes_pod_annotation_prometheus_io_port'], - action: 'replace', - target_label: '__address__', - regex: '(.+?)(\\:\\d+)?;(\\d+)', - replacement: '$1:$3', - }, - - // Drop pods without a name label - { - source_labels: ['__meta_kubernetes_pod_label_name'], - action: 'drop', - regex: '', - }, - - // Rename jobs to be / - { - source_labels: ['__meta_kubernetes_namespace', '__meta_kubernetes_pod_label_name'], - action: 'replace', - separator: '/', - target_label: 'job', - replacement: '$1', - }, - - // But also include the namespace as a separate label for routing alerts - { - source_labels: ['__meta_kubernetes_namespace'], - action: 'replace', - target_label: 'namespace', - }, - { - source_labels: ['__meta_kubernetes_pod_name'], - action: 'replace', - target_label: 'pod', // Not 'pod_name', which disappeared in K8s 1.16. - }, - { - source_labels: ['__meta_kubernetes_pod_container_name'], - action: 'replace', - target_label: 'container', // Not 'container_name', which disappeared in K8s 1.16. - }, - - // Rename instances to the concatenation of pod:container:port. - // All three components are needed to guarantee a unique instance label. 
- { - source_labels: [ - '__meta_kubernetes_pod_name', - '__meta_kubernetes_pod_container_name', - '__meta_kubernetes_pod_container_port_name', - ], - action: 'replace', - separator: ':', - target_label: 'instance', - }, - - // Map prometheus.io/param-=value fields to __param_=value - { - regex: '__meta_kubernetes_pod_annotation_prometheus_io_param_(.+)', - action: 'labelmap', - replacement: '__param_$1', - }, - - // Drop pods with phase Succeeded or Failed - { - source_labels: ['__meta_kubernetes_pod_phase'], - action: 'drop', - regex: 'Succeeded|Failed', - }, - ], - }, - - // A separate scrape config for kube-state-metrics which doesn't add a - // namespace label and instead takes the namespace label from the exported - // timeseries. This prevents the exported namespace label from being - // renamed to exported_namesapce and allows us to route alerts based on - // namespace. - { - job_name: '%s/kube-state-metrics' % _config.ksm_namespace, - kubernetes_sd_configs: [{ - role: 'pod', - namespaces: { - names: [_config.ksm_namespace], - }, - }], - - relabel_configs: [ - // Drop anything whose service is not kube-state-metrics - { - source_labels: ['__meta_kubernetes_pod_label_name'], - regex: 'kube-state-metrics', - action: 'keep', - }, - - // Rename instances to the concatenation of pod:container:port. - // In the specific case of KSM, we could leave out the container - // name and still have a unique instance label, but we leave it - // in here for consistency with the normal pod scraping. - { - source_labels: [ - '__meta_kubernetes_pod_name', - '__meta_kubernetes_pod_container_name', - '__meta_kubernetes_pod_container_port_name', - ], - action: 'replace', - separator: ':', - target_label: 'instance', - }, - ], - }, - - // A separate scrape config for node-exporter which maps the node name - // onto the instance label. 
- { - job_name: '%s/node-exporter' % _config.node_exporter_namespace, - kubernetes_sd_configs: [{ - role: 'pod', - namespaces: { - names: [_config.node_exporter_namespace], - }, - }], - - relabel_configs: [ - // Drop anything whose name is not node-exporter. - { - source_labels: ['__meta_kubernetes_pod_label_name'], - regex: 'node-exporter', - action: 'keep', - }, - - // Rename instances to be the node name. - { - source_labels: ['__meta_kubernetes_pod_node_name'], - action: 'replace', - target_label: 'instance', - }, - - // But also include the namespace as a separate label, for - // routing alerts. - { - source_labels: ['__meta_kubernetes_namespace'], - action: 'replace', - target_label: 'namespace', - }, - ], - }, - - // This scrape config gathers all kubelet metrics. - k8s_tls_config(_config) { - job_name: 'kube-system/kubelet', - kubernetes_sd_configs: [{ role: 'node' }], - - relabel_configs: [ - { - target_label: '__address__', - replacement: _config.kubernetes_api_server_address, - }, - { - target_label: '__scheme__', - replacement: 'https', - }, - { - source_labels: ['__meta_kubernetes_node_name'], - regex: '(.+)', - target_label: '__metrics_path__', - replacement: '/api/v1/nodes/$1/proxy/metrics', - }, - ], - }, - - // As of k8s 1.7.3, cAdvisor metrics are available via kubelet using the - // /metrics/cadvisor path. - k8s_tls_config(_config) { - job_name: 'kube-system/cadvisor', - kubernetes_sd_configs: [{ - role: 'node', - }], - scheme: 'https', - - relabel_configs: [ - { - target_label: '__address__', - replacement: _config.kubernetes_api_server_address, - }, - { - source_labels: ['__meta_kubernetes_node_name'], - regex: '(.+)', - target_label: '__metrics_path__', - replacement: '/api/v1/nodes/$1/proxy/metrics/cadvisor', - }, - ], - - metric_relabel_configs: [ - // Let system processes like kubelet survive the next rule by giving them a fake image. 
- { - source_labels: ['__name__', 'id'], - regex: 'container_([a-z_]+);/system.slice/(.+)', - target_label: 'image', - replacement: '$2', - }, - - // Drop container_* metrics with no image. - { - source_labels: ['__name__', 'image'], - regex: 'container_([a-z_]+);', - action: 'drop', - }, - - // Drop a bunch of metrics which are disabled but still sent, - // see https://github.com/google/cadvisor/issues/1925. - { - source_labels: ['__name__'], - regex: 'container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)', - action: 'drop', - }, - ], - }, - ], - - logs(config={}):: [ - // Scrape config to scrape any pods with a 'name' label. - gen_scrape_config('kubernetes-pods-name', '__meta_kubernetes_pod_uid') { - prelabel_config:: [ - // Use name label as __service__. - { - source_labels: ['__meta_kubernetes_pod_label_name'], - target_label: '__service__', - }, - ], - }, - - // Scrape config to scrape any pods with an 'app' label. - gen_scrape_config('kubernetes-pods-app', '__meta_kubernetes_pod_uid') { - prelabel_config:: [ - // Drop pods with a 'name' label. They will have already been added by - // the scrape_config that matches on the 'name' label - { - source_labels: ['__meta_kubernetes_pod_label_name'], - action: 'drop', - regex: '.+', - }, - - // Use app label as the __service__. - { - source_labels: ['__meta_kubernetes_pod_label_app'], - target_label: '__service__', - }, - ], - }, - - // Scrape config to scrape any pods with a direct controller (eg - // StatefulSets). - gen_scrape_config('kubernetes-pods-direct-controllers', '__meta_kubernetes_pod_uid') { - prelabel_config:: [ - // Drop pods with a 'name' or 'app' label. They will have already been added by - // the scrape_config that matches above. - { - source_labels: ['__meta_kubernetes_pod_label_name', '__meta_kubernetes_pod_label_app'], - separator: '', - action: 'drop', - regex: '.+', - }, - - // Drop pods with an indirect controller. 
eg Deployments create replicaSets - // which then create pods. - { - source_labels: ['__meta_kubernetes_pod_controller_name'], - action: 'drop', - regex: '[0-9a-z-.]+-[0-9a-f]{8,10}', - }, - - // Use controller name as __service__. - { - source_labels: ['__meta_kubernetes_pod_controller_name'], - target_label: '__service__', - }, - ], - }, - - // Scrape config to scrape any pods with an indirect controller (eg - // Deployments). - gen_scrape_config('kubernetes-pods-indirect-controller', '__meta_kubernetes_pod_uid') { - prelabel_config:: [ - // Drop pods with a 'name' or 'app' label. They will have already been added by - // the scrape_config that matches above. - { - source_labels: ['__meta_kubernetes_pod_label_name', '__meta_kubernetes_pod_label_app'], - separator: '', - action: 'drop', - regex: '.+', - }, - - // Drop pods not from an indirect controller. eg StatefulSets, DaemonSets - { - source_labels: ['__meta_kubernetes_pod_controller_name'], - regex: '[0-9a-z-.]+-[0-9a-f]{8,10}', - action: 'keep', - }, - - // Put the indirect controller name into a temp label. - { - source_labels: ['__meta_kubernetes_pod_controller_name'], - action: 'replace', - regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}', - target_label: '__service__', - }, - ], - }, - - // Scrape config to scrape any control plane static pods (e.g. 
kube-apiserver - // etcd, kube-controller-manager & kube-scheduler) - gen_scrape_config('kubernetes-pods-static', '__meta_kubernetes_pod_annotation_kubernetes_io_config_mirror') { - prelabel_config:: [ - // Ignore pods that aren't mirror pods - { - action: 'drop', - source_labels: ['__meta_kubernetes_pod_annotation_kubernetes_io_config_mirror'], - regex: '', - }, - - // Static control plane pods usually have a component label that identifies them - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_label_component'], - target_label: '__service__', - }, - ], - }, - ], - - traces(config={}):: [ - { - bearer_token_file: '/var/run/secrets/kubernetes.io/serviceaccount/token', - job_name: 'kubernetes-pods', - kubernetes_sd_configs: [{ role: 'pod' }], - relabel_configs: [ - { - action: 'replace', - source_labels: ['__meta_kubernetes_namespace'], - target_label: 'namespace', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_name'], - target_label: 'pod', - }, - { - action: 'replace', - source_labels: ['__meta_kubernetes_pod_container_name'], - target_label: 'container', - }, - ], - tls_config: { - ca_file: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt', - insecure_skip_verify: false, - }, - }, - ], -} diff --git a/production/tanka/grafana-agent/v2/internal/helpers/logs.libsonnet b/production/tanka/grafana-agent/v2/internal/helpers/logs.libsonnet deleted file mode 100644 index 30a88b899d5a..000000000000 --- a/production/tanka/grafana-agent/v2/internal/helpers/logs.libsonnet +++ /dev/null @@ -1,27 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; -local container = k.core.v1.container; - -{ - volumeMounts(config={}):: { - // Disable journald mount by default - local _config = { - journald: false, - } + config, - - controller+: - // For reading docker containers. /var/log is used for the positions file - // and shouldn't be set to readonly. 
- k.util.hostVolumeMount('varlog', '/var/log', '/var/log') + - k.util.hostVolumeMount('varlibdockercontainers', '/var/lib/docker/containers', '/var/lib/docker/containers', readOnly=true) + - - // For reading journald - if _config.journald == false then {} - else k.util.hostVolumeMount('etcmachineid', '/etc/machine-id', '/etc/machine-id', readOnly=true), - }, - - permissions(config={}):: { - container+:: - container.mixin.securityContext.withPrivileged(true) + - container.mixin.securityContext.withRunAsUser(0), - }, -} diff --git a/production/tanka/grafana-agent/v2/internal/helpers/service.libsonnet b/production/tanka/grafana-agent/v2/internal/helpers/service.libsonnet deleted file mode 100644 index 08f6502fa8ef..000000000000 --- a/production/tanka/grafana-agent/v2/internal/helpers/service.libsonnet +++ /dev/null @@ -1,13 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; -local svc = k.core.v1.service; - -{ - service(config={}):: { - local this = self, - local _config = this._config, - - controller_service: - k.util.serviceFor(this.controller) + - svc.mixin.metadata.withNamespace(_config.namespace), - }, -} diff --git a/production/tanka/grafana-agent/v2/internal/syncer.libsonnet b/production/tanka/grafana-agent/v2/internal/syncer.libsonnet deleted file mode 100644 index 689b14fd87b3..000000000000 --- a/production/tanka/grafana-agent/v2/internal/syncer.libsonnet +++ /dev/null @@ -1,62 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; - -local cronJob = k.batch.v1.cronJob; -local configMap = k.core.v1.configMap; -local container = k.core.v1.container; -local deployment = k.apps.v1.deployment; -local volumeMount = k.core.v1.volumeMount; -local volume = k.core.v1.volume; - -function( - name='grafana-agent-syncer', - namespace='', - config={}, -) { - local _config = { - api: error 'api must be set', - image: 'grafana/agentctl:v0.37.4', - schedule: '*/5 * * * *', - configs: [], - } + config, - - local this = self, - local _configs = std.foldl( - 
function(agg, cfg) - // Sanitize the name and remove / so every file goes into the same - // folder. - local name = std.strReplace(cfg.name, '/', '_'); - - agg { ['%s.yml' % name]: k.util.manifestYaml(cfg) }, - _config.configs, - {}, - ), - - configMap: - configMap.new(name) + - configMap.mixin.metadata.withNamespace(namespace) + - configMap.withData(_configs), - - container:: - container.new(name, _config.image) + - container.withArgsMixin([ - 'config-sync', - '--addr=%s' % _config.api, - '/etc/configs', - ]) + - container.withVolumeMounts(volumeMount.new(name, '/etc/configs')), - - job: - cronJob.new(name, _config.schedule, this.container) + - cronJob.mixin.metadata.withNamespace(namespace) + - cronJob.mixin.spec.withSuccessfulJobsHistoryLimit(1) + - cronJob.mixin.spec.withFailedJobsHistoryLimit(3) + - cronJob.mixin.spec.jobTemplate.spec.template.spec.withRestartPolicy('OnFailure') + - cronJob.mixin.spec.jobTemplate.spec.template.spec.withActiveDeadlineSeconds(600) + - cronJob.mixin.spec.jobTemplate.spec.withTtlSecondsAfterFinished(120) + - cronJob.mixin.spec.jobTemplate.spec.template.spec.withVolumes([ - volume.fromConfigMap( - name=name, - configMapName=this.configMap.metadata.name, - ), - ]), -} diff --git a/production/tanka/grafana-agent/v2/main.libsonnet b/production/tanka/grafana-agent/v2/main.libsonnet deleted file mode 100644 index 71a8e4b69f6d..000000000000 --- a/production/tanka/grafana-agent/v2/main.libsonnet +++ /dev/null @@ -1,50 +0,0 @@ -local k = import 'ksonnet-util/kausal.libsonnet'; -local container = k.core.v1.container; -local podTemplateSpec = k.core.v1.podTemplateSpec.spec; - -{ - new(name='grafana-agent', namespace=''):: - (import './internal/base.libsonnet')(name, namespace), - - // Controllers - withDeploymentController(replicas=1):: - (import './internal/controllers/deployment.libsonnet')(replicas), - withDaemonSetController():: - (import './internal/controllers/daemonset.libsonnet')(), - withStatefulSetController(replicas=1, 
volumeClaims=[]):: - (import './internal/controllers/statefulset.libsonnet')(replicas, volumeClaims), - - // Syncer - newSyncer(name='grafana-agent-syncer', namespace='', config={}):: - (import './internal/syncer.libsonnet')(name, namespace, config), - - // General - withAgentConfig(config):: { _config+: { agent_config: config } }, - withMetricsPort(port):: { _config+: { agent_port: port } }, - withArgsMixin(args):: { _config+: { agent_args+: args } }, - withImagesMixin(images):: { _images+: images }, - withConfigHash(include=true):: { _config+: { config_hash: include } }, - withPortsMixin(ports=[]):: { container+:: container.withPortsMixin(ports) }, - withVolumeMountsMixin(mounts=[]):: { container+:: container.withVolumeMountsMixin(mounts) }, - withVolumesMixin(volumes=[]):: { - controller+: { - spec+: { - template+: podTemplateSpec.withVolumesMixin(volumes), - }, - }, - }, - - // Helpers - newKubernetesMetrics(config={}):: - (import './internal/helpers/k8s.libsonnet').metrics(config), - newKubernetesLogs(config={}):: - (import './internal/helpers/k8s.libsonnet').logs(config), - newKubernetesTraces(config={}):: - (import './internal/helpers/k8s.libsonnet').traces(config), - withLogVolumeMounts(config={}):: - (import './internal/helpers/logs.libsonnet').volumeMounts(config), - withLogPermissions(config={}):: - (import './internal/helpers/logs.libsonnet').permissions(config), - withService(config={}):: - (import './internal/helpers/service.libsonnet').service(config), -} diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index f42c6211415a..ee01a0905137 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -21,8 +21,6 @@ export AGENT_IMAGE=grafana/agent export AGENT_BORINGCRYPTO_IMAGE=grafana/agent-boringcrypto export AGENTCTL_IMAGE=grafana/agentctl export OPERATOR_IMAGE=grafana/agent-operator -export SMOKE_IMAGE=us.gcr.io/kubernetes-dev/grafana/agent-smoke -export CROW_IMAGE=us.gcr.io/kubernetes-dev/grafana/agent-crow # We 
need to determine what version to assign to built binaries. If containers # are being built from a Drone tag trigger, we force the version to come from the @@ -106,30 +104,8 @@ case "$TARGET_CONTAINER" in . ;; - smoke) - docker buildx build --push \ - --platform $BUILD_PLATFORMS \ - --build-arg RELEASE_BUILD=1 \ - --build-arg VERSION="$VERSION" \ - -t "$SMOKE_IMAGE:$VERSION" \ - -t "$SMOKE_IMAGE:$BRANCH_TAG" \ - -f tools/smoke/Dockerfile \ - . - ;; - - crow) - docker buildx build --push \ - --platform $BUILD_PLATFORMS \ - --build-arg RELEASE_BUILD=1 \ - --build-arg VERSION="$VERSION" \ - -t "$CROW_IMAGE:$VERSION" \ - -t "$CROW_IMAGE:$BRANCH_TAG" \ - -f tools/crow/Dockerfile \ - . - ;; - *) - echo "Usage: $0 agent|agent-boringcrypto|agentctl|agent-operator|smoke|crow" + echo "Usage: $0 agent|agent-boringcrypto|agentctl|agent-operator" exit 1 ;; esac diff --git a/tools/crow/.gitignore b/tools/crow/.gitignore deleted file mode 100644 index 4abc650571dc..000000000000 --- a/tools/crow/.gitignore +++ /dev/null @@ -1 +0,0 @@ -grafana-agent-crow diff --git a/tools/crow/Dockerfile b/tools/crow/Dockerfile deleted file mode 100644 index 7decaf84fdd0..000000000000 --- a/tools/crow/Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -# syntax=docker/dockerfile:1.4 - -# NOTE: This Dockerfile can only be built using BuildKit. BuildKit is used by -# default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set -# in environment variables. - -FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.30.4 as build -ARG BUILDPLATFORM -ARG TARGETPLATFORM -ARG TARGETOS -ARG TARGETARCH -ARG TARGETVARIANT -ARG RELEASE_BUILD=1 -ARG VERSION - -COPY . 
/src/agent -WORKDIR /src/agent - -RUN --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=cache,target=/go/pkg/mod \ - GOOS=$TARGETOS GOARCH=$TARGETARCH GOARM=${TARGETVARIANT#v} \ - RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} \ - make crow - -FROM ubuntu:lunar - -LABEL org.opencontainers.image.source="https://github.com/grafana/agent" - -# Install dependencies needed at runtime. -RUN < rnd { - w.WriteHeader(http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - } - } - } - - // add default tasks - s.tasks = append(s.tasks, - repeatingTask{ - Task: &deletePodTask{ - logger: log.With(s.logger, "task", "delete_pod", "pod", "grafana-agent-0"), - clientset: clientset, - namespace: cfg.Namespace, - pod: fmt.Sprintf("%s-0", cfg.PodPrefix), - }, - frequency: cfg.ChaosFrequency, - }, - repeatingTask{ - Task: &deletePodBySelectorTask{ - logger: log.With(s.logger, "task", "delete_pod_by_selector"), - clientset: clientset, - namespace: cfg.Namespace, - selector: fmt.Sprintf("name=%s-cluster", cfg.PodPrefix), - }, - frequency: cfg.ChaosFrequency, - }, - repeatingTask{ - Task: &scaleDeploymentTask{ - logger: log.With(s.logger, "task", "scale_deployment", "deployment", "avalanche"), - clientset: clientset, - namespace: cfg.Namespace, - deployment: "avalanche", - maxReplicas: 11, - minReplicas: 2, - }, - frequency: cfg.MutationFrequency, - }) - - return s, nil -} - -func (s *Smoke) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.fakeRemoteWriteHandler(w, r) -} - -// Run starts the smoke test and runs the tasks concurrently. 
-func (s *Smoke) Run(ctx context.Context) error { - var g run.Group - ctx, cancel := context.WithCancel(ctx) - defer cancel() - taskFn := func(t repeatingTask) func() error { - return func() error { - tick := time.NewTicker(t.frequency) - defer tick.Stop() - for { - select { - case <-tick.C: - if err := t.Run(ctx); err != nil { - return err - } - case <-ctx.Done(): - return nil - } - } - } - } - for _, task := range s.tasks { - g.Add(taskFn(task), func(_ error) { - cancel() - }) - } - - if s.cfg.FakeRemoteWrite && s.fakeRemoteWriteHandler != nil { - level.Info(s.logger).Log("msg", "serving fake remote-write endpoint on :19090") - g.Add(func() error { - return http.ListenAndServe(":19090", s) - }, func(_ error) { - cancel() - }) - } - - return g.Run() -} diff --git a/tools/smoke/internal/tasks.go b/tools/smoke/internal/tasks.go deleted file mode 100644 index a2243f512a4a..000000000000 --- a/tools/smoke/internal/tasks.go +++ /dev/null @@ -1,101 +0,0 @@ -package smoke - -import ( - "context" - "math/rand" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/utils/ptr" -) - -// The Task interface represents some unit of work performed concurrently. 
-type Task interface { - Run(context.Context) error -} - -type repeatingTask struct { - Task - frequency time.Duration -} - -type deletePodTask struct { - logger log.Logger - clientset kubernetes.Interface - namespace string - pod string -} - -func (t *deletePodTask) Run(ctx context.Context) error { - level.Debug(t.logger).Log("msg", "deleting pod") - if err := t.clientset.CoreV1().Pods(t.namespace).Delete(ctx, t.pod, metav1.DeleteOptions{ - GracePeriodSeconds: ptr.To(int64(0)), - }); err != nil { - level.Error(t.logger).Log("msg", "failed to delete pod", "err", err) - } - return nil -} - -type scaleDeploymentTask struct { - logger log.Logger - clientset kubernetes.Interface - namespace string - deployment string - maxReplicas int - minReplicas int -} - -func (t *scaleDeploymentTask) Run(ctx context.Context) error { - newReplicas := rand.Intn(t.maxReplicas-t.minReplicas) + t.minReplicas - level.Debug(t.logger).Log("msg", "scaling replicas", "replicas", newReplicas) - - scale, err := t.clientset.AppsV1().Deployments(t.namespace). - GetScale(ctx, t.deployment, metav1.GetOptions{}) - if err != nil { - level.Error(t.logger).Log("msg", "failed to get autoscalingv1.Scale object", "err", err) - return nil - } - - sc := *scale - sc.Spec.Replicas = int32(newReplicas) - _, err = t.clientset.AppsV1().Deployments(t.namespace). 
- UpdateScale(ctx, t.deployment, &sc, metav1.UpdateOptions{}) - if err != nil { - level.Error(t.logger).Log("msg", "failed to scale deployment", "err", err) - } - return nil -} - -type deletePodBySelectorTask struct { - logger log.Logger - clientset kubernetes.Interface - namespace string - selector string -} - -func (t *deletePodBySelectorTask) Run(ctx context.Context) error { - list, err := t.clientset.CoreV1().Pods(t.namespace).List(ctx, metav1.ListOptions{ - LabelSelector: t.selector, - }) - if err != nil { - level.Error(t.logger).Log("msg", "failed to list pods", "err", err) - return nil - } - - l := len(list.Items) - if l > 0 { - i := rand.Intn(l) - pod := list.Items[i].Name - level.Debug(t.logger).Log("msg", "deleting pod", "pod", pod) - if err := t.clientset.CoreV1().Pods(t.namespace).Delete(ctx, pod, metav1.DeleteOptions{ - GracePeriodSeconds: ptr.To(int64(0)), - }); err != nil { - level.Error(t.logger).Log("msg", "failed to delete pod", "pod", pod, "err", err) - } - } - - return nil -} diff --git a/tools/smoke/internal/tasks_test.go b/tools/smoke/internal/tasks_test.go deleted file mode 100644 index 76fb1cbd2789..000000000000 --- a/tools/smoke/internal/tasks_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package smoke - -import ( - "context" - "testing" - - "github.com/go-kit/log" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" -) - -// Note: these tests are mostly worthless at this point -// but would allow easy debugging of tasks as they become more -// complex. Using https://pkg.go.dev/k8s.io/client-go/testing#ObjectTracker -// to mock responses from the fake client is also possible. 
- -func Test_deletePodBySelectorTask_Run(t1 *testing.T) { - type fields struct { - logger log.Logger - clientset kubernetes.Interface - namespace string - selector string - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "deletePodBySelectorTask", - fields: fields{ - logger: log.NewNopLogger(), - clientset: fake.NewSimpleClientset(), - namespace: "foo", - selector: "foo=bar", - }, - }, - } - for _, tt := range tests { - t1.Run(tt.name, func(t1 *testing.T) { - t := &deletePodBySelectorTask{ - logger: tt.fields.logger, - clientset: tt.fields.clientset, - namespace: tt.fields.namespace, - selector: tt.fields.selector, - } - if err := t.Run(tt.args.ctx); (err != nil) != tt.wantErr { - t1.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_deletePodTask_Run(t1 *testing.T) { - type fields struct { - logger log.Logger - clientset kubernetes.Interface - namespace string - pod string - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "deletePodTask", - fields: fields{ - logger: log.NewNopLogger(), - clientset: fake.NewSimpleClientset(), - namespace: "foo", - pod: "bar", - }, - }, - } - for _, tt := range tests { - t1.Run(tt.name, func(t1 *testing.T) { - t := &deletePodTask{ - logger: tt.fields.logger, - clientset: tt.fields.clientset, - namespace: tt.fields.namespace, - pod: tt.fields.pod, - } - if err := t.Run(tt.args.ctx); (err != nil) != tt.wantErr { - t1.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_scaleDeploymentTask_Run(t1 *testing.T) { - type fields struct { - logger log.Logger - clientset kubernetes.Interface - namespace string - deployment string - maxReplicas int - minReplicas int - } - type args struct { - ctx context.Context - } - tests := []struct { - name string - fields fields - args args - wantErr 
bool - }{ - { - name: "scaleDeploymentTask", - fields: fields{ - logger: log.NewNopLogger(), - clientset: fake.NewSimpleClientset(), - namespace: "foo", - deployment: "bar", - maxReplicas: 11, - minReplicas: 2, - }, - }, - } - for _, tt := range tests { - t1.Run(tt.name, func(t1 *testing.T) { - t := &scaleDeploymentTask{ - logger: tt.fields.logger, - clientset: tt.fields.clientset, - namespace: tt.fields.namespace, - deployment: tt.fields.deployment, - maxReplicas: tt.fields.maxReplicas, - minReplicas: tt.fields.minReplicas, - } - if err := t.Run(tt.args.ctx); (err != nil) != tt.wantErr { - t1.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/tools/smoke/main.go b/tools/smoke/main.go deleted file mode 100644 index 422bf64f2d2a..000000000000 --- a/tools/smoke/main.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "context" - "flag" - "os" - "time" - - "github.com/go-kit/log/level" - "github.com/grafana/agent/pkg/server" - smoke "github.com/grafana/agent/tools/smoke/internal" - "github.com/grafana/dskit/log" -) - -func main() { - var ( - cfg smoke.Config - logLevel log.Level - logFormat string - withTimeout time.Duration - ) - - cfg.RegisterFlags(flag.CommandLine) - logLevel.RegisterFlags(flag.CommandLine) - flag.DurationVar(&withTimeout, "duration", time.Duration(0), "test duration") - flag.Parse() - - logger := server.NewLoggerFromLevel(logLevel, logFormat) - - ctx := context.Background() - if withTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, withTimeout) - defer cancel() - level.Debug(logger).Log("msg", "running with duration", "duration", withTimeout.String()) - } - - level.Info(logger).Log("msg", "starting smoke test") - smokeTest, err := smoke.New(logger, cfg) - if err != nil { - level.Error(logger).Log("msg", "error constructing smoke test", "err", err) - os.Exit(1) - } - if err := smokeTest.Run(ctx); err != nil { - level.Error(logger).Log("msg", "smoke test run failure", 
"err", err) - os.Exit(1) - } -} diff --git a/web/ui/package.json b/web/ui/package.json index 43541fd75b60..8e59d7d2b100 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -21,9 +21,9 @@ "@types/d3-zoom": "^3.0.2", "@types/react": "^18.2.6", "@types/react-dom": "^18.2.14", - "@types/react-syntax-highlighter": "^15.5.6", + "@types/react-syntax-highlighter": "^15.5.11", "eslint": "^8.40.0", - "eslint-config-prettier": "^8.8.0", + "eslint-config-prettier": "^9.1.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", "eslint-plugin-simple-import-sort": "^10.0.0", diff --git a/web/ui/yarn.lock b/web/ui/yarn.lock index 11ad3cc5f7fe..20ef8714129d 100644 --- a/web/ui/yarn.lock +++ b/web/ui/yarn.lock @@ -2207,10 +2207,10 @@ dependencies: "@types/react" "*" -"@types/react-syntax-highlighter@^15.5.6": - version "15.5.6" - resolved "https://registry.npmjs.org/@types/react-syntax-highlighter/-/react-syntax-highlighter-15.5.6.tgz" - integrity sha512-i7wFuLbIAFlabTeD2I1cLjEOrG/xdMa/rpx2zwzAoGHuXJDhSqp9BSfDlMHSh9JSuNfxHk9eEmMX6D55GiyjGg== +"@types/react-syntax-highlighter@^15.5.11": + version "15.5.11" + resolved "https://registry.yarnpkg.com/@types/react-syntax-highlighter/-/react-syntax-highlighter-15.5.11.tgz#e050745b22eff81fc13cb0c763dd0d063413bbf1" + integrity sha512-ZqIJl+Pg8kD+47kxUjvrlElrraSUrYa4h0dauY/U/FTUuprSCqvUj+9PNQNQzVc6AJgIWUUxn87/gqsMHNbRjw== dependencies: "@types/react" "*" @@ -4418,10 +4418,10 @@ escodegen@^2.0.0: optionalDependencies: source-map "~0.6.1" -eslint-config-prettier@^8.8.0: - version "8.8.0" - resolved "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.8.0.tgz" - integrity sha512-wLbQiFre3tdGgpDv67NQKnJuTlcUVYHas3k+DZCc2U2BadthoEY4B7hLPvAxaqdyOGCzuLfii2fqGph10va7oA== +eslint-config-prettier@^9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-9.1.0.tgz#31af3d94578645966c082fcb71a5846d3c94867f" + integrity 
sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw== eslint-config-react-app@^7.0.1: version "7.0.1" @@ -4931,10 +4931,9 @@ flatted@^3.1.0: resolved "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz" integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ== -follow-redirects@^1.0.0: - version "1.15.2" - resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz" - integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA== + version "1.15.4" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.4.tgz#cdc7d308bf6493126b17ea2191ea0ccf3e535adf" + integrity sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw== for-each@^0.3.3: version "0.3.3"