diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index 08a8535b30..97d1278b43 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -25,7 +25,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: "1.21.11"
+ go-version: "1.21.12"
cache: true
- run: yarn --frozen-lockfile
- run: make build
@@ -55,12 +55,12 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: "1.21.11"
+ go-version: "1.21.12"
cache: true
- run: yarn --frozen-lockfile
- run: make build
- name: run nginx with /foobar/
- run: docker-compose -f scripts/base-url/docker-compose.yaml up -d
+ run: docker compose -f scripts/base-url/docker-compose.yaml up -d
- name: Cypress run
uses: cypress-io/github-action@v5
with:
diff --git a/.github/workflows/fuzzer.yml b/.github/workflows/fuzzer.yml
index 5fbf6012f5..db08e7eb09 100644
--- a/.github/workflows/fuzzer.yml
+++ b/.github/workflows/fuzzer.yml
@@ -13,6 +13,6 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- name: Run Fuzz_Merge_Single
run: go test -fuzz=Fuzz_Merge_Single --fuzztime 1h -run '^$' -v ./pkg/pprof/
diff --git a/.github/workflows/helm-ci.yml b/.github/workflows/helm-ci.yml
index eb2dc81a99..bb732868b2 100644
--- a/.github/workflows/helm-ci.yml
+++ b/.github/workflows/helm-ci.yml
@@ -6,7 +6,7 @@ jobs:
call-lint:
uses: grafana/helm-charts/.github/workflows/linter.yml@main
with:
- filter_regex_include: .*operations/pyroscope/helm/.*
+ filter_regex_include: operations/pyroscope/helm/pyroscope/templates/.*
call-lint-test:
uses: grafana/helm-charts/.github/workflows/lint-test.yaml@main
@@ -14,4 +14,3 @@ jobs:
ct_configfile: operations/pyroscope/helm/ct.yaml
ct_check_version_increment: false
helm_version: v3.14.3
- kubeVersion: "1.23"
diff --git a/.github/workflows/publish-technical-documentation-next.yml b/.github/workflows/publish-technical-documentation-next.yml
index a6f853c049..25fa4e1fe2 100644
--- a/.github/workflows/publish-technical-documentation-next.yml
+++ b/.github/workflows/publish-technical-documentation-next.yml
@@ -15,7 +15,7 @@ jobs:
uses: "actions/checkout@v3"
- name: "Build website"
run: |
- docker run -v ${PWD}/docs/sources:/hugo/content/docs/phlare/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
+ docker run -v ${PWD}/docs/sources:/hugo/content/docs/pyroscope/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
sync:
runs-on: "ubuntu-latest"
diff --git a/.github/workflows/publish-technical-documentation-release.yml b/.github/workflows/publish-technical-documentation-release.yml
index e0587cceb6..cd33b11ba8 100644
--- a/.github/workflows/publish-technical-documentation-release.yml
+++ b/.github/workflows/publish-technical-documentation-release.yml
@@ -17,7 +17,7 @@ jobs:
uses: "actions/checkout@v3"
- name: "Build website"
run: |
- docker run -v ${PWD}/docs/sources:/hugo/content/docs/phlare/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
+ docker run -v ${PWD}/docs/sources:/hugo/content/docs/pyroscope/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'make hugo'
sync:
runs-on: "ubuntu-latest"
diff --git a/.github/workflows/pyrobench.yaml b/.github/workflows/pyrobench.yaml
new file mode 100644
index 0000000000..838327b487
--- /dev/null
+++ b/.github/workflows/pyrobench.yaml
@@ -0,0 +1,16 @@
+on: issue_comment
+
+jobs:
+ pyrobench:
+ name: Run Pyrobench on demand by PR comment
+ if: ${{ (github.event.issue.pull_request) && contains(github.event.comment.body, '@pyrobench') }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-go@v5
+ with:
+ go-version: '1.22'
+ - name: Pyrobench
+ uses: grafana/pyrobench@main
+ with:
+ github_context: ${{ toJson(github) }}
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8dc13dce0f..888c763ec5 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -28,7 +28,7 @@ jobs:
- run: git fetch --force --tags
- uses: actions/setup-go@v3
with:
- go-version: "1.21.11"
+ go-version: "1.21.12"
cache: true
- uses: actions/setup-node@v3
with:
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 31fda45447..9215438a97 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -22,7 +22,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- name: Format
run: make fmt check/unstaged-changes
test:
@@ -33,7 +33,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- name: Go Mod
run: make check/go/mod
- name: Test
@@ -46,7 +46,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- name: Run linter
run: make lint
- name: Check helm manifests
@@ -96,7 +96,7 @@ jobs:
- name: Set up go
uses: actions/setup-go@v2
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- uses: actions/setup-node@v3
with:
node-version: lts/hydrogen
@@ -118,7 +118,7 @@ jobs:
- name: Set up go
uses: actions/setup-go@v2
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- uses: actions/setup-node@v3
with:
node-version: lts/hydrogen
@@ -132,7 +132,7 @@ jobs:
- name: Pyroscope Build & push multi-arch image
id: build-push
run: |
- make docker-image/pyroscope/push "BUILDX_ARGS=--cache-from=type=gha --cache-to=type=gha"
+ make docker-image/pyroscope/push docker-image/pyroscope/push-debug "BUILDX_ARGS=--cache-from=type=gha --cache-to=type=gha"
deploy-dev-001:
if: github.event_name == 'push' && github.repository == 'grafana/pyroscope'
diff --git a/.github/workflows/test_ebpf.yml b/.github/workflows/test_ebpf.yml
index 33e4314b1f..4ec2f5a408 100644
--- a/.github/workflows/test_ebpf.yml
+++ b/.github/workflows/test_ebpf.yml
@@ -27,7 +27,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- name: Test
run: sudo make -C ./ebpf go/test/amd64
test_ebpf_qemu:
@@ -82,7 +82,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
- go-version: 1.21.11
+ go-version: 1.21.12
- name: Install qemu
run: sudo apt-get update && sudo apt-get -y install qemu-system-x86 qemu-system-aarch64
- name: Build tests
diff --git a/.github/workflows/weekly-release.yml b/.github/workflows/weekly-release.yml
index 03c3f732ec..3904338833 100644
--- a/.github/workflows/weekly-release.yml
+++ b/.github/workflows/weekly-release.yml
@@ -15,6 +15,8 @@ jobs:
run: echo "GORELEASER_CURRENT_TAG=v0.0.0-$(./tools/image-tag)" >> $GITHUB_ENV
- name: Set WEEKLY_IMAGE_TAG
run: echo "WEEKLY_IMAGE_TAG=$(./tools/image-tag)" >> $GITHUB_ENV
+ - name: Set GORELEASER_STRIP_DEBUG_INFO=false, so binaries are not stripped of debug info
+ run: echo "GORELEASER_STRIP_DEBUG_INFO=false" >> $GITHUB_ENV
# Forces goreleaser to use the correct previous tag for the changelog
- name: Set GORELEASER_PREVIOUS_TAG
run: echo "GORELEASER_PREVIOUS_TAG=$(git tag -l --sort=-version:refname | grep -E '^weekly-.*' | head -n 2 | tail -1)" >> $GITHUB_ENV
@@ -25,7 +27,7 @@ jobs:
git tag "$WEEKLY_IMAGE_TAG"
- uses: actions/setup-go@v3
with:
- go-version: "1.21.11"
+ go-version: "1.21.12"
cache: true
# setup docker buildx
- name: Set up QEMU
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index 867395cfb9..ae9de709df 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -3,7 +3,10 @@ version: 2
before:
hooks:
# This hook ensures that goreleaser uses the correct go version for a Pyroscope release
- - sh -euc "go version | grep "go version go1.21.11 " || { echo "Unexpected go version"; exit 1; }"
+ - sh -euc 'go version | grep "go version go1.21.12 " || { echo "Unexpected go version"; exit 1; }'
+env:
+ # Strip debug information from the binary by default; weekly builds will have debug information
+ - GORELEASER_DEBUG_INFO_FLAGS={{ if and (index .Env "GORELEASER_STRIP_DEBUG_INFO") (eq .Env.GORELEASER_STRIP_DEBUG_INFO "false") }}{{ else }}-s -w{{ end }}
builds:
- env:
- CGO_ENABLED=0
@@ -15,7 +18,6 @@ builds:
- arm64
- arm
goarm:
- - "6"
- "7"
main: ./cmd/pyroscope
mod_timestamp: "{{ .CommitTimestamp }}"
@@ -26,7 +28,7 @@ builds:
- embedassets
ldflags:
- >
- -extldflags "-static" -s -w
+ -extldflags "-static" {{ .Env.GORELEASER_DEBUG_INFO_FLAGS }}
-X "github.com/grafana/pyroscope/pkg/util/build.Branch={{ .Branch }}"
-X "github.com/grafana/pyroscope/pkg/util/build.Version={{ .Version }}"
-X "github.com/grafana/pyroscope/pkg/util/build.Revision={{ .ShortCommit }}"
@@ -38,7 +40,7 @@ builds:
- netgo
ldflags:
- >
- -extldflags "-static" -s -w
+ -extldflags "-static" {{ .Env.GORELEASER_DEBUG_INFO_FLAGS }}
-X "github.com/grafana/pyroscope/pkg/util/build.Branch={{ .Branch }}"
-X "github.com/grafana/pyroscope/pkg/util/build.Version={{ .Version }}"
-X "github.com/grafana/pyroscope/pkg/util/build.Revision={{ .ShortCommit }}"
@@ -52,7 +54,6 @@ builds:
- arm64
- arm
goarm:
- - "6"
- "7"
ignore:
- goos: windows
@@ -97,22 +98,6 @@ dockers:
- "--label=org.opencontainers.image.title={{.ProjectName}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- - use: buildx
- goos: linux
- goarch: arm
- goarm: "6"
- extra_files:
- - cmd/pyroscope/pyroscope.yaml
- dockerfile: ./cmd/pyroscope/Dockerfile
- image_templates:
- - "grafana/{{ .ProjectName }}:{{ .Version }}-armv6"
- - "grafana/{{ .ProjectName }}:latest-armv6"
- build_flag_templates:
- - "--platform=linux/arm/v6"
- - "--label=org.opencontainers.image.created={{.Date}}"
- - "--label=org.opencontainers.image.title={{.ProjectName}}"
- - "--label=org.opencontainers.image.revision={{.FullCommit}}"
- - "--label=org.opencontainers.image.version={{.Version}}"
- use: buildx
goos: linux
goarch: arm
@@ -135,13 +120,11 @@ docker_manifests:
image_templates:
- grafana/{{ .ProjectName }}:{{ .Version }}-amd64
- grafana/{{ .ProjectName }}:{{ .Version }}-arm64v8
- - grafana/{{ .ProjectName }}:{{ .Version }}-armv6
- grafana/{{ .ProjectName }}:{{ .Version }}-armv7
- name_template: grafana/{{ .ProjectName }}:latest
image_templates:
- grafana/{{ .ProjectName }}:latest-amd64
- grafana/{{ .ProjectName }}:latest-arm64v8
- - grafana/{{ .ProjectName }}:latest-armv6
- grafana/{{ .ProjectName }}:latest-armv7
nfpms:
- id: pyroscope
diff --git a/Makefile b/Makefile
index dee6fadb15..dd37c00785 100644
--- a/Makefile
+++ b/Makefile
@@ -77,13 +77,10 @@ build: frontend/build go/bin ## Do a production build (requiring the frontend bu
build-dev: ## Do a dev build (without requiring the frontend)
$(MAKE) EMBEDASSETS="" go/bin
-.PHONY: frontend/build
-frontend/build: frontend/deps ## Do a production build for the frontend
- yarn build
-.PHONY: frontend/deps
-frontend/deps:
- yarn --frozen-lockfile
+.PHONY: frontend/build
+frontend/build:
+ docker build -f cmd/pyroscope/frontend.Dockerfile --output=public/build .
.PHONY: release
release/prereq: $(BIN)/goreleaser ## Ensure release pre requesites are met
@@ -165,7 +162,7 @@ check/go/mod: go/mod
define docker_buildx
- docker buildx build $(1) --platform $(IMAGE_PLATFORM) $(BUILDX_ARGS) --build-arg=revision=$(GIT_REVISION) -t $(IMAGE_PREFIX)$(shell basename $(@D)) -t $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG) -f cmd/$(shell basename $(@D))/$(2)Dockerfile .
+ docker buildx build $(1) --platform $(IMAGE_PLATFORM) $(BUILDX_ARGS) --build-arg=revision=$(GIT_REVISION) -t $(IMAGE_PREFIX)$(shell basename $(@D)):$(2)latest -t $(IMAGE_PREFIX)$(shell basename $(@D)):$(2)$(IMAGE_TAG) -f cmd/$(shell basename $(@D))/$(2)Dockerfile .
endef
define deploy
@@ -191,6 +188,12 @@ docker-image/pyroscope/build-debug: GOARCH=amd64
docker-image/pyroscope/build-debug: frontend/build go/bin-debug $(BIN)/linux_amd64/dlv
$(call docker_buildx,--load,debug.)
+.PHONY: docker-image/pyroscope/push-debug
+docker-image/pyroscope/push-debug: GOOS=linux
+docker-image/pyroscope/push-debug: GOARCH=amd64
+docker-image/pyroscope/push-debug: frontend/build go/bin-debug $(BIN)/linux_amd64/dlv
+ $(call docker_buildx,--push,debug.)
+
.PHONY: docker-image/pyroscope/build
docker-image/pyroscope/build: GOOS=linux
docker-image/pyroscope/build: GOARCH=amd64
@@ -260,11 +263,11 @@ $(BIN)/buf: Makefile
$(BIN)/golangci-lint: Makefile
@mkdir -p $(@D)
- GOBIN=$(abspath $(@D)) $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.58.2
+ GOBIN=$(abspath $(@D)) $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1
$(BIN)/protoc-gen-go: Makefile go.mod
@mkdir -p $(@D)
- GOBIN=$(abspath $(@D)) $(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.1
+ GOBIN=$(abspath $(@D)) $(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
$(BIN)/protoc-gen-connect-go: Makefile go.mod
@mkdir -p $(@D)
@@ -327,7 +330,7 @@ $(BIN)/gotestsum: Makefile go.mod
@mkdir -p $(@D)
GOBIN=$(abspath $(@D)) $(GO) install gotest.tools/gotestsum@v1.9.0
-DLV_VERSION=v1.21.0
+DLV_VERSION=v1.23.0
$(BIN)/dlv: Makefile go.mod
@mkdir -p $(@D)
@@ -357,7 +360,7 @@ helm/lint: $(BIN)/helm
$(BIN)/helm lint ./operations/pyroscope/helm/pyroscope/
helm/docs: $(BIN)/helm
- docker run --rm --volume "$(CURDIR)/operations/pyroscope/helm:/helm-docs" -u "$(shell id -u)" jnorwood/helm-docs:v1.13.1
+ docker run --rm --volume "$(CURDIR)/operations/pyroscope/helm:/helm-docs" -u "$(shell id -u)" jnorwood/helm-docs:v1.8.1
.PHONY: goreleaser/lint
goreleaser/lint: $(BIN)/goreleaser
diff --git a/README.md b/README.md
index dbf4e458dc..e5dd6f0be1 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ Grafana Pyroscope is an open source continuous profiling platform. It will help
* Understand the call tree of your application
* Auto-instrument your code to link profiling data to traces
-## 🔥 [Pyroscope Live Demo](https://demo.pyroscope.io/) 🔥
+## 🔥 [Pyroscope Live Demo](https://play.grafana.org/a/grafana-pyroscope-app/)
[![Pyroscope GIF Demo](https://user-images.githubusercontent.com/23323466/143324845-16ff72df-231e-412d-bd0a-38ef2e09cba8.gif)](https://demo.pyroscope.io/)
@@ -116,20 +116,15 @@ For more information on how to use Pyroscope with other programming languages, i
You can download the latest version of pyroscope for macOS, linux and Docker from our [Releases page](https://github.com/grafana/pyroscope/releases).
-## Supported Languages
+## [Supported Languages][supported languages]
-* [x] Go (via `pprof`)
-* [x] Python (via `py-spy`)
-* [x] Ruby (via `rbspy`)
-* [x] Linux eBPF
-* [x] Java (via `async-profiler`)
-* [x] Rust (via `pprof-rs`)
-* [x] .NET
-* [x] PHP (via `phpspy`)
-* [x] Node
+Our documentation contains the most recent list of [supported languages] and also an overview of which [profiling types are supported per language][profile-types-languages].
Let us know what other integrations you want to see in [our issues](https://github.com/grafana/pyroscope/issues?q=is%3Aissue+is%3Aopen+label%3Anew-profilers) or in [our slack](https://slack.grafana.com).
+[supported languages]: https://grafana.com/docs/pyroscope/latest/configure-client/
+[profile-types-languages]: https://grafana.com/docs/pyroscope/latest/view-and-analyze-profile-data/profiling-types/#available-profiling-types
+
## Credits
Pyroscope is possible thanks to the excellent work of many people, including but not limited to:
diff --git a/api/gen/proto/go/adhocprofiles/v1/adhocprofiles.pb.go b/api/gen/proto/go/adhocprofiles/v1/adhocprofiles.pb.go
index 3a581b3aee..b92ebcf4eb 100644
--- a/api/gen/proto/go/adhocprofiles/v1/adhocprofiles.pb.go
+++ b/api/gen/proto/go/adhocprofiles/v1/adhocprofiles.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: adhocprofiles/v1/adhocprofiles.proto
@@ -495,7 +495,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_rawDescGZIP() []byte {
}
var file_adhocprofiles_v1_adhocprofiles_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
-var file_adhocprofiles_v1_adhocprofiles_proto_goTypes = []interface{}{
+var file_adhocprofiles_v1_adhocprofiles_proto_goTypes = []any{
(*AdHocProfilesUploadRequest)(nil), // 0: adhocprofiles.v1.AdHocProfilesUploadRequest
(*AdHocProfilesGetRequest)(nil), // 1: adhocprofiles.v1.AdHocProfilesGetRequest
(*AdHocProfilesGetResponse)(nil), // 2: adhocprofiles.v1.AdHocProfilesGetResponse
@@ -524,7 +524,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*AdHocProfilesUploadRequest); i {
case 0:
return &v.state
@@ -536,7 +536,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
return nil
}
}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*AdHocProfilesGetRequest); i {
case 0:
return &v.state
@@ -548,7 +548,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
return nil
}
}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*AdHocProfilesGetResponse); i {
case 0:
return &v.state
@@ -560,7 +560,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
return nil
}
}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*AdHocProfilesListRequest); i {
case 0:
return &v.state
@@ -572,7 +572,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
return nil
}
}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*AdHocProfilesListResponse); i {
case 0:
return &v.state
@@ -584,7 +584,7 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
return nil
}
}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*AdHocProfilesProfileMetadata); i {
case 0:
return &v.state
@@ -597,8 +597,8 @@ func file_adhocprofiles_v1_adhocprofiles_proto_init() {
}
}
}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[0].OneofWrappers = []interface{}{}
- file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[1].OneofWrappers = []interface{}{}
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[0].OneofWrappers = []any{}
+ file_adhocprofiles_v1_adhocprofiles_proto_msgTypes[1].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/api/gen/proto/go/google/v1/profile.pb.go b/api/gen/proto/go/google/v1/profile.pb.go
index 4df2323579..9d2a74865e 100644
--- a/api/gen/proto/go/google/v1/profile.pb.go
+++ b/api/gen/proto/go/google/v1/profile.pb.go
@@ -38,7 +38,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: google/v1/profile.proto
@@ -954,7 +954,7 @@ func file_google_v1_profile_proto_rawDescGZIP() []byte {
}
var file_google_v1_profile_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_google_v1_profile_proto_goTypes = []interface{}{
+var file_google_v1_profile_proto_goTypes = []any{
(*Profile)(nil), // 0: google.v1.Profile
(*ValueType)(nil), // 1: google.v1.ValueType
(*Sample)(nil), // 2: google.v1.Sample
@@ -986,7 +986,7 @@ func file_google_v1_profile_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_v1_profile_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Profile); i {
case 0:
return &v.state
@@ -998,7 +998,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ValueType); i {
case 0:
return &v.state
@@ -1010,7 +1010,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Sample); i {
case 0:
return &v.state
@@ -1022,7 +1022,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Label); i {
case 0:
return &v.state
@@ -1034,7 +1034,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Mapping); i {
case 0:
return &v.state
@@ -1046,7 +1046,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Location); i {
case 0:
return &v.state
@@ -1058,7 +1058,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*Line); i {
case 0:
return &v.state
@@ -1070,7 +1070,7 @@ func file_google_v1_profile_proto_init() {
return nil
}
}
- file_google_v1_profile_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_google_v1_profile_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Function); i {
case 0:
return &v.state
diff --git a/api/gen/proto/go/ingester/v1/ingester.pb.go b/api/gen/proto/go/ingester/v1/ingester.pb.go
index 3b679ca791..a805190c13 100644
--- a/api/gen/proto/go/ingester/v1/ingester.pb.go
+++ b/api/gen/proto/go/ingester/v1/ingester.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: ingester/v1/ingester.proto
@@ -2200,7 +2200,7 @@ func file_ingester_v1_ingester_proto_rawDescGZIP() []byte {
var file_ingester_v1_ingester_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_ingester_v1_ingester_proto_msgTypes = make([]protoimpl.MessageInfo, 29)
-var file_ingester_v1_ingester_proto_goTypes = []interface{}{
+var file_ingester_v1_ingester_proto_goTypes = []any{
(StacktracesMergeFormat)(0), // 0: ingester.v1.StacktracesMergeFormat
(*ProfileTypesRequest)(nil), // 1: ingester.v1.ProfileTypesRequest
(*ProfileTypesResponse)(nil), // 2: ingester.v1.ProfileTypesResponse
@@ -2317,7 +2317,7 @@ func file_ingester_v1_ingester_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_ingester_v1_ingester_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ProfileTypesRequest); i {
case 0:
return &v.state
@@ -2329,7 +2329,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ProfileTypesResponse); i {
case 0:
return &v.state
@@ -2341,7 +2341,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*SeriesRequest); i {
case 0:
return &v.state
@@ -2353,7 +2353,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*SeriesResponse); i {
case 0:
return &v.state
@@ -2365,7 +2365,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*FlushRequest); i {
case 0:
return &v.state
@@ -2377,7 +2377,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*FlushResponse); i {
case 0:
return &v.state
@@ -2389,7 +2389,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SelectProfilesRequest); i {
case 0:
return &v.state
@@ -2401,7 +2401,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesStacktracesRequest); i {
case 0:
return &v.state
@@ -2413,7 +2413,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesStacktracesResult); i {
case 0:
return &v.state
@@ -2425,7 +2425,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesStacktracesResponse); i {
case 0:
return &v.state
@@ -2437,7 +2437,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*SelectSpanProfileRequest); i {
case 0:
return &v.state
@@ -2449,7 +2449,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*MergeSpanProfileRequest); i {
case 0:
return &v.state
@@ -2461,7 +2461,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*MergeSpanProfileResponse); i {
case 0:
return &v.state
@@ -2473,7 +2473,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*MergeSpanProfileResult); i {
case 0:
return &v.state
@@ -2485,7 +2485,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*ProfileSets); i {
case 0:
return &v.state
@@ -2497,7 +2497,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*SeriesProfile); i {
case 0:
return &v.state
@@ -2509,7 +2509,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*Profile); i {
case 0:
return &v.state
@@ -2521,7 +2521,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*StacktraceSample); i {
case 0:
return &v.state
@@ -2533,7 +2533,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesLabelsRequest); i {
case 0:
return &v.state
@@ -2545,7 +2545,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesLabelsResponse); i {
case 0:
return &v.state
@@ -2557,7 +2557,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesPprofRequest); i {
case 0:
return &v.state
@@ -2569,7 +2569,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*MergeProfilesPprofResponse); i {
case 0:
return &v.state
@@ -2581,7 +2581,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*BlockMetadataRequest); i {
case 0:
return &v.state
@@ -2593,7 +2593,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*BlockMetadataResponse); i {
case 0:
return &v.state
@@ -2605,7 +2605,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*Hints); i {
case 0:
return &v.state
@@ -2617,7 +2617,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*BlockHints); i {
case 0:
return &v.state
@@ -2629,7 +2629,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*GetBlockStatsRequest); i {
case 0:
return &v.state
@@ -2641,7 +2641,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*GetBlockStatsResponse); i {
case 0:
return &v.state
@@ -2653,7 +2653,7 @@ func file_ingester_v1_ingester_proto_init() {
return nil
}
}
- file_ingester_v1_ingester_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_ingester_v1_ingester_proto_msgTypes[28].Exporter = func(v any, i int) any {
switch v := v.(*BlockStats); i {
case 0:
return &v.state
@@ -2666,12 +2666,12 @@ func file_ingester_v1_ingester_proto_init() {
}
}
}
- file_ingester_v1_ingester_proto_msgTypes[6].OneofWrappers = []interface{}{}
- file_ingester_v1_ingester_proto_msgTypes[7].OneofWrappers = []interface{}{}
- file_ingester_v1_ingester_proto_msgTypes[10].OneofWrappers = []interface{}{}
- file_ingester_v1_ingester_proto_msgTypes[11].OneofWrappers = []interface{}{}
- file_ingester_v1_ingester_proto_msgTypes[18].OneofWrappers = []interface{}{}
- file_ingester_v1_ingester_proto_msgTypes[20].OneofWrappers = []interface{}{}
+ file_ingester_v1_ingester_proto_msgTypes[6].OneofWrappers = []any{}
+ file_ingester_v1_ingester_proto_msgTypes[7].OneofWrappers = []any{}
+ file_ingester_v1_ingester_proto_msgTypes[10].OneofWrappers = []any{}
+ file_ingester_v1_ingester_proto_msgTypes[11].OneofWrappers = []any{}
+ file_ingester_v1_ingester_proto_msgTypes[18].OneofWrappers = []any{}
+ file_ingester_v1_ingester_proto_msgTypes[20].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/api/gen/proto/go/push/v1/push.pb.go b/api/gen/proto/go/push/v1/push.pb.go
index 997e17464a..17c139b332 100644
--- a/api/gen/proto/go/push/v1/push.pb.go
+++ b/api/gen/proto/go/push/v1/push.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: push/v1/push.proto
@@ -276,7 +276,7 @@ func file_push_v1_push_proto_rawDescGZIP() []byte {
}
var file_push_v1_push_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_push_v1_push_proto_goTypes = []interface{}{
+var file_push_v1_push_proto_goTypes = []any{
(*PushResponse)(nil), // 0: push.v1.PushResponse
(*PushRequest)(nil), // 1: push.v1.PushRequest
(*RawProfileSeries)(nil), // 2: push.v1.RawProfileSeries
@@ -302,7 +302,7 @@ func file_push_v1_push_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_push_v1_push_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_push_v1_push_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*PushResponse); i {
case 0:
return &v.state
@@ -314,7 +314,7 @@ func file_push_v1_push_proto_init() {
return nil
}
}
- file_push_v1_push_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_push_v1_push_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*PushRequest); i {
case 0:
return &v.state
@@ -326,7 +326,7 @@ func file_push_v1_push_proto_init() {
return nil
}
}
- file_push_v1_push_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_push_v1_push_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*RawProfileSeries); i {
case 0:
return &v.state
@@ -338,7 +338,7 @@ func file_push_v1_push_proto_init() {
return nil
}
}
- file_push_v1_push_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_push_v1_push_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*RawSample); i {
case 0:
return &v.state
diff --git a/api/gen/proto/go/querier/v1/querier.pb.go b/api/gen/proto/go/querier/v1/querier.pb.go
index 3cfa0a2ec6..077f4b8868 100644
--- a/api/gen/proto/go/querier/v1/querier.pb.go
+++ b/api/gen/proto/go/querier/v1/querier.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: querier/v1/querier.proto
@@ -1748,7 +1748,7 @@ func file_querier_v1_querier_proto_rawDescGZIP() []byte {
var file_querier_v1_querier_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_querier_v1_querier_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
-var file_querier_v1_querier_proto_goTypes = []interface{}{
+var file_querier_v1_querier_proto_goTypes = []any{
(ProfileFormat)(0), // 0: querier.v1.ProfileFormat
(*ProfileTypesRequest)(nil), // 1: querier.v1.ProfileTypesRequest
(*ProfileTypesResponse)(nil), // 2: querier.v1.ProfileTypesResponse
@@ -1836,7 +1836,7 @@ func file_querier_v1_querier_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_querier_v1_querier_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ProfileTypesRequest); i {
case 0:
return &v.state
@@ -1848,7 +1848,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ProfileTypesResponse); i {
case 0:
return &v.state
@@ -1860,7 +1860,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*SeriesRequest); i {
case 0:
return &v.state
@@ -1872,7 +1872,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*SeriesResponse); i {
case 0:
return &v.state
@@ -1884,7 +1884,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*SelectMergeStacktracesRequest); i {
case 0:
return &v.state
@@ -1896,7 +1896,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*SelectMergeStacktracesResponse); i {
case 0:
return &v.state
@@ -1908,7 +1908,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SelectMergeSpanProfileRequest); i {
case 0:
return &v.state
@@ -1920,7 +1920,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*SelectMergeSpanProfileResponse); i {
case 0:
return &v.state
@@ -1932,7 +1932,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*DiffRequest); i {
case 0:
return &v.state
@@ -1944,7 +1944,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*DiffResponse); i {
case 0:
return &v.state
@@ -1956,7 +1956,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*FlameGraph); i {
case 0:
return &v.state
@@ -1968,7 +1968,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*FlameGraphDiff); i {
case 0:
return &v.state
@@ -1980,7 +1980,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*Level); i {
case 0:
return &v.state
@@ -1992,7 +1992,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*SelectMergeProfileRequest); i {
case 0:
return &v.state
@@ -2004,7 +2004,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*SelectSeriesRequest); i {
case 0:
return &v.state
@@ -2016,7 +2016,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*SelectSeriesResponse); i {
case 0:
return &v.state
@@ -2028,7 +2028,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*AnalyzeQueryRequest); i {
case 0:
return &v.state
@@ -2040,7 +2040,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*AnalyzeQueryResponse); i {
case 0:
return &v.state
@@ -2052,7 +2052,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*QueryScope); i {
case 0:
return &v.state
@@ -2064,7 +2064,7 @@ func file_querier_v1_querier_proto_init() {
return nil
}
}
- file_querier_v1_querier_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_v1_querier_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*QueryImpact); i {
case 0:
return &v.state
@@ -2077,10 +2077,10 @@ func file_querier_v1_querier_proto_init() {
}
}
}
- file_querier_v1_querier_proto_msgTypes[4].OneofWrappers = []interface{}{}
- file_querier_v1_querier_proto_msgTypes[6].OneofWrappers = []interface{}{}
- file_querier_v1_querier_proto_msgTypes[13].OneofWrappers = []interface{}{}
- file_querier_v1_querier_proto_msgTypes[14].OneofWrappers = []interface{}{}
+ file_querier_v1_querier_proto_msgTypes[4].OneofWrappers = []any{}
+ file_querier_v1_querier_proto_msgTypes[6].OneofWrappers = []any{}
+ file_querier_v1_querier_proto_msgTypes[13].OneofWrappers = []any{}
+ file_querier_v1_querier_proto_msgTypes[14].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/api/gen/proto/go/settings/v1/setting.pb.go b/api/gen/proto/go/settings/v1/setting.pb.go
index 4d35a886e5..292e87e6d1 100644
--- a/api/gen/proto/go/settings/v1/setting.pb.go
+++ b/api/gen/proto/go/settings/v1/setting.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: settings/v1/setting.proto
@@ -1175,7 +1175,7 @@ func file_settings_v1_setting_proto_rawDescGZIP() []byte {
var file_settings_v1_setting_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_settings_v1_setting_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
-var file_settings_v1_setting_proto_goTypes = []interface{}{
+var file_settings_v1_setting_proto_goTypes = []any{
(CollectionRuleAction)(0), // 0: settings.v1.CollectionRuleAction
(Status)(0), // 1: settings.v1.Status
(*GetSettingsRequest)(nil), // 2: settings.v1.GetSettingsRequest
@@ -1227,7 +1227,7 @@ func file_settings_v1_setting_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_settings_v1_setting_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GetSettingsRequest); i {
case 0:
return &v.state
@@ -1239,7 +1239,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*GetSettingsResponse); i {
case 0:
return &v.state
@@ -1251,7 +1251,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*SetSettingsRequest); i {
case 0:
return &v.state
@@ -1263,7 +1263,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*SetSettingsResponse); i {
case 0:
return &v.state
@@ -1275,7 +1275,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Setting); i {
case 0:
return &v.state
@@ -1287,7 +1287,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*CollectionRule); i {
case 0:
return &v.state
@@ -1299,7 +1299,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*CollectionRuleStore); i {
case 0:
return &v.state
@@ -1311,7 +1311,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*CollectionTarget); i {
case 0:
return &v.state
@@ -1323,7 +1323,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*CollectionInstance); i {
case 0:
return &v.state
@@ -1335,7 +1335,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*CollectionMessage); i {
case 0:
return &v.state
@@ -1347,7 +1347,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*CollectionPayloadSubscribe); i {
case 0:
return &v.state
@@ -1359,7 +1359,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*CollectionPayloadData); i {
case 0:
return &v.state
@@ -1371,7 +1371,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*CollectionPayloadRuleInsert); i {
case 0:
return &v.state
@@ -1383,7 +1383,7 @@ func file_settings_v1_setting_proto_init() {
return nil
}
}
- file_settings_v1_setting_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_settings_v1_setting_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*CollectionPayloadRuleDelete); i {
case 0:
return &v.state
@@ -1396,9 +1396,9 @@ func file_settings_v1_setting_proto_init() {
}
}
}
- file_settings_v1_setting_proto_msgTypes[5].OneofWrappers = []interface{}{}
- file_settings_v1_setting_proto_msgTypes[9].OneofWrappers = []interface{}{}
- file_settings_v1_setting_proto_msgTypes[12].OneofWrappers = []interface{}{}
+ file_settings_v1_setting_proto_msgTypes[5].OneofWrappers = []any{}
+ file_settings_v1_setting_proto_msgTypes[9].OneofWrappers = []any{}
+ file_settings_v1_setting_proto_msgTypes[12].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/api/gen/proto/go/status/v1/status.pb.go b/api/gen/proto/go/status/v1/status.pb.go
index dbf3e21ef2..ef45ee7ee4 100644
--- a/api/gen/proto/go/status/v1/status.pb.go
+++ b/api/gen/proto/go/status/v1/status.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: status/v1/status.proto
@@ -363,7 +363,7 @@ func file_status_v1_status_proto_rawDescGZIP() []byte {
}
var file_status_v1_status_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
-var file_status_v1_status_proto_goTypes = []interface{}{
+var file_status_v1_status_proto_goTypes = []any{
(*GetBuildInfoRequest)(nil), // 0: status.v1.GetBuildInfoRequest
(*GetBuildInfoResponse)(nil), // 1: status.v1.GetBuildInfoResponse
(*GetBuildInfoData)(nil), // 2: status.v1.GetBuildInfoData
@@ -394,7 +394,7 @@ func file_status_v1_status_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_status_v1_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_status_v1_status_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GetBuildInfoRequest); i {
case 0:
return &v.state
@@ -406,7 +406,7 @@ func file_status_v1_status_proto_init() {
return nil
}
}
- file_status_v1_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_status_v1_status_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*GetBuildInfoResponse); i {
case 0:
return &v.state
@@ -418,7 +418,7 @@ func file_status_v1_status_proto_init() {
return nil
}
}
- file_status_v1_status_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_status_v1_status_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*GetBuildInfoData); i {
case 0:
return &v.state
@@ -430,7 +430,7 @@ func file_status_v1_status_proto_init() {
return nil
}
}
- file_status_v1_status_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_status_v1_status_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*GetConfigRequest); i {
case 0:
return &v.state
@@ -442,7 +442,7 @@ func file_status_v1_status_proto_init() {
return nil
}
}
- file_status_v1_status_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_status_v1_status_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*GetConfigResponse); i {
case 0:
return &v.state
diff --git a/api/gen/proto/go/storegateway/v1/storegateway.pb.go b/api/gen/proto/go/storegateway/v1/storegateway.pb.go
index 9248db42b9..635c7ebe7b 100644
--- a/api/gen/proto/go/storegateway/v1/storegateway.pb.go
+++ b/api/gen/proto/go/storegateway/v1/storegateway.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: storegateway/v1/storegateway.proto
@@ -112,7 +112,7 @@ var file_storegateway_v1_storegateway_proto_rawDesc = []byte{
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-var file_storegateway_v1_storegateway_proto_goTypes = []interface{}{
+var file_storegateway_v1_storegateway_proto_goTypes = []any{
(*v1.MergeProfilesStacktracesRequest)(nil), // 0: ingester.v1.MergeProfilesStacktracesRequest
(*v1.MergeProfilesLabelsRequest)(nil), // 1: ingester.v1.MergeProfilesLabelsRequest
(*v1.MergeProfilesPprofRequest)(nil), // 2: ingester.v1.MergeProfilesPprofRequest
diff --git a/api/gen/proto/go/types/v1/types.pb.go b/api/gen/proto/go/types/v1/types.pb.go
index c2fad88103..cd2f67933f 100644
--- a/api/gen/proto/go/types/v1/types.pb.go
+++ b/api/gen/proto/go/types/v1/types.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: types/v1/types.proto
@@ -1136,7 +1136,7 @@ func file_types_v1_types_proto_rawDescGZIP() []byte {
var file_types_v1_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_types_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
-var file_types_v1_types_proto_goTypes = []interface{}{
+var file_types_v1_types_proto_goTypes = []any{
(TimeSeriesAggregationType)(0), // 0: types.v1.TimeSeriesAggregationType
(*LabelPair)(nil), // 1: types.v1.LabelPair
(*ProfileType)(nil), // 2: types.v1.ProfileType
@@ -1176,7 +1176,7 @@ func file_types_v1_types_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_types_v1_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*LabelPair); i {
case 0:
return &v.state
@@ -1188,7 +1188,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ProfileType); i {
case 0:
return &v.state
@@ -1200,7 +1200,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Labels); i {
case 0:
return &v.state
@@ -1212,7 +1212,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Series); i {
case 0:
return &v.state
@@ -1224,7 +1224,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Point); i {
case 0:
return &v.state
@@ -1236,7 +1236,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*LabelValuesRequest); i {
case 0:
return &v.state
@@ -1248,7 +1248,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*LabelValuesResponse); i {
case 0:
return &v.state
@@ -1260,7 +1260,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*LabelNamesRequest); i {
case 0:
return &v.state
@@ -1272,7 +1272,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*LabelNamesResponse); i {
case 0:
return &v.state
@@ -1284,7 +1284,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*BlockInfo); i {
case 0:
return &v.state
@@ -1296,7 +1296,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*BlockCompaction); i {
case 0:
return &v.state
@@ -1308,7 +1308,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*StackTraceSelector); i {
case 0:
return &v.state
@@ -1320,7 +1320,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*Location); i {
case 0:
return &v.state
@@ -1332,7 +1332,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*GoPGO); i {
case 0:
return &v.state
@@ -1344,7 +1344,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*GetProfileStatsRequest); i {
case 0:
return &v.state
@@ -1356,7 +1356,7 @@ func file_types_v1_types_proto_init() {
return nil
}
}
- file_types_v1_types_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_types_v1_types_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*GetProfileStatsResponse); i {
case 0:
return &v.state
diff --git a/api/gen/proto/go/vcs/v1/vcs.pb.go b/api/gen/proto/go/vcs/v1/vcs.pb.go
index ee5a294d38..194183d9a2 100644
--- a/api/gen/proto/go/vcs/v1/vcs.pb.go
+++ b/api/gen/proto/go/vcs/v1/vcs.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: vcs/v1/vcs.proto
@@ -702,7 +702,7 @@ func file_vcs_v1_vcs_proto_rawDescGZIP() []byte {
}
var file_vcs_v1_vcs_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
-var file_vcs_v1_vcs_proto_goTypes = []interface{}{
+var file_vcs_v1_vcs_proto_goTypes = []any{
(*GithubAppRequest)(nil), // 0: vcs.v1.GithubAppRequest
(*GithubAppResponse)(nil), // 1: vcs.v1.GithubAppResponse
(*GithubLoginRequest)(nil), // 2: vcs.v1.GithubLoginRequest
@@ -740,7 +740,7 @@ func file_vcs_v1_vcs_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_vcs_v1_vcs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GithubAppRequest); i {
case 0:
return &v.state
@@ -752,7 +752,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*GithubAppResponse); i {
case 0:
return &v.state
@@ -764,7 +764,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*GithubLoginRequest); i {
case 0:
return &v.state
@@ -776,7 +776,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*GithubLoginResponse); i {
case 0:
return &v.state
@@ -788,7 +788,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*GithubRefreshRequest); i {
case 0:
return &v.state
@@ -800,7 +800,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*GithubRefreshResponse); i {
case 0:
return &v.state
@@ -812,7 +812,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*GetFileRequest); i {
case 0:
return &v.state
@@ -824,7 +824,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*GetFileResponse); i {
case 0:
return &v.state
@@ -836,7 +836,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*GetCommitRequest); i {
case 0:
return &v.state
@@ -848,7 +848,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*GetCommitResponse); i {
case 0:
return &v.state
@@ -860,7 +860,7 @@ func file_vcs_v1_vcs_proto_init() {
return nil
}
}
- file_vcs_v1_vcs_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_vcs_v1_vcs_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*CommitAuthor); i {
case 0:
return &v.state
diff --git a/api/gen/proto/go/version/v1/version.pb.go b/api/gen/proto/go/version/v1/version.pb.go
index 9909712bfa..cb22481d0d 100644
--- a/api/gen/proto/go/version/v1/version.pb.go
+++ b/api/gen/proto/go/version/v1/version.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: version/v1/version.proto
@@ -297,7 +297,7 @@ func file_version_v1_version_proto_rawDescGZIP() []byte {
}
var file_version_v1_version_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
-var file_version_v1_version_proto_goTypes = []interface{}{
+var file_version_v1_version_proto_goTypes = []any{
(*VersionRequest)(nil), // 0: version.v1.VersionRequest
(*VersionResponse)(nil), // 1: version.v1.VersionResponse
(*InstanceVersion)(nil), // 2: version.v1.InstanceVersion
@@ -322,7 +322,7 @@ func file_version_v1_version_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_version_v1_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_version_v1_version_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*VersionRequest); i {
case 0:
return &v.state
@@ -334,7 +334,7 @@ func file_version_v1_version_proto_init() {
return nil
}
}
- file_version_v1_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_version_v1_version_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*VersionResponse); i {
case 0:
return &v.state
@@ -346,7 +346,7 @@ func file_version_v1_version_proto_init() {
return nil
}
}
- file_version_v1_version_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_version_v1_version_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*InstanceVersion); i {
case 0:
return &v.state
@@ -358,7 +358,7 @@ func file_version_v1_version_proto_init() {
return nil
}
}
- file_version_v1_version_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_version_v1_version_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Versions); i {
case 0:
return &v.state
diff --git a/api/go.mod b/api/go.mod
index f2b73aed7d..85216efe33 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -7,16 +7,16 @@ require (
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0
github.com/planetscale/vtprotobuf v0.6.0
- github.com/prometheus/common v0.52.3
- google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8
+ github.com/prometheus/common v0.55.0
+ google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c
google.golang.org/grpc v1.62.1
- google.golang.org/protobuf v1.34.1
+ google.golang.org/protobuf v1.34.2
)
require (
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
golang.org/x/net v0.26.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/sys v0.23.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
)
diff --git a/api/go.sum b/api/go.sum
index f1e0cc994f..ca87d2ab94 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -1,9 +1,7 @@
connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE=
connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
@@ -12,22 +10,19 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
github.com/planetscale/vtprotobuf v0.6.0 h1:nBeETjudeJ5ZgBHUz1fVHvbqUKnYOXNhsIEabROxmNA=
github.com/planetscale/vtprotobuf v0.6.0/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
-github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA=
-github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc=
+google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
diff --git a/cmd/profilecli/query.go b/cmd/profilecli/query.go
index 52d7faa729..c965f302c4 100644
--- a/cmd/profilecli/query.go
+++ b/cmd/profilecli/query.go
@@ -108,13 +108,15 @@ func addQueryParams(queryCmd commander) *queryParams {
type queryMergeParams struct {
*queryParams
- ProfileType string
+ ProfileType string
+ StacktraceSelector []string
}
func addQueryMergeParams(queryCmd commander) *queryMergeParams {
params := new(queryMergeParams)
params.queryParams = addQueryParams(queryCmd)
queryCmd.Flag("profile-type", "Profile type to query.").Default("process_cpu:cpu:nanoseconds:cpu:nanoseconds").StringVar(¶ms.ProfileType)
+ queryCmd.Flag("stacktrace-selector", "Only query locations with those symbols. Provide multiple times starting with the root").StringsVar(¶ms.StacktraceSelector)
return params
}
@@ -124,13 +126,28 @@ func queryMerge(ctx context.Context, params *queryMergeParams, outputFlag string
return err
}
level.Info(logger).Log("msg", "query aggregated profile from profile store", "url", params.URL, "from", from, "to", to, "query", params.Query, "type", params.ProfileType)
- return selectMergeProfile(ctx, params.phlareClient, outputFlag,
- &querierv1.SelectMergeProfileRequest{
- ProfileTypeID: params.ProfileType,
- Start: from.UnixMilli(),
- End: to.UnixMilli(),
- LabelSelector: params.Query,
- })
+
+ req := &querierv1.SelectMergeProfileRequest{
+ ProfileTypeID: params.ProfileType,
+ Start: from.UnixMilli(),
+ End: to.UnixMilli(),
+ LabelSelector: params.Query,
+ }
+
+ if len(params.StacktraceSelector) > 0 {
+ locations := make([]*typesv1.Location, 0, len(params.StacktraceSelector))
+ for _, cs := range params.StacktraceSelector {
+ locations = append(locations, &typesv1.Location{
+ Name: cs,
+ })
+ }
+ req.StackTraceSelector = &typesv1.StackTraceSelector{
+ CallSite: locations,
+ }
+ level.Info(logger).Log("msg", "selecting with stackstrace selector", "call-site", fmt.Sprintf("%#+v", params.StacktraceSelector))
+ }
+
+ return selectMergeProfile(ctx, params.phlareClient, outputFlag, req)
}
func selectMergeProfile(ctx context.Context, client *phlareClient, outputFlag string, req *querierv1.SelectMergeProfileRequest) error {
diff --git a/cmd/pyroscope/Dockerfile b/cmd/pyroscope/Dockerfile
index 55ed07034e..edf7d0b8ff 100644
--- a/cmd/pyroscope/Dockerfile
+++ b/cmd/pyroscope/Dockerfile
@@ -1,17 +1,26 @@
-FROM alpine:3.18.7
+FROM gcr.io/distroless/static:debug AS debug
-RUN apk add --no-cache ca-certificates
+SHELL [ "/busybox/sh", "-c" ]
+
+RUN addgroup -g 10001 -S pyroscope && \
+ adduser -u 10001 -S pyroscope -G pyroscope -h /data
+
+FROM gcr.io/distroless/static
+
+COPY --from=debug /etc/passwd /etc/passwd
+COPY --from=debug /etc/group /etc/group
+
+# Copy folder from debug container, this folder needs to have the correct UID
+# in order for the container to run as non-root.
+VOLUME /data
+COPY --chown=pyroscope:pyroscope --from=debug /data /data
+VOLUME /data-compactor
+COPY --chown=pyroscope:pyroscope --from=debug /data /data-compactor
COPY cmd/pyroscope/pyroscope.yaml /etc/pyroscope/config.yaml
COPY profilecli /usr/bin/profilecli
COPY pyroscope /usr/bin/pyroscope
-RUN addgroup -g 10001 -S pyroscope && \
- adduser -u 10001 -S pyroscope -G pyroscope
-RUN mkdir -p /data && \
- chown -R pyroscope:pyroscope /data
-VOLUME /data
-
USER pyroscope
EXPOSE 4040
ENTRYPOINT [ "/usr/bin/pyroscope" ]
diff --git a/cmd/pyroscope/debug.Dockerfile b/cmd/pyroscope/debug.Dockerfile
index 730370b102..e3be6ef60d 100644
--- a/cmd/pyroscope/debug.Dockerfile
+++ b/cmd/pyroscope/debug.Dockerfile
@@ -1,22 +1,22 @@
-FROM golang as builder
+FROM gcr.io/distroless/static:debug
-WORKDIR /app
-FROM alpine:3.18.7
+SHELL [ "/busybox/sh", "-c" ]
-RUN apk add --no-cache ca-certificates
+RUN addgroup -g 10001 -S pyroscope && \
+ adduser -u 10001 -S pyroscope -G pyroscope -h /data
-COPY .tmp/bin/linux_amd64/dlv /usr/bin/dlv
+# This folder is created by adduser command with right owner/group
+VOLUME /data
+
+# This folder needs to be created and set to the right owner/group
+VOLUME /data-compactor
+RUN mkdir -p /data-compactor && chown pyroscope:pyroscope /data /data-compactor
+COPY .tmp/bin/linux_amd64/dlv /usr/bin/dlv
COPY cmd/pyroscope/pyroscope.yaml /etc/pyroscope/config.yaml
COPY profilecli /usr/bin/profilecli
COPY pyroscope /usr/bin/pyroscope
-RUN addgroup -g 10001 -S pyroscope && \
- adduser -u 10001 -S pyroscope -G pyroscope
-RUN mkdir -p /data && \
- chown -R pyroscope:pyroscope /data
-VOLUME /data
-
USER pyroscope
EXPOSE 4040
ENTRYPOINT ["/usr/bin/dlv", "--listen=:40000", "--headless=true", "--log", "--continue", "--accept-multiclient" , "--api-version=2", "exec", "/usr/bin/pyroscope", "--"]
diff --git a/cmd/pyroscope/frontend.Dockerfile b/cmd/pyroscope/frontend.Dockerfile
index d516e956d1..fffdffbe7e 100644
--- a/cmd/pyroscope/frontend.Dockerfile
+++ b/cmd/pyroscope/frontend.Dockerfile
@@ -1,4 +1,4 @@
-FROM node:18 as builder
+FROM node:18 AS builder
RUN apt-get update && apt-get install -y libpango1.0-dev libcairo2-dev
WORKDIR /pyroscope
COPY yarn.lock package.json tsconfig.json ./
diff --git a/cmd/pyroscope/help-all.txt.tmpl b/cmd/pyroscope/help-all.txt.tmpl
index bf47cbe097..122b9354d8 100644
--- a/cmd/pyroscope/help-all.txt.tmpl
+++ b/cmd/pyroscope/help-all.txt.tmpl
@@ -175,6 +175,10 @@ Usage of ./pyroscope:
Per-tenant allowed ingestion burst size (in sample size). Units in MB. The burst size refers to the per-distributor local rate limiter, and should be set at least to the maximum profile size expected in a single push request. (default 2)
-distributor.ingestion-rate-limit-mb float
Per-tenant ingestion rate limit in sample size per second. Units in MB. (default 4)
+ -distributor.ingestion-relabeling-default-rules-position value
+ Position of the default ingestion relabeling rules in relation to relabel rules from overrides. Valid values are 'first', 'last' or 'disabled'. (default "first")
+ -distributor.ingestion-relabeling-rules value
+ List of ingestion relabel configurations. The relabeling rules work the same way, as those of [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config). All rules are applied in the order they are specified. Note: In most situations, it is more effective to use relabeling directly in Grafana Alloy.
-distributor.ingestion-tenant-shard-size int
The tenant's shard size used by shuffle-sharding. Must be set both on ingesters and distributors. 0 disables shuffle sharding.
-distributor.push.timeout duration
diff --git a/docs/Makefile b/docs/Makefile
index 272be1340f..dfefedeea2 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -10,4 +10,4 @@ include docs.mk
.PHONY: test
test:
- docker run -v $(CURDIR)/sources:/hugo/content/docs/phlare/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'exec make hugo'
+ docker run -v $(CURDIR)/sources:/hugo/content/docs/pyroscope/latest -e HUGO_REFLINKSERRORLEVEL=ERROR --rm grafana/docs-base:latest /bin/bash -c 'exec make hugo'
diff --git a/docs/make-docs b/docs/make-docs
index f531df2ebb..170e361431 100755
--- a/docs/make-docs
+++ b/docs/make-docs
@@ -6,6 +6,15 @@
# [Semantic versioning](https://semver.org/) is used to help the reader identify the significance of changes.
# Changes are relevant to this script and the support docs.mk GNU Make interface.
#
+# ## 8.0.1 (2024-07-01)
+#
+# ### Fixed
+#
+# - Update log suppression to catch new format of website /docs/ homepage REF_NOT_FOUND warnings.
+#
+# These warnings are related to missing some pages during the build that are required for the /docs/ homepage.
+# They were previously suppressed but the log format changed and without this change they reappear in the latest builds.
+#
# ## 8.0.0 (2024-05-28)
#
# ### Changed
@@ -905,7 +914,7 @@ EOF
-e '/Press Ctrl+C to stop/ d' \
-e '/make/ d' \
-e '/WARNING: The manual_mount source directory/ d' \
- -e '/docs\/_index.md .* not found/ d'
+ -e '/"docs\/_index.md" not found/d'
fi
;;
esac
diff --git a/docs/sources/configure-client/grafana-agent/_index.md b/docs/sources/configure-client/grafana-agent/_index.md
index 1082d9ea3f..319dcab3eb 100644
--- a/docs/sources/configure-client/grafana-agent/_index.md
+++ b/docs/sources/configure-client/grafana-agent/_index.md
@@ -46,6 +46,14 @@ Benefits of eBPF profiling:
1. Configure the Agent to use eBPF for profiling. Refer to the [eBPF documentation](/docs/pyroscope/latest/configure-client/grafana-agent/ebpf) for detailed steps.
1. The collector collects eBPF profiles and sends them to the Pyroscope server.
+### Supported languages
+
+This eBPF profiler only collects CPU profiles. Generally, natively compiled languages like C/C++, Go, and Rust are supported. Refer to [Troubleshooting unknown symbols][troubleshooting] for additional requirements.
+
+Python is the only supported high-level language, as long as `python_enabled=true`.
+Other high-level languages like Java, Ruby, PHP, and JavaScript require additional work to show stack traces of methods in these languages correctly.
+Currently, the CPU usage for these languages is reported as belonging to the runtime's methods.
+
## Golang profiling in pull mode
In pull mode, the collector periodically retrieves profiles from Golang applications, specifically targeting the pprof endpoints.
@@ -67,3 +75,5 @@ In pull mode, the collector periodically retrieves profiles from Golang applicat
Whether using eBPF for versatile system and application profiling or relying on Golang's built-in pprof endpoints in pull mode, Grafana Agent and Grafana Alloy collectors offer streamlined processes to gather essential profiling data.
Choose the method that best fits your application and infrastructure needs.
+
+[troubleshooting]: /docs/alloy/latest/reference/components/pyroscope/pyroscope.ebpf/#troubleshooting-unknown-symbols
diff --git a/docs/sources/configure-client/grafana-agent/ebpf/_index.md b/docs/sources/configure-client/grafana-agent/ebpf/_index.md
index ed33926284..b4e55515ea 100644
--- a/docs/sources/configure-client/grafana-agent/ebpf/_index.md
+++ b/docs/sources/configure-client/grafana-agent/ebpf/_index.md
@@ -30,11 +30,23 @@ However, eBPF has some limitations that make it unsuitable for certain use cases
- It does not support all profile types such as memory and contention/lock profiling.
- eBPF requires root access to the host machine, which can be a problem in some environments.
+## Supported languages
+
+This eBPF profiler only collects CPU profiles. Generally, natively compiled languages like C/C++, Go, and Rust are supported. Refer to [Troubleshooting unknown symbols][troubleshooting] for additional requirements.
+
+Python is the only supported high-level language, as long as `python_enabled=true`.
+Other high-level languages like Java, Ruby, PHP, and JavaScript require additional work to show stack traces of methods in these languages correctly.
+Currently, the CPU usage for these languages is reported as belonging to the runtime's methods.
+
+
## eBPF via the Grafana Agent
{{< docs/shared lookup="agent-deprecation.md" source="alloy" version="next" >}}
-The Grafana Agent is a lightweight, all-in-one agent that can collect, transform, and ship observability data. For profiling, the Grafana Agent can be configured to collect eBPF profiles and send them to Pyroscope.
+The Grafana Agent is a lightweight, all-in-one agent that can collect, transform, and ship observability data.
+For profiling, the Grafana Agent can be configured to collect eBPF profiles and send them to Pyroscope.
This section contains instructions for installing and configuring the Grafana Agent to collect eBPF profiles.
For more information about the Grafana Agent itself, see the [Grafana Agent documentation](/docs/agent/latest/flow/).
+
+[troubleshooting]: /docs/alloy/latest/reference/components/pyroscope/pyroscope.ebpf/#troubleshooting-unknown-symbols
diff --git a/docs/sources/configure-client/grafana-agent/ebpf/configuration/_index.md b/docs/sources/configure-client/grafana-agent/ebpf/configuration/_index.md
index 44fb1654dd..11165f5567 100644
--- a/docs/sources/configure-client/grafana-agent/ebpf/configuration/_index.md
+++ b/docs/sources/configure-client/grafana-agent/ebpf/configuration/_index.md
@@ -8,7 +8,8 @@ weight: 30
## Configuration reference
-Grafana Agent supports eBPF profiling in [Flow mode](/docs/agent/latest/flow/). The configuration file is written in the [River](/docs/agent/latest/flow/config-language/) language and is composed of components that are used to collect, transform, and send data.
+Grafana Agent supports eBPF profiling in [Flow mode](https://grafana.com/docs/agent/latest/flow/).
+The configuration file is written in the [River](https://grafana.com/docs/agent/latest/flow/concepts/config-language/) language and is composed of components that are used to collect, transform, and send data.
The `pyroscope.ebpf` component is used to collect application performance profiles via eBPF.
@@ -17,7 +18,7 @@ The `pyroscope.ebpf` component is used to collect application performance profil
The `pyroscope.ebpf` runs on the host machine and collects stack traces associated with a process running on the current host.
Using the `targets` argument, you can specify which processes and containers to profile on the machine. The `targets` can be from discovery components such as `discovery.process`, `dicovery.kubernetes`, `discovery.docker`, and `discovery.dockerswarm`.
-To relabel discovered targets and set your own labels you can use the `discovery.relabel` component. For more information, see [Components](/docs/agent/latest/flow/concepts/components/).
+To relabel discovered targets and set your own labels you can use the `discovery.relabel` component. For more information, refer to [Components](/docs/agent/latest/flow/concepts/components/).
The `forward_to` parameter should point to a `pyroscope.write` component to send the collected profiles to your Pyroscope Server or [Grafana Cloud](/products/cloud/).
diff --git a/docs/sources/configure-client/grafana-agent/go_pull.md b/docs/sources/configure-client/grafana-agent/go_pull.md
index b40ef08b06..40790a51ed 100644
--- a/docs/sources/configure-client/grafana-agent/go_pull.md
+++ b/docs/sources/configure-client/grafana-agent/go_pull.md
@@ -12,10 +12,10 @@ In pull mode, the collector, whether Grafana Alloy (preferred) or Grafana Agent
To set up Golang profiling in pull mode, you need to:
-1. Expose pprof endpoints
-2. Install a collector, either Grafana Alloy (preferred) or Grafana Agent (legacy)
-3. Prepare the collector's configuration file
-4. Start the collector
+1. Expose pprof endpoints.
+2. Install a collector, either Grafana Alloy (preferred) or Grafana Agent (legacy).
+3. Prepare the collector's configuration file.
+4. Start the collector.
### Expose pprof endpoints
@@ -48,7 +48,7 @@ To install Alloy, refer to [Grafana Alloy installation](https://grafana.com/docs
{{< docs/shared lookup="agent-deprecation.md" source="alloy" version="next" >}}
-If you are using legacy Grafana Agent Flow, use the [Grafana Agent in Flow mode](/docs/agent/latest/flow/setup/install/) documentation to install.
+If you are using legacy Grafana Agent Flow, use the [Grafana Agent in Flow mode](https://grafana.com/docs/agent/latest/flow/get-started/install/) documentation to install.
### Prepare the collector configuration file
@@ -64,7 +64,7 @@ and `pyroscope.scrape`.
}
```
-2. Add `pyroscope.scrape` block.
+1. Add `pyroscope.scrape` block.
```river
pyroscope.scrape "scrape_job_name" {
targets = [{"__address__" = "localhost:4040", "service_name" = "example_service"}]
@@ -107,7 +107,7 @@ and `pyroscope.scrape`.
```
-3. Save the changes to the file.
+1. Save the changes to the file.
### Start the collector
@@ -116,12 +116,14 @@ and `pyroscope.scrape`.
docker run -p 4040:4040 grafana/pyroscope
```
-2. Start the collector:
- * To start Grafana Alloy, replace `configuration.alloy` with your configuration file name:
`alloy run --stability.level=public-preview configuration.alloy`
- The `stability.level` option is required for `pyroscope.scrape`. For more information about `stability.level`, refer to [The run command](https://grafana.com/docs/alloy/latest/reference/cli/run/#permitted-stability-levels) documentation.
+1. Start the collector:
+
+ * To start Grafana Alloy v1.2: Replace `configuration.alloy` with your configuration file name:
`alloy run configuration.alloy`
+ * To start Grafana Alloy v1.0/1.1: Replace `configuration.alloy` with your configuration file name:
`alloy run --stability.level=public-preview configuration.alloy`
+ The `stability.level` option is required for `pyroscope.scrape` with Alloy v1.0 or v1.1. For more information about `stability.level`, refer to [The run command](https://grafana.com/docs/alloy/latest/reference/cli/run/#permitted-stability-levels) documentation.
* To start Grafana Agent, replace `configuration.river` with your configuration file name:
` grafana-agent-flow run configuration.river`
-3. Open a browser to http://localhost:4040. The page should list profiles.
+1. Open a browser to http://localhost:4040. The page should list profiles.
## Examples
@@ -154,7 +156,7 @@ pyroscope.write "write_job_name" {
}
```
-2. Drop not running pods, create `namespace`, `pod`, `node` and `container` labels.
+1. Drop not running pods, create `namespace`, `pod`, `node` and `container` labels.
Compose `service_name` label based on `namespace` and `container` labels.
Select only services matching regex pattern `(ns1/.*)|(ns2/container-.*0)`.
```river
@@ -239,17 +241,17 @@ router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux)
### Grafana Alloy
- [Grafana Alloy](https://grafana.com/docs/alloy/latest/)
-- [pyroscope.scrape](/docs/alloy/latest/flow/reference/components/pyroscope.scrape/)
-- [pyroscope.write](/docs/alloy/latest/flow/reference/components/pyroscope.write/)
-- [discovery.kubernetes](/docs/alloy/latest/flow/reference/components/discovery.kubernetes/)
-- [discovery.docker](/docs/alloy/latest/flow/reference/components/discovery.docker/)
-- [discovery.relabel](/docs/alloy/latest/flow/reference/components/discovery.relabel/)
+- [pyroscope.scrape](https://grafana.com/docs/alloy/latest/reference/components/pyroscope/pyroscope.scrape/)
+- [pyroscope.write](https://grafana.com/docs/alloy/latest/reference/components/pyroscope/pyroscope.write/)
+- [discovery.kubernetes](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.kubernetes/)
+- [discovery.docker](/docs/alloy/latest/flow/reference/components/discovery/discovery.docker/)
+- [discovery.relabel](/docs/alloy/latest/flow/reference/components/discovery/discovery.relabel/)
### Grafana Agent
- [Example using grafana-agent](https://github.com/grafana/pyroscope/tree/main/examples/grafana-agent-auto-instrumentation).
- [pyroscope.scrape](/docs/agent/latest/flow/reference/components/pyroscope.scrape/)
-- [pyroscope.write](/docs/agent/latest/flow/reference/components/pyroscope.write/)
-- [discovery.kubernetes](/docs/agent/latest/flow/reference/components/discovery.kubernetes/)
-- [discovery.docker](/docs/agent/latest/flow/reference/components/discovery.docker/)
+- [pyroscope.write](https://grafana.com/docs/agent/latest/flow/reference/components/pyroscope.write/)
+- [discovery.kubernetes](https://grafana.com/docs/agent/latest/flow/reference/components/discovery.kubernetes/)
+- [discovery.docker](https://grafana.com/docs/agent/latest/flow/reference/components/discovery.docker/)
- [discovery.relabel](/docs/agent/latest/flow/reference/components/discovery.relabel/)
diff --git a/docs/sources/configure-client/grafana-agent/java/_index.md b/docs/sources/configure-client/grafana-agent/java/_index.md
index 12c644f234..7ea5c5f336 100644
--- a/docs/sources/configure-client/grafana-agent/java/_index.md
+++ b/docs/sources/configure-client/grafana-agent/java/_index.md
@@ -9,9 +9,11 @@ weight: 20
Grafana Alloy and Grafana Agent in [Flow mode](/docs/agent/latest/flow/) support Java profiling.
-Written in the
-[River](/docs/agent/latest/flow/config-language/) language, the configuration file is composed of components that are used to collect,
+
+The collector configuration file is composed of components that are used to collect,
transform, and send data.
+Alloy configuration files use the Alloy [configuration syntax](https://grafana.com/docs/alloy/latest/concepts/configuration-syntax/).
+Agent Flow files use the [River](https://grafana.com/docs/agent/latest/flow/concepts/config-language/) language.
{{< docs/shared lookup="agent-deprecation.md" source="alloy" version="next" >}}
@@ -52,21 +54,21 @@ Pyroscope Server or [Grafana Cloud](/products/cloud/).
The special label `__process_pid__` _must always_ be present in each target of `targets` and corresponds to the `PID` of
the process to profile.
-The special label `service_name` is required and must always be present. If `service_name` is not specified, `pyroscope.java`
-attempts to infer it from discovery meta labels. If `service_name` is not specified and could not be inferred, then it is
-set to `unspecified`.
+The special label `service_name` is required and must always be present.
+If `service_name` isn't specified, `pyroscope.java` attempts to infer it from discovery meta labels.
+If `service_name` isn't specified and couldn't be inferred, then it's set to `unspecified`.
The `profiling_config` block describes how async-profiler is invoked.
-The following arguments are supported:
+It supports the following arguments:
| Name | Type | Description | Default | Required |
|---------------|------------|----------------------------------------------------------------------------------------------------------|---------|----------|
| `interval` | `duration` | How frequently to collect profiles from the targets. | "60s" | no |
-| `cpu` | `bool` | A flag to enable cpu profiling, using `itimer` async-profiler event. | true | no |
-| `sample_rate` | `int` | CPU profiling sample rate. It is converted from Hz to interval and passed as `-i` arg to async-profiler. | 100 | no |
-| `alloc` | `string` | Allocation profiling sampling configuration It is passed as `--alloc` arg to async-profiler. | "512k" | no |
-| `lock` | `string` | Lock profiling sampling configuration. It is passed as `--lock` arg to async-profiler. | "10ms" | no |
+| `cpu` | `bool` | A flag to enable CPU profiling, using `itimer` async-profiler event. | true | no |
+| `sample_rate` | `int` | CPU profiling sample rate. It's converted from Hz to interval and passed as `-i` arg to async-profiler. | 100 | no |
+| `alloc` | `string` | Allocation profiling sampling configuration It's passed as `--alloc` arg to async-profiler. | "512k" | no |
+| `lock` | `string` | Lock profiling sampling configuration. It's passed as `--lock` arg to async-profiler. | "10ms" | no |
For more information on async-profiler configuration,
see [profiler-options](https://github.com/async-profiler/async-profiler?tab=readme-ov-file#profiler-options).
@@ -78,13 +80,22 @@ and `discover.process` components to work.
### Start the collector
-To start Grafana Alloy, replace `configuration.alloy` with your configuration file name:
+To start Grafana Alloy v1.2: Replace `configuration.alloy` with your configuration file name:
+
+```bash
+alloy run configuration.alloy
+```
+
+To start Grafana Alloy v1.0/1.1: Replace `configuration.alloy` with your configuration file name:
+
+```bash
+alloy run --stability.level=public-preview configuration.alloy
+```
-`alloy run --stability.level=public-preview configuration.alloy`
+The `stability.level` option is required for `pyroscope.scrape` with Alloy v1.0 or v1.1. For more information about `stability.level`, refer to [The run command](https://grafana.com/docs/alloy/latest/reference/cli/run/#permitted-stability-levels) documentation.
-The `stability.level` option is required for `pyroscope.scrape`. For more information about `stability.level`, refer to [The run command](https://grafana.com/docs/alloy/latest/reference/cli/run/#permitted-stability-levels) documentation.
-To start Grafana Agent, replace `configuration.river` with your configuration file name:
+To start Grafana Agent, replace `configuration.river` with your configuration filename:
` grafana-agent-flow run configuration.river`
### Send data to Grafana Cloud Profiles
@@ -281,11 +292,11 @@ For more information:
### Grafana Alloy
- [Grafana Alloy](https://grafana.com/docs/alloy/latest/)
-- [pyroscope.scrape](/docs/alloy/latest/flow/reference/components/pyroscope.scrape/)
-- [pyroscope.write](/docs/alloy/latest/flow/reference/components/pyroscope.write/)
-- [discovery.kubernetes](/docs/alloy/latest/flow/reference/components/discovery.kubernetes/)
-- [discovery.docker](/docs/alloy/latest/flow/reference/components/discovery.docker/)
-- [discovery.relabel](/docs/alloy/latest/flow/reference/components/discovery.relabel/)
+- [pyroscope.scrape](https://grafana.com/docs/alloy/latest/reference/components/pyroscope/pyroscope.scrape/)
+- [pyroscope.write](https://grafana.com/docs/alloy/latest/reference/components/pyroscope/pyroscope.write/)
+- [discovery.kubernetes](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.kubernetes/)
+- [discovery.docker](/docs/alloy/latest/flow/reference/components/discovery/discovery.docker/)
+- [discovery.relabel](/docs/alloy/latest/flow/reference/components/discovery/discovery.relabel/)
### Grafana Agent
diff --git a/docs/sources/configure-client/grafana-agent/sampling.md b/docs/sources/configure-client/grafana-agent/sampling.md
index ab8c5c39d5..49a32f6153 100644
--- a/docs/sources/configure-client/grafana-agent/sampling.md
+++ b/docs/sources/configure-client/grafana-agent/sampling.md
@@ -11,24 +11,31 @@ Applications often have many instances deployed. While Pyroscope is designed to
For example, the volume of profiling data your application generates may make it unreasonable to profile every instance, or you might be targeting cost-reduction.
-Through configuration of the Grafana Agent, Pyroscope can sample scrape targets.
+Through configuration of Grafana Alloy (preferred) or Grafana Agent (legacy) collectors, Pyroscope can sample scrape targets.
-## Prerequisites
+{{< docs/shared source="alloy" lookup="agent-deprecation.md" version="next" >}}
-Before you begin, make sure you understand how to [configure the Grafana Agent]({{< relref "." >}}) to scrape targets and are familiar with the Grafana Agent [component configuration language](/docs/agent/latest/flow/config-language/components).
+## Before you begin
+
+Make sure you understand how to configure the collector to scrape targets and are familiar with the component configuration language.
+Alloy configuration files use the Alloy [configuration syntax](https://grafana.com/docs/alloy/latest/concepts/configuration-syntax/).
+Agent Flow files use the [River](https://grafana.com/docs/agent/latest/flow/concepts/config-language/) language.
## Configuration
-The `hashmod` action and the `modulus` argument are used in conjunction to enable sampling behavior by sharding one or more labels. To read further on these concepts, see [rule block documentation](/docs/agent/latest/flow/reference/components/discovery.relabel#rule-block). In short, `hashmod` will perform an MD5 hash on the source labels and `modulus` will perform a modulus operation on the output.
+The `hashmod` action and the `modulus` argument are used in conjunction to enable sampling behavior by sharding one or more labels. To read further on these concepts, refer to [rule block documentation](/docs/agent/latest/flow/reference/components/discovery.relabel#rule-block). In short, `hashmod` performs an MD5 hash on the source labels and `modulus` performs a modulus operation on the output.
-The sample size can be modified by changing the value of `modulus` in the `hashmod` action and the `regex` argument in the `keep` action. The `modulus` value defines the number of shards, while the `regex` value will select a subset of the shards.
+The sample size can be modified by changing the value of `modulus` in the `hashmod` action and the `regex` argument in the `keep` action.
+The `modulus` value defines the number of shards, while the `regex` value selects a subset of the shards.
![Workflow for sampling scrape targets](../sample.svg)
-> **Note:**
-> Choose your source label(s) for the `hashmod` action carefully. They must uniquely define each scrape target or `hashmod` will not be able to shard the targets uniformly.
+{{< admonition type="note" >}}
+Choose your source label(s) for the `hashmod` action carefully. They must uniquely define each scrape target or `hashmod` won't be able to shard the targets uniformly.
+{{< /admonition >}}
-For example, consider an application deployed on Kubernetes with 100 pod replicas, all uniquely identified by the label `pod_hash`. The following configuration is set to sample 15% of the pods:
+For example, consider an application deployed on Kubernetes with 100 pod replicas, all uniquely identified by the label `pod_hash`.
+The following configuration is set to sample 15% of the pods:
```river
discovery.kubernetes "profile_pods" {
@@ -59,6 +66,9 @@ discovery.relabel "profile_pods" {
## Considerations
-This strategy does not guarantee precise sampling. Due to its reliance on an MD5 hash, there is not a perfectly uniform distribution of scrape targets into shards. Larger numbers of scrape targets will yield increasingly accurate sampling.
+This strategy doesn't guarantee precise sampling.
+Due to its reliance on an MD5 hash, there isn't a perfectly uniform distribution of scrape targets into shards.
+Larger numbers of scrape targets yield increasingly accurate sampling.
-Keep in mind, if the label being hashed is deterministic, you will see deterministic sharding and thereby deterministic sampling of scrape targets. Similarly, if the label being hashed is non-deterministic, you will see scrape targets being sampled in a non-deterministic fashion.
+Keep in mind, if the hashed label is deterministic, you see deterministic sharding and thereby deterministic sampling of scrape targets.
+Similarly, if the hashed label is non-deterministic, you see scrape targets sampled in a non-deterministic fashion.
diff --git a/docs/sources/configure-client/trace-span-profiles/_index.md b/docs/sources/configure-client/trace-span-profiles/_index.md
index fd68b2c65f..e19197c2cf 100644
--- a/docs/sources/configure-client/trace-span-profiles/_index.md
+++ b/docs/sources/configure-client/trace-span-profiles/_index.md
@@ -18,7 +18,9 @@ Key benefits and features:
- Seamless integration: Smoothly transition from a high-level trace overview to detailed profiling of specific trace spans within Grafana’s trace view
- Efficiency and cost savings: Quickly identify and address performance issues, reducing troubleshooting time and operational costs
-Get started:
+## Get started
+
+Select an option from the list below:
- Configure Pyroscope: Begin sending profiling data to unlock the full potential of Span Profiles
- Client-side packages: Easily link traces and profiles using available packages for Go, Java, Ruby, .NET, and Python
diff --git a/docs/sources/configure-client/trace-span-profiles/dotnet-span-profiles.md b/docs/sources/configure-client/trace-span-profiles/dotnet-span-profiles.md
index d924a2568f..561ba517cc 100644
--- a/docs/sources/configure-client/trace-span-profiles/dotnet-span-profiles.md
+++ b/docs/sources/configure-client/trace-span-profiles/dotnet-span-profiles.md
@@ -36,7 +36,7 @@ To use Span Profiles, you need to:
Your applications must be instrumented for profiling and tracing before you can use span profiles.
* Profiling: Your application must be instrumented with Pyroscope's .NET instrumentation library. Refer to the [.NET]({{< relref "../language-sdks/dotnet" >}}) guide for instructions.
-* Tracing: Your application must be instrumented with OpenTelemetry traces. Refer to the [OpenTelemetry](https://opentelemetry.io/docs/languages/net/getting-started/) guide for isntructions.
+* Tracing: Your application must be instrumented with OpenTelemetry traces. Refer to the [OpenTelemetry](https://opentelemetry.io/docs/languages/net/getting-started/) guide for instructions.
{{< admonition type="note" >}}
Span profiles in .NET are only supported using [OpenTelemetry manual instrumentation](https://opentelemetry.io/docs/languages/net/instrumentation/)
@@ -72,7 +72,7 @@ With the span processor registered, spans created automatically (for example, HT
To view the span profiles in Grafana Tempo, you need to have a Grafana instance running and a data source configured to link traces and profiles.
-Refer to the [data source configuration documentation](/docs/grafana/datasources/tempo/configure-tempo-data-source) to see how to configure the visualization to link traces with profiles.
+Refer to the [data source configuration documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/tempo/configure-tempo-data-source) to see how to configure the visualization to link traces with profiles.
## Examples
diff --git a/docs/sources/configure-client/trace-span-profiles/java-span-profiles.md b/docs/sources/configure-client/trace-span-profiles/java-span-profiles.md
index 3ce598dd51..8a3889ecf6 100644
--- a/docs/sources/configure-client/trace-span-profiles/java-span-profiles.md
+++ b/docs/sources/configure-client/trace-span-profiles/java-span-profiles.md
@@ -79,7 +79,7 @@ ENV OTEL_PYROSCOPE_START_PROFILING=true
## Useful for debugging
# ENV PYROSCOPE_LOG_LEVEL=debug
-## Those environment variables need to be overwritten at runtime, if you are using Grafana Cloud
+## Those environment variables need to be overwritten at runtime, if you are using Grafana Cloud
ENV PYROSCOPE_SERVER_ADDRESS=http://localhost:4040
# ENV PYROSCOPE_BASIC_AUTH_USER=123 ## Grafana Cloud Username
# ENV PYROSCOPE_BASIC_AUTH_PASSWORD=glc_secret ## Grafana Cloud Password / API Token
@@ -88,11 +88,19 @@ ENV PYROSCOPE_SERVER_ADDRESS=http://localhost:4040
CMD ["java", "-Dserver.port=5000", "-javaagent:./opentelemetry-javaagent.jar", "-javaagent:pyroscope.jar", "-jar", "./my-app.jar" ]
```
+### Available configuration options
+
+| Flag | Description | Default |
+| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `otel.pyroscope.start.profiling` | Boolean flag to start PyroscopeAgent. Set to false if you want to start the PyroscopeAgent manually. | `true` |
+| `otel.pyroscope.root.span.only`  | Boolean flag. When enabled, the tracer annotates only the first span created locally (the root span), but the profile includes samples of all the nested spans. This may be helpful if the trace consists of multiple spans shorter than 10 ms and the profiler can't collect and annotate samples properly.   | `true`  |
+| `otel.pyroscope.add.span.name`   | Boolean flag. Controls whether the span name is added to profile labels.   | `true`  |
+
## View the span profiles in Grafana Tempo
To view the span profiles in Grafana Tempo, you need to have a Grafana instance running and a data source configured to link trace spans and profiles.
-Refer to the [data source configuration documentation](https://grafana.com/docs/grafana/latest/datasources/tempo/configure-tempo-data-source/) to see how to configure the visualization to link trace spans with profiles.
+Refer to the [data source configuration documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/tempo/configure-tempo-data-source/) to see how to configure the visualization to link trace spans with profiles.
To use a simple configuration, follow these steps:
diff --git a/docs/sources/configure-client/trace-span-profiles/python-span-profiles.md b/docs/sources/configure-client/trace-span-profiles/python-span-profiles.md
index 0c949cab92..46fde01202 100644
--- a/docs/sources/configure-client/trace-span-profiles/python-span-profiles.md
+++ b/docs/sources/configure-client/trace-span-profiles/python-span-profiles.md
@@ -71,7 +71,7 @@ With the span processor registered, spans created automatically (for example, HT
To view the span profiles in Grafana Tempo, you need to have a Grafana instance running and a data source configured to link traces and profiles.
-Refer to the [data source configuration documentation](/docs/grafana/datasources/tempo/configure-tempo-data-source) to see how to configure the visualization to link traces with profiles.
+Refer to the [data source configuration documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/tempo/configure-tempo-data-source) to see how to configure the visualization to link traces with profiles.
## Examples
diff --git a/docs/sources/configure-client/trace-span-profiles/ruby-span-profiles.md b/docs/sources/configure-client/trace-span-profiles/ruby-span-profiles.md
index 9d5d3e8226..5a3109e46a 100644
--- a/docs/sources/configure-client/trace-span-profiles/ruby-span-profiles.md
+++ b/docs/sources/configure-client/trace-span-profiles/ruby-span-profiles.md
@@ -68,7 +68,7 @@ end
To view the span profiles in Grafana Tempo, you need to have a Grafana instance running and a data source configured to link trace spans and profiles.
-Refer to the [data source configuration documentation](/docs/grafana/datasources/tempo/configure-tempo-data-source) to see how to configure the visualization to link trace spans with profiles.
+Refer to the [data source configuration documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/tempo/configure-tempo-data-source) to see how to configure the visualization to link trace spans with profiles.
To use a simple configuration, follow these steps:
diff --git a/docs/sources/configure-server/reference-configuration-parameters/index.md b/docs/sources/configure-server/reference-configuration-parameters/index.md
index 070f09b853..f24d239132 100644
--- a/docs/sources/configure-server/reference-configuration-parameters/index.md
+++ b/docs/sources/configure-server/reference-configuration-parameters/index.md
@@ -1843,6 +1843,8 @@ The `limits` block configures default and per-tenant limits imposed by component
# CLI flag: -validation.max-profile-symbol-value-length
[max_profile_symbol_value_length: <int> | default = 65535]
+distributor_usage_groups:
+
# Duration of the distributor aggregation window. Requires aggregation period to
# be specified. 0 to disable.
# CLI flag: -distributor.aggregation-window
@@ -1853,6 +1855,31 @@ The `limits` block configures default and per-tenant limits imposed by component
# CLI flag: -distributor.aggregation-period
[distributor_aggregation_period: <duration> | default = 0s]
+# List of ingestion relabel configurations. The relabeling rules work the same
+# way, as those of
+# [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config).
+# All rules are applied in the order they are specified. Note: In most
+# situations, it is more effective to use relabeling directly in Grafana Alloy.
+# Example:
+# This example consists of two rules, the first one will drop all profiles
+# received with a label 'environment="secrets"' and the second rule will add
+# a label 'powered_by="Grafana Labs"' to all profile series.
+# ingestion_relabeling_rules:
+# - action: drop
+# regex: secret
+# source_labels:
+# - environment
+# - action: replace
+# replacement: grafana-labs
+# target_label: powered_by
+# CLI flag: -distributor.ingestion-relabeling-rules
+[ingestion_relabeling_rules: <relabel_config...> | default = []]
+
+# Position of the default ingestion relabeling rules in relation to relabel
+# rules from overrides. Valid values are 'first', 'last' or 'disabled'.
+# CLI flag: -distributor.ingestion-relabeling-default-rules-position
+[ingestion_relabeling_default_rules_position: <string> | default = "first"]
+
# The tenant's shard size used by shuffle-sharding. Must be set both on
# ingesters and distributors. 0 disables shuffle sharding.
# CLI flag: -distributor.ingestion-tenant-shard-size
diff --git a/docs/sources/deploy-kubernetes/helm.md b/docs/sources/deploy-kubernetes/helm.md
index dfc842c0dd..e6c951bd6f 100644
--- a/docs/sources/deploy-kubernetes/helm.md
+++ b/docs/sources/deploy-kubernetes/helm.md
@@ -123,7 +123,7 @@ Use a custom namespace so that you don't have to overwrite the default namespace
helm upgrade -n pyroscope-test --install grafana grafana/grafana \
--set image.repository=grafana/grafana \
--set image.tag=main \
- --set env.GF_FEATURE_TOGGLES_ENABLE=flameGraph \
+ --set env.GF_INSTALL_PLUGINS=grafana-pyroscope-app \
--set env.GF_AUTH_ANONYMOUS_ENABLED=true \
--set env.GF_AUTH_ANONYMOUS_ORG_ROLE=Admin \
--set env.GF_DIAGNOSTICS_PROFILING_ENABLED=true \
diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md
index 2ee0668879..820397f8ba 100644
--- a/docs/sources/get-started/_index.md
+++ b/docs/sources/get-started/_index.md
@@ -83,13 +83,18 @@ Verify that you have installed [Docker](https://docs.docker.com/engine/install/)
1. In a new terminal, run a local Grafana server using Docker:
```bash
- docker run --rm --name=grafana -p 3000:3000 -e "GF_FEATURE_TOGGLES_ENABLE=flameGraph" --network=pyroscope-demo grafana/grafana:main
+ docker run --rm --name=grafana \
+ --network=pyroscope-demo \
+ -p 3000:3000 \
+ -e "GF_INSTALL_PLUGINS=grafana-pyroscope-app"\
+ -e "GF_AUTH_ANONYMOUS_ENABLED=true" \
+ -e "GF_AUTH_ANONYMOUS_ORG_ROLE=Admin" \
+ -e "GF_AUTH_DISABLE_LOGIN_FORM=true" \
+ grafana/grafana:main
```
1. In a browser, go to the Grafana server at [http://localhost:3000/datasources](http://localhost:3000/datasources).
-1. Sign in using the default username `admin` and password `admin`.
-
1. Use the following settings to configure a Pyroscope data source to query the local Pyroscope server:
| Field | Value |
diff --git a/docs/sources/introduction/pyroscope-in-grafana.md b/docs/sources/introduction/pyroscope-in-grafana.md
index a5f1f6f000..53a0c973b1 100644
--- a/docs/sources/introduction/pyroscope-in-grafana.md
+++ b/docs/sources/introduction/pyroscope-in-grafana.md
@@ -16,7 +16,7 @@ keywords:
Pyroscope can be used alongside the other Grafana tools such as Loki, Tempo, Mimir, and k6.
You can use Pyroscope to get the most granular insight into your application and how you can use it to fix issues that you may have identified via metrics, logs, traces, or anything else.
-You can use Pyroscope within Grafana by using the [Pyroscope data source plugin](/docs/grafana/datasources/grafana-pyroscope/).
+You can use Pyroscope within Grafana by using the [Pyroscope data source plugin](https://grafana.com/docs/grafana/datasources/grafana-pyroscope/).
This plugin lets you query Pyroscope data from within Grafana and visualize it alongside your other Grafana data.
## Visualize traces and profiles data
diff --git a/docs/sources/release-notes/v1-7.md b/docs/sources/release-notes/v1-7.md
new file mode 100644
index 0000000000..e81fa0dde4
--- /dev/null
+++ b/docs/sources/release-notes/v1-7.md
@@ -0,0 +1,61 @@
+---
+title: Version 1.7 release notes
+menuTitle: V1.7
+description: Release notes for Grafana Pyroscope 1.7
+weight: 550
+---
+
+# Version 1.7 release notes
+
+We are excited to present Grafana Pyroscope 1.7.
+
+This release includes several new features:
+
+* The ability to relabel profiles at ingest time
+* Per-app (service) usage metrics
+* Stacktrace selectors for merge profile queries
+* Profile `pprof` export tailored to Go PGO
+
+Additionally, we've improved stability, performance, and documentation.
+
+Notable changes are listed below. For more details, check out the full 1.7.0 changelog: https://github.com/grafana/pyroscope/compare/v1.6.0...v1.7.0.
+
+## Improvements and updates
+
+Version 1.7 includes the following improvements and updates:
+
+* Ability to relabel profiles at ingest ([#3369](https://github.com/grafana/pyroscope/pull/3369))
+* Use Grafana Alloy (instead of Grafana Agent) in the Helm chart ([#3381](https://github.com/grafana/pyroscope/pull/3381))
+* Per-app usage metrics ([#3429](https://github.com/grafana/pyroscope/pull/3429))
+* Add stacktrace selectors to query merge ([#3412](https://github.com/grafana/pyroscope/pull/3412))
+* `pprof` export for Go PGO ([#3360](https://github.com/grafana/pyroscope/pull/3360))
+* Custom binary format for symdb ([#3138](https://github.com/grafana/pyroscope/pull/3138))
+* Repair truncated Go CPU profiles ([#3344](https://github.com/grafana/pyroscope/pull/3344))
+* Add initial load tests ([#3331](https://github.com/grafana/pyroscope/pull/3331))
+* Align default step for `/render` with Grafana ([#3326](https://github.com/grafana/pyroscope/pull/3326))
+* Allow use of different protocols in `profilecli` ([#3368](https://github.com/grafana/pyroscope/pull/3368))
+* Various performance improvements (#3395, #3345, #3349, #3351, #3386, #3348, #3358)
+* Improve readiness check for ingesters and frontend ([#3435](https://github.com/grafana/pyroscope/pull/3435))
+
+## Fixes
+
+Version 1.7 includes the following fixes:
+
+* Fix error handling in filterProfiles ([#3338](https://github.com/grafana/pyroscope/pull/3338))
+* Fix frontend header handling ([#3363](https://github.com/grafana/pyroscope/pull/3363))
+* Fix line numbers for pyspy ([#3337](https://github.com/grafana/pyroscope/pull/3337))
+* Don't compute delta on relabeled `godeltaprof` memory profiles ([#3398](https://github.com/grafana/pyroscope/pull/3398))
+* Honor stacktrace partitions at downsampling ([#3408](https://github.com/grafana/pyroscope/pull/3408))
+* Fix infinite loop in index writer ([#3356](https://github.com/grafana/pyroscope/pull/3356))
+
+## Documentation improvements
+
+Version 1.7 includes the following documentation updates:
+
+* Add a Grafana installation to all examples ([#3431](https://github.com/grafana/pyroscope/pull/3431))
+* Fix broken links ([#3440](https://github.com/grafana/pyroscope/pull/3440))
+* Remove `--stability-level` for Alloy v1.2 ([#3382](https://github.com/grafana/pyroscope/pull/3382))
+* Add parameters from otel-profiling-java ([#3444](https://github.com/grafana/pyroscope/pull/3444))
+* Add supported languages for eBPF ([#3434](https://github.com/grafana/pyroscope/pull/3434))
+* Link to supported languages ([#3432](https://github.com/grafana/pyroscope/pull/3432))
+* Update link to play.grafana.org ([#3433](https://github.com/grafana/pyroscope/pull/3433))
diff --git a/ebpf/go.mod b/ebpf/go.mod
index 7e9fff5b42..b6b4f4cbc9 100644
--- a/ebpf/go.mod
+++ b/ebpf/go.mod
@@ -5,44 +5,44 @@ go 1.21
require (
connectrpc.com/connect v1.16.2
github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270
- github.com/cespare/xxhash/v2 v2.2.0
+ github.com/cespare/xxhash/v2 v2.3.0
github.com/cilium/ebpf v0.11.0
github.com/go-kit/log v0.2.1
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7
github.com/grafana/pyroscope/api v0.4.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab
- github.com/klauspost/compress v1.17.7
+ github.com/klauspost/compress v1.17.9
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.19.0
- github.com/prometheus/common v0.52.3
+ github.com/prometheus/client_golang v1.19.1
+ github.com/prometheus/common v0.55.0
github.com/prometheus/prometheus v0.51.2
github.com/samber/lo v1.38.1
github.com/stretchr/testify v1.9.0
- golang.org/x/sys v0.21.0
+ golang.org/x/sys v0.23.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db // indirect
github.com/jpillora/backoff v1.0.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_model v0.6.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/net v0.26.0 // indirect
- golang.org/x/oauth2 v0.18.0 // indirect
+ golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/grpc v1.62.1 // indirect
- google.golang.org/protobuf v1.34.1 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/ebpf/go.sum b/ebpf/go.sum
index 43a16de7c5..d3bff4adff 100644
--- a/ebpf/go.sum
+++ b/ebpf/go.sum
@@ -4,8 +4,8 @@ github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270 h1:JIxGEMs4E5
github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270/go.mod h1:2XtVRGCw/HthOLxU0Qw6o6jSJrcEoOb2OCCl8gQYvGw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -16,11 +16,8 @@ github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
@@ -37,26 +34,28 @@ github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab h1:BA4a7pe
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
-github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
-github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA=
-github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.51.2 h1:U0faf1nT4CB9DkBW87XLJCBi2s8nwWXdTbyzRUAkX0w=
github.com/prometheus/prometheus v0.51.2/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
@@ -65,48 +64,24 @@ github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/examples/golang-pgo/Dockerfile b/examples/golang-pgo/Dockerfile
index eee781ecdb..1d9ef01822 100644
--- a/examples/golang-pgo/Dockerfile
+++ b/examples/golang-pgo/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.21.11
+FROM golang:1.21.12
WORKDIR /go/src/app
COPY . .
diff --git a/examples/golang-pgo/docker-compose.yml b/examples/golang-pgo/docker-compose.yml
index 7e850fd1a7..c88b86778b 100644
--- a/examples/golang-pgo/docker-compose.yml
+++ b/examples/golang-pgo/docker-compose.yml
@@ -1,15 +1,25 @@
-version: "3"
+version: '3'
services:
rideshare-go:
environment:
- - REGION=us-east
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=us-east
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
ports:
- - '5001:5001'
-
+ - 5001:5001
pyroscope:
image: grafana/pyroscope:latest
ports:
- - '4040:4040'
+ - 4040:4040
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/golang-pgo/grafana-provisioning/datasources/pyroscope.yml b/examples/golang-pgo/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/golang-pgo/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/golang-pgo/grafana-provisioning/plugins/explore-profiles.yml b/examples/golang-pgo/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/golang-pgo/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/ebpf/docker/docker-compose.yml b/examples/grafana-agent-auto-instrumentation/ebpf/docker/docker-compose.yml
index 9b11911359..3058f02640 100644
--- a/examples/grafana-agent-auto-instrumentation/ebpf/docker/docker-compose.yml
+++ b/examples/grafana-agent-auto-instrumentation/ebpf/docker/docker-compose.yml
@@ -1,25 +1,34 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
grafana-agent:
- image: 'grafana/agent:main'
+ image: grafana/agent:main
user: root
privileged: true
- pid: 'host'
+ pid: host
environment:
- - AGENT_MODE=flow
+ - AGENT_MODE=flow
volumes:
- - '/var/run/docker.sock:/var/run/docker.sock'
- - ./config.river:/config.river
+ - /var/run/docker.sock:/var/run/docker.sock
+ - ./config.river:/config.river
ports:
- - '12345:12345'
+ - 12345:12345
command:
- - 'run'
- - '/config.river'
- - '--storage.path=/tmp/agent'
- - '--server.http.listen-addr=0.0.0.0:12345'
+ - run
+ - /config.river
+ - --storage.path=/tmp/agent
+ - --server.http.listen-addr=0.0.0.0:12345
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/grafana-agent-auto-instrumentation/ebpf/docker/grafana-provisioning/datasources/pyroscope.yml b/examples/grafana-agent-auto-instrumentation/ebpf/docker/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/ebpf/docker/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/ebpf/docker/grafana-provisioning/plugins/explore-profiles.yml b/examples/grafana-agent-auto-instrumentation/ebpf/docker/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/ebpf/docker/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/ebpf/local/docker-compose.yml b/examples/grafana-agent-auto-instrumentation/ebpf/local/docker-compose.yml
index 23b1a05e81..b0793bf67b 100644
--- a/examples/grafana-agent-auto-instrumentation/ebpf/local/docker-compose.yml
+++ b/examples/grafana-agent-auto-instrumentation/ebpf/local/docker-compose.yml
@@ -1,24 +1,33 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
app:
- image: 'grafana/agent:main'
+ image: grafana/agent:main
user: root
privileged: true
- pid: 'host'
+ pid: host
environment:
- - AGENT_MODE=flow
+ - AGENT_MODE=flow
volumes:
- - ./config.river:/config.river
+ - ./config.river:/config.river
ports:
- - '12345:12345'
+ - 12345:12345
command:
- - 'run'
- - '/config.river'
- - '--storage.path=/tmp/agent'
- - '--server.http.listen-addr=0.0.0.0:12345'
+ - run
+ - /config.river
+ - --storage.path=/tmp/agent
+ - --server.http.listen-addr=0.0.0.0:12345
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/grafana-agent-auto-instrumentation/ebpf/local/grafana-provisioning/datasources/pyroscope.yml b/examples/grafana-agent-auto-instrumentation/ebpf/local/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/ebpf/local/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/ebpf/local/grafana-provisioning/plugins/explore-profiles.yml b/examples/grafana-agent-auto-instrumentation/ebpf/local/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/ebpf/local/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/golang-pull/docker-compose.yml b/examples/grafana-agent-auto-instrumentation/golang-pull/docker-compose.yml
index 5b6933e4f8..2c71b7577b 100644
--- a/examples/grafana-agent-auto-instrumentation/golang-pull/docker-compose.yml
+++ b/examples/grafana-agent-auto-instrumentation/golang-pull/docker-compose.yml
@@ -6,6 +6,11 @@ services:
- ./grafana-provisioning:/etc/grafana/provisioning
- ./grafana/grafana.ini:/etc/grafana/grafana.ini
- ./grafana/home.json:/default-dashboard.json
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
ports:
- 3000:3000
diff --git a/examples/grafana-agent-auto-instrumentation/golang-pull/grafana-provisioning/plugins/explore-profiles.yml b/examples/grafana-agent-auto-instrumentation/golang-pull/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/golang-pull/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/java/docker-compose.yml b/examples/grafana-agent-auto-instrumentation/java/docker-compose.yml
index 7430435253..a40b956e0a 100644
--- a/examples/grafana-agent-auto-instrumentation/java/docker-compose.yml
+++ b/examples/grafana-agent-auto-instrumentation/java/docker-compose.yml
@@ -5,22 +5,32 @@ services:
context: .
dockerfile: java.Dockerfile
pyroscope:
- image: 'grafana/pyroscope:latest'
+ image: grafana/pyroscope:latest
ports:
- - 4040:4040
-
+ - 4040:4040
agent:
image: grafana/agent:main
volumes:
- - ./config.river:/etc/agent-config/config.river
+ - ./config.river:/etc/agent-config/config.river
command:
- - run
- - /etc/agent-config/config.river
- - --server.http.listen-addr=0.0.0.0:12345
+ - run
+ - /etc/agent-config/config.river
+ - --server.http.listen-addr=0.0.0.0:12345
environment:
HOSTNAME: agent
AGENT_MODE: flow
ports:
- - "12345:12345"
+ - 12345:12345
privileged: true
pid: host
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/grafana-agent-auto-instrumentation/java/grafana-provisioning/datasources/pyroscope.yml b/examples/grafana-agent-auto-instrumentation/java/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/java/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/grafana-agent-auto-instrumentation/java/grafana-provisioning/plugins/explore-profiles.yml b/examples/grafana-agent-auto-instrumentation/java/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/grafana-agent-auto-instrumentation/java/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/dotnet/fast-slow/docker-compose.yml b/examples/language-sdk-instrumentation/dotnet/fast-slow/docker-compose.yml
index af79b0f000..d43a233214 100644
--- a/examples/language-sdk-instrumentation/dotnet/fast-slow/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/dotnet/fast-slow/docker-compose.yml
@@ -1,19 +1,27 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
app-glibc:
platform: linux/amd64
build:
context: .
dockerfile: Dockerfile
-
app-musl:
platform: linux/amd64
build:
context: .
dockerfile: musl.Dockerfile
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/dotnet/fast-slow/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/dotnet/fast-slow/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/dotnet/fast-slow/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/dotnet/fast-slow/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/dotnet/fast-slow/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/dotnet/fast-slow/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/dotnet/rideshare/docker-compose.yml b/examples/language-sdk-instrumentation/dotnet/rideshare/docker-compose.yml
index 7c64baad09..2a08555941 100644
--- a/examples/language-sdk-instrumentation/dotnet/rideshare/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/dotnet/rideshare/docker-compose.yml
@@ -1,67 +1,72 @@
-version: "3"
+version: '3'
services:
pyroscope:
image: grafana/pyroscope:latest
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
platform: linux/amd64
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
- - PYROSCOPE_LABELS=region:us-east
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
- - RIDESHARE_LISTEN_PORT=5000
+ - REGION=us-east
+ - PYROSCOPE_LABELS=region:us-east
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - RIDESHARE_LISTEN_PORT=5000
build:
context: .
-
eu-north:
platform: linux/amd64
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
- - PYROSCOPE_LABELS=region:eu-north
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
- - RIDESHARE_LISTEN_PORT=5000
-
+ - REGION=eu-north
+ - PYROSCOPE_LABELS=region:eu-north
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - RIDESHARE_LISTEN_PORT=5000
build:
context: .
-
ap-south:
platform: linux/amd64
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
- - PYROSCOPE_LABELS=region:ap-south
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
- - RIDESHARE_LISTEN_PORT=5000
+ - REGION=ap-south
+ - PYROSCOPE_LABELS=region:ap-south
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - RIDESHARE_LISTEN_PORT=5000
build:
context: .
-
ap-south-alpine:
platform: linux/amd64
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
- - PYROSCOPE_LABELS=region:ap-south-alpine
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
- - RIDESHARE_LISTEN_PORT=5000
+ - REGION=ap-south
+ - PYROSCOPE_LABELS=region:ap-south-alpine
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - RIDESHARE_LISTEN_PORT=5000
build:
context: .
dockerfile: musl.Dockerfile
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
depends_on:
- - pyroscope
- - us-east
- - eu-north
- - ap-south
- - ap-south-alpine
+ - pyroscope
+ - us-east
+ - eu-north
+ - ap-south
+ - ap-south-alpine
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/dotnet/rideshare/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/dotnet/rideshare/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/dotnet/rideshare/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/dotnet/rideshare/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/dotnet/rideshare/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/dotnet/rideshare/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/dotnet/web-new/docker-compose.yml b/examples/language-sdk-instrumentation/dotnet/web-new/docker-compose.yml
index fc4d331fe5..12af60e3b5 100644
--- a/examples/language-sdk-instrumentation/dotnet/web-new/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/dotnet/web-new/docker-compose.yml
@@ -1,15 +1,24 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
app:
platform: linux/amd64
environment:
ASPNETCORE_URLS: http://*:5000
ports:
- - '5000:5000'
+ - 5000:5000
build: ''
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/dotnet/web-new/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/dotnet/web-new/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/dotnet/web-new/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/dotnet/web-new/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/dotnet/web-new/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/dotnet/web-new/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile b/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile
index 0a2c268f65..76557d6168 100644
--- a/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile
+++ b/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.21.11
+FROM golang:1.21.12
WORKDIR /go/src/app
COPY . .
diff --git a/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile.load-generator b/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile.load-generator
index dcefa4b969..73c2397d08 100644
--- a/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile.load-generator
+++ b/examples/language-sdk-instrumentation/golang-push/rideshare/Dockerfile.load-generator
@@ -1,4 +1,4 @@
-FROM golang:1.21.11
+FROM golang:1.21.12
WORKDIR /go/src/app
COPY . .
diff --git a/examples/language-sdk-instrumentation/golang-push/rideshare/docker-compose.yml b/examples/language-sdk-instrumentation/golang-push/rideshare/docker-compose.yml
index 43d3249c12..dca6e83f77 100644
--- a/examples/language-sdk-instrumentation/golang-push/rideshare/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/golang-push/rideshare/docker-compose.yml
@@ -1,43 +1,49 @@
-version: "3"
+version: '3'
services:
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
- - PARAMETERS_POOL_SIZE=1000
- - PARAMETERS_POOL_BUFFER_SIZE_KB=1000
+ - REGION=us-east
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - PARAMETERS_POOL_SIZE=1000
+ - PARAMETERS_POOL_BUFFER_SIZE_KB=1000
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
-
+ - REGION=eu-north
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=ap-south
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
pyroscope:
image: grafana/pyroscope:latest
ports:
- - '4040:4040'
-
+ - 4040:4040
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
environment:
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/golang-push/rideshare/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/golang-push/rideshare/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/golang-push/rideshare/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/golang-push/rideshare/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/golang-push/rideshare/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/golang-push/rideshare/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/golang-push/simple/Dockerfile b/examples/language-sdk-instrumentation/golang-push/simple/Dockerfile
index 066da7fcbd..7073c0e0eb 100644
--- a/examples/language-sdk-instrumentation/golang-push/simple/Dockerfile
+++ b/examples/language-sdk-instrumentation/golang-push/simple/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.21.11
+FROM golang:1.21.12
WORKDIR /go/src/app
diff --git a/examples/language-sdk-instrumentation/golang-push/simple/docker-compose.yml b/examples/language-sdk-instrumentation/golang-push/simple/docker-compose.yml
index 6344e3564d..e70e416d32 100644
--- a/examples/language-sdk-instrumentation/golang-push/simple/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/golang-push/simple/docker-compose.yml
@@ -1,12 +1,21 @@
----
version: '3.9'
services:
pyroscope:
- image: 'grafana/pyroscope:latest'
+ image: grafana/pyroscope:latest
ports:
- - '4040:4040'
-
+ - 4040:4040
app:
build: .
environment:
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/golang-push/simple/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/golang-push/simple/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/golang-push/simple/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/golang-push/simple/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/golang-push/simple/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/golang-push/simple/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/java/fib/docker-compose.yml b/examples/language-sdk-instrumentation/java/fib/docker-compose.yml
index 9f01fab1a2..ab96c746b0 100644
--- a/examples/language-sdk-instrumentation/java/fib/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/java/fib/docker-compose.yml
@@ -1,15 +1,24 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
app:
build: .
privileged: true
environment:
- - 'PYROSCOPE_APPLICATION_NAME=fibonacci-java-lock-push'
- - 'PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040'
- - 'PYROSCOPE_FORMAT=jfr'
+ - PYROSCOPE_APPLICATION_NAME=fibonacci-java-lock-push
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - PYROSCOPE_FORMAT=jfr
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/java/fib/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/java/fib/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/java/fib/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/java/fib/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/java/fib/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/java/fib/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/java/rideshare/docker-compose.yml b/examples/language-sdk-instrumentation/java/rideshare/docker-compose.yml
index 9e77934032..b0252095cb 100644
--- a/examples/language-sdk-instrumentation/java/rideshare/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/java/rideshare/docker-compose.yml
@@ -3,36 +3,43 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=us-east
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=eu-north
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=ap-south
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/java/rideshare/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/java/rideshare/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/java/rideshare/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/java/rideshare/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/java/rideshare/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/java/rideshare/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/java/simple/docker-compose.yml b/examples/language-sdk-instrumentation/java/simple/docker-compose.yml
index fbf457ba64..e9ad9b50f4 100644
--- a/examples/language-sdk-instrumentation/java/simple/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/java/simple/docker-compose.yml
@@ -1,11 +1,20 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
app:
build: .
privileged: true
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/java/simple/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/java/simple/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/java/simple/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/java/simple/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/java/simple/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/java/simple/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express-pull/docker-compose.yml b/examples/language-sdk-instrumentation/nodejs/express-pull/docker-compose.yml
index 7d854ccc35..4f648d2b50 100644
--- a/examples/language-sdk-instrumentation/nodejs/express-pull/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/nodejs/express-pull/docker-compose.yml
@@ -3,45 +3,51 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
agent:
image: grafana/agent:latest
volumes:
- - ./agent.config.river:/etc/agent-config/config.river:ro
+ - ./agent.config.river:/etc/agent-config/config.river:ro
command:
- - run
- - /etc/agent-config/config.river
- - --server.http.listen-addr=0.0.0.0:12345
+ - run
+ - /etc/agent-config/config.river
+ - --server.http.listen-addr=0.0.0.0:12345
environment:
HOSTNAME: agent
AGENT_MODE: flow
ports:
- - "12345:12345"
-
+ - 12345:12345
us-east:
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: ../
dockerfile: Dockerfile.load-generator
depends_on:
- - us-east
- - eu-north
- - ap-south
+ - us-east
+ - eu-north
+ - ap-south
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/nodejs/express-pull/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/nodejs/express-pull/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express-pull/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express-pull/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/nodejs/express-pull/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express-pull/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express-ts-inline/docker-compose.yml b/examples/language-sdk-instrumentation/nodejs/express-ts-inline/docker-compose.yml
index 6f23a94ebf..9edd5e73c4 100644
--- a/examples/language-sdk-instrumentation/nodejs/express-ts-inline/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/nodejs/express-ts-inline/docker-compose.yml
@@ -1,40 +1,46 @@
----
version: '3.9'
services:
pyroscope:
- image: 'grafana/pyroscope:latest'
+ image: grafana/pyroscope:latest
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: ../
dockerfile: Dockerfile.load-generator
depends_on:
- - us-east
- - eu-north
- - ap-south
+ - us-east
+ - eu-north
+ - ap-south
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/nodejs/express-ts-inline/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/nodejs/express-ts-inline/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express-ts-inline/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express-ts-inline/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/nodejs/express-ts-inline/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express-ts-inline/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express-ts/docker-compose.yml b/examples/language-sdk-instrumentation/nodejs/express-ts/docker-compose.yml
index 6f23a94ebf..9edd5e73c4 100644
--- a/examples/language-sdk-instrumentation/nodejs/express-ts/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/nodejs/express-ts/docker-compose.yml
@@ -1,40 +1,46 @@
----
version: '3.9'
services:
pyroscope:
- image: 'grafana/pyroscope:latest'
+ image: grafana/pyroscope:latest
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: ../
dockerfile: Dockerfile.load-generator
depends_on:
- - us-east
- - eu-north
- - ap-south
+ - us-east
+ - eu-north
+ - ap-south
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/nodejs/express-ts/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/nodejs/express-ts/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express-ts/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express-ts/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/nodejs/express-ts/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express-ts/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express/docker-compose.yml b/examples/language-sdk-instrumentation/nodejs/express/docker-compose.yml
index 798cf54bff..a6d3fd6c60 100644
--- a/examples/language-sdk-instrumentation/nodejs/express/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/nodejs/express/docker-compose.yml
@@ -3,37 +3,44 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: ../
dockerfile: Dockerfile.load-generator
depends_on:
- - us-east
- - eu-north
- - ap-south
+ - us-east
+ - eu-north
+ - ap-south
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/nodejs/express/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/nodejs/express/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/nodejs/express/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/nodejs/express/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/nodejs/express/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/rideshare/django/docker-compose.yml b/examples/language-sdk-instrumentation/python/rideshare/django/docker-compose.yml
index 696e0d3d73..ef083a752a 100644
--- a/examples/language-sdk-instrumentation/python/rideshare/django/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/python/rideshare/django/docker-compose.yml
@@ -1,34 +1,40 @@
version: '3'
-
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
web:
build: ./app
command: python manage.py runserver 0.0.0.0:8000
ports:
- - 8000:8000
+ - 8000:8000
env_file:
- - ./.env.dev
+ - ./.env.dev
depends_on:
- - db
-
+ - db
db:
image: postgres:13.0-alpine
ports:
- - '5432'
+ - '5432'
environment:
- - POSTGRES_USER=hello_django
- - POSTGRES_PASSWORD=hello_django
- - POSTGRES_DB=hello_django_dev
-
+ - POSTGRES_USER=hello_django
+ - POSTGRES_PASSWORD=hello_django
+ - POSTGRES_DB=hello_django_dev
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
-
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
volumes:
- postgres_data:
+ postgres_data: null
diff --git a/examples/language-sdk-instrumentation/python/rideshare/django/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/python/rideshare/django/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/rideshare/django/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/rideshare/django/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/python/rideshare/django/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/rideshare/django/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/rideshare/fastapi/docker-compose.yml b/examples/language-sdk-instrumentation/python/rideshare/fastapi/docker-compose.yml
index 25adcf9a5a..e2a54f0c59 100644
--- a/examples/language-sdk-instrumentation/python/rideshare/fastapi/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/python/rideshare/fastapi/docker-compose.yml
@@ -3,27 +3,34 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/python/rideshare/fastapi/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/python/rideshare/fastapi/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/rideshare/fastapi/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/rideshare/fastapi/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/python/rideshare/fastapi/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/rideshare/fastapi/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/rideshare/flask/docker-compose.yml b/examples/language-sdk-instrumentation/python/rideshare/flask/docker-compose.yml
index b56be64379..52ca75f871 100644
--- a/examples/language-sdk-instrumentation/python/rideshare/flask/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/python/rideshare/flask/docker-compose.yml
@@ -3,33 +3,40 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/python/rideshare/flask/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/python/rideshare/flask/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/rideshare/flask/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/rideshare/flask/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/python/rideshare/flask/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/rideshare/flask/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/simple/docker-compose.yml b/examples/language-sdk-instrumentation/python/simple/docker-compose.yml
index 6ddeb49d27..5bb3868412 100644
--- a/examples/language-sdk-instrumentation/python/simple/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/python/simple/docker-compose.yml
@@ -1,11 +1,19 @@
----
version: '3.9'
-
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
app:
build: .
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/python/simple/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/python/simple/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/simple/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/python/simple/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/python/simple/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/python/simple/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/ruby/rideshare/docker-compose.yml b/examples/language-sdk-instrumentation/ruby/rideshare/docker-compose.yml
index 25adcf9a5a..e2a54f0c59 100644
--- a/examples/language-sdk-instrumentation/ruby/rideshare/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/ruby/rideshare/docker-compose.yml
@@ -3,27 +3,34 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - 4040:4040
us-east:
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
-
eu-north:
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - 3000:3000
diff --git a/examples/language-sdk-instrumentation/ruby/rideshare/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/ruby/rideshare/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/ruby/rideshare/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/ruby/rideshare/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/ruby/rideshare/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/ruby/rideshare/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/ruby/rideshare_rails/docker-compose.yml b/examples/language-sdk-instrumentation/ruby/rideshare_rails/docker-compose.yml
index 5e689ee362..988f2faeb5 100644
--- a/examples/language-sdk-instrumentation/ruby/rideshare_rails/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/ruby/rideshare_rails/docker-compose.yml
@@ -1,41 +1,48 @@
-version: "3"
+version: '3'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - '4040:4040'
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
+ - REGION=us-east
build:
context: .
links:
- - 'pyroscope'
-
+ - pyroscope
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
+ - REGION=eu-north
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
+ - REGION=ap-south
build:
context: .
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
links:
- - us-east
- - ap-south
- - eu-north
+ - us-east
+ - ap-south
+ - eu-north
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - '3000:3000'
diff --git a/examples/language-sdk-instrumentation/ruby/rideshare_rails/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/ruby/rideshare_rails/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/ruby/rideshare_rails/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/ruby/rideshare_rails/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/ruby/rideshare_rails/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/ruby/rideshare_rails/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/ruby/simple/docker-compose.yml b/examples/language-sdk-instrumentation/ruby/simple/docker-compose.yml
index c5662aec98..5bb3868412 100644
--- a/examples/language-sdk-instrumentation/ruby/simple/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/ruby/simple/docker-compose.yml
@@ -1,10 +1,19 @@
----
version: '3.9'
services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - '4040:4040'
app:
build: .
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - '3000:3000'
diff --git a/examples/language-sdk-instrumentation/ruby/simple/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/ruby/simple/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/ruby/simple/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/ruby/simple/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/ruby/simple/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/ruby/simple/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/rust/rideshare/docker-compose.yml b/examples/language-sdk-instrumentation/rust/rideshare/docker-compose.yml
index 7915c3b2bc..b0252095cb 100644
--- a/examples/language-sdk-instrumentation/rust/rideshare/docker-compose.yml
+++ b/examples/language-sdk-instrumentation/rust/rideshare/docker-compose.yml
@@ -3,37 +3,43 @@ services:
pyroscope:
image: grafana/pyroscope
ports:
- - '4040:4040'
-
+ - '4040:4040'
us-east:
ports:
- - 5000
+ - 5000
environment:
- - REGION=us-east
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=us-east
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
eu-north:
ports:
- - 5000
+ - 5000
environment:
- - REGION=eu-north
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
-
+ - REGION=eu-north
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
ap-south:
ports:
- - 5000
+ - 5000
environment:
- - REGION=ap-south
- - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
+ - REGION=ap-south
+ - PYROSCOPE_SERVER_ADDRESS=http://pyroscope:4040
build:
context: .
-
load-generator:
build:
context: .
dockerfile: Dockerfile.load-generator
+ grafana:
+ image: grafana/grafana:latest
+ environment:
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ volumes:
+ - ./grafana-provisioning:/etc/grafana/provisioning
+ ports:
+ - '3000:3000'
diff --git a/examples/language-sdk-instrumentation/rust/rideshare/grafana-provisioning/datasources/pyroscope.yml b/examples/language-sdk-instrumentation/rust/rideshare/grafana-provisioning/datasources/pyroscope.yml
new file mode 100644
index 0000000000..6f04d797c8
--- /dev/null
+++ b/examples/language-sdk-instrumentation/rust/rideshare/grafana-provisioning/datasources/pyroscope.yml
@@ -0,0 +1,14 @@
+---
+apiVersion: 1
+datasources:
+ - uid: local-pyroscope
+ type: grafana-pyroscope-datasource
+ name: Pyroscope
+ url: http://pyroscope:4040
+ jsonData:
+ keepCookies: [GitSession]
+ # Uncomment these if using with Grafana Cloud
+ # basicAuth: true
+ # basicAuthUser: '123456'
+ # secureJsonData:
+ # basicAuthPassword: PASSWORD
diff --git a/examples/language-sdk-instrumentation/rust/rideshare/grafana-provisioning/plugins/explore-profiles.yml b/examples/language-sdk-instrumentation/rust/rideshare/grafana-provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/language-sdk-instrumentation/rust/rideshare/grafana-provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/examples/tracing/tempo/docker-compose.yml b/examples/tracing/tempo/docker-compose.yml
index 04af08f274..811624db3e 100644
--- a/examples/tracing/tempo/docker-compose.yml
+++ b/examples/tracing/tempo/docker-compose.yml
@@ -94,15 +94,15 @@ services:
- http://rideshare-python-eu-east:5000
grafana:
- image: grafana/grafana-dev:10.3.0-151740
+ image: grafana/grafana:latest
environment:
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
- GF_AUTH_DISABLE_LOGIN_FORM=true
- - GF_INSTALL_PLUGINS=pyroscope-panel
+ - GF_INSTALL_PLUGINS=grafana-pyroscope-app
- GF_FEATURE_TOGGLES_ENABLE=traceToProfiles tracesEmbeddedFlameGraph
volumes:
- - ./grafana/provisioning/datasources:/etc/grafana/provisioning/datasources
+ - ./grafana/provisioning:/etc/grafana/provisioning
ports:
- '3000:3000'
diff --git a/examples/tracing/tempo/grafana/provisioning/plugins/explore-profiles.yml b/examples/tracing/tempo/grafana/provisioning/plugins/explore-profiles.yml
new file mode 100644
index 0000000000..0d1302202e
--- /dev/null
+++ b/examples/tracing/tempo/grafana/provisioning/plugins/explore-profiles.yml
@@ -0,0 +1,11 @@
+---
+apiVersion: 1
+apps:
+ - type: grafana-pyroscope-app
+ jsonData:
+ backendUrl: http://pyroscope:4040
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthUser: '123456'
+ secureJsonData:
+ # uncomment this if sending data to Grafana Cloud
+ # basicAuthPassword: PASSWORD
diff --git a/go.mod b/go.mod
index 689ef7f721..b58ea62763 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
github.com/PuerkitoBio/goquery v1.8.1
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59
github.com/briandowns/spinner v1.23.0
- github.com/cespare/xxhash/v2 v2.2.0
+ github.com/cespare/xxhash/v2 v2.3.0
github.com/colega/zeropool v0.0.0-20230505084239-6fb4a4f75381
github.com/dennwc/varint v1.0.0
github.com/dgryski/go-groupvarint v0.0.0-20230630160417-2bfb7969fb3c
@@ -39,10 +39,10 @@ require (
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/json-iterator/go v1.1.12
github.com/k0kubun/pp/v3 v3.2.0
- github.com/klauspost/compress v1.17.7
+ github.com/klauspost/compress v1.17.9
github.com/kubescape/go-git-url v0.0.27
github.com/mattn/go-isatty v0.0.19
- github.com/minio/minio-go/v7 v7.0.61
+ github.com/minio/minio-go/v7 v7.0.72
github.com/mitchellh/go-wordwrap v1.0.1
github.com/oauth2-proxy/oauth2-proxy/v7 v7.5.1
github.com/oklog/ulid v1.3.1
@@ -54,16 +54,16 @@ require (
github.com/parquet-go/parquet-go v0.18.1-0.20231004061202-cde8189c4c26
github.com/pkg/errors v0.9.1
github.com/planetscale/vtprotobuf v0.6.0
- github.com/prometheus/client_golang v1.19.0
- github.com/prometheus/client_model v0.6.0
- github.com/prometheus/common v0.52.3
+ github.com/prometheus/client_golang v1.19.1
+ github.com/prometheus/client_model v0.6.1
+ github.com/prometheus/common v0.55.0
github.com/prometheus/prometheus v0.51.2
github.com/samber/lo v1.38.1
github.com/simonswine/tempopb v0.2.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/afero v1.11.0
github.com/stretchr/testify v1.9.0
- github.com/thanos-io/objstore v0.0.0-20230727115635-d0c43443ecda
+ github.com/thanos-io/objstore v0.0.0-20240722162417-19b0c0f0ffd8
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/valyala/bytebufferpool v1.0.0
github.com/xlab/treeprint v1.2.0
@@ -73,29 +73,28 @@ require (
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
golang.org/x/mod v0.17.0
golang.org/x/net v0.26.0
- golang.org/x/oauth2 v0.18.0
- golang.org/x/sync v0.7.0
- golang.org/x/sys v0.21.0
+ golang.org/x/oauth2 v0.21.0
+ golang.org/x/sync v0.8.0
+ golang.org/x/sys v0.23.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
- google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8
+ google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c
google.golang.org/grpc v1.62.1
- google.golang.org/protobuf v1.34.1
+ google.golang.org/protobuf v1.34.2
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v3 v3.0.1
sigs.k8s.io/yaml v1.3.0
)
require (
- cloud.google.com/go v0.112.0 // indirect
- cloud.google.com/go/compute v1.23.4 // indirect
- cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect
- cloud.google.com/go/iam v1.1.6 // indirect
- cloud.google.com/go/storage v1.36.0 // indirect
+ cloud.google.com/go v0.112.1 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
+ cloud.google.com/go/iam v1.1.7 // indirect
+ cloud.google.com/go/storage v1.40.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
@@ -144,15 +143,16 @@ require (
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-openapi/validate v0.23.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.2 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.3 // indirect
github.com/grafana/jfr-parser v0.8.1-0.20240228024232-8abcb81c304c // indirect
github.com/hashicorp/consul/api v1.28.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -171,14 +171,13 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
- github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -195,7 +194,7 @@ require (
github.com/prometheus/alertmanager v0.27.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/exporter-toolkit v0.11.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
@@ -223,10 +222,9 @@ require (
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
- google.golang.org/api v0.168.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
+ google.golang.org/api v0.172.0 // indirect
+ google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apimachinery v0.29.2 // indirect
@@ -242,9 +240,6 @@ replace (
// merged upstream yet.
github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91
- // Replaced with fork, to allow prefix listing, see https://github.com/simonswine/objstore/commit/84f91ea90e721f17d2263cf479fff801cab7cf27
- github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20231121154247-84f91ea90e72
-
// gopkg.in/yaml.v3
// + https://github.com/go-yaml/yaml/pull/691
// + https://github.com/go-yaml/yaml/pull/876
diff --git a/go.sum b/go.sum
index 42cbb920f0..8ed60b7dd1 100644
--- a/go.sum
+++ b/go.sum
@@ -13,22 +13,20 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
+cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
-cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
-cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 h1:aRVqY1p2IJaBGStWMsQMpkAa83cPkCDLl80eOj0Rbz4=
-cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68/go.mod h1:1a3eRNYX12fs5UABBIXS8HXVvQbX9hRB/RkEBPORpe8=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
-cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM=
+cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -38,8 +36,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
-cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
+cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE=
connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc=
connectrpc.com/grpchealth v1.3.0 h1:FA3OIwAvuMokQIXQrY5LbIy8IenftksTP/lG4PbYN+E=
@@ -55,8 +53,10 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -134,12 +134,14 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
+github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A=
github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chainguard-dev/git-urls v1.0.2 h1:pSpT7ifrpc5X55n4aTTm7FFUE+ZQHKiqpiwNkJrVcKQ=
github.com/chainguard-dev/git-urls v1.0.2/go.mod h1:rbGgj10OS7UgZlbzdUQIQpT0k/D4+An04HJY7Ol+Y/o=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -218,6 +220,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY=
+github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -261,6 +265,8 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
@@ -302,10 +308,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -362,8 +366,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
-github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
+github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
@@ -380,8 +384,6 @@ github.com/grafana/jfr-parser/pprof v0.0.0-20240228024232-8abcb81c304c h1:tGu1DT
github.com/grafana/jfr-parser/pprof v0.0.0-20240228024232-8abcb81c304c/go.mod h1:P5406BrWxjahTzVF6aCSumNI1KPlZJc0zO0v+zKZ4gc=
github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 h1:/NipyHnOmvRsVzj81j2qE0VxsvsqhOB0f4vJIhk2qCQ=
github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/grafana/objstore v0.0.0-20231121154247-84f91ea90e72 h1:o22hsDMQ3kv/0N9PkzHQSd5xMmmmdA5UJR7Jb4xISZQ=
-github.com/grafana/objstore v0.0.0-20231121154247-84f91ea90e72/go.mod h1:JauBAcJ61tRSv9widgISVmA6akQXDeUMXBrVmWW4xog=
github.com/grafana/pyroscope-go v1.0.3 h1:8WWmItzLfg4m8G+j//ElSjMeMr88Y6Lvblar6qeTyKk=
github.com/grafana/pyroscope-go v1.0.3/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY=
github.com/grafana/pyroscope-go/godeltaprof v0.1.7 h1:C11j63y7gymiW8VugJ9ZW0pWfxTZugdSJyC48olk5KY=
@@ -479,11 +481,11 @@ github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapd
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
-github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
-github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -529,10 +531,8 @@ github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.61 h1:87c+x8J3jxQ5VUGimV9oHdpjsAvy3fhneEBKuoKEVUI=
-github.com/minio/minio-go/v7 v7.0.61/go.mod h1:BTu8FcrEw+HidY0zd/0eny43QnVNkXRPXrLXFuQBHXg=
-github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
-github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/minio/minio-go/v7 v7.0.72 h1:ZSbxs2BfJensLyHdVOgHv+pfmvxYraaUy07ER04dWnA=
+github.com/minio/minio-go/v7 v7.0.72/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -612,21 +612,21 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA=
-github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
@@ -636,8 +636,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.51.2 h1:U0faf1nT4CB9DkBW87XLJCBi2s8nwWXdTbyzRUAkX0w=
github.com/prometheus/prometheus v0.51.2/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -696,6 +696,8 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4=
github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM=
github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
+github.com/thanos-io/objstore v0.0.0-20240722162417-19b0c0f0ffd8 h1:QAgAQPtOj3OTlNKrm7G/xPeuDa8xz7brfNHv3WTUq6I=
+github.com/thanos-io/objstore v0.0.0-20240722162417-19b0c0f0ffd8/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
@@ -851,8 +853,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -865,8 +867,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -924,8 +926,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -938,7 +940,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
@@ -1000,8 +1001,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
@@ -1022,16 +1023,14 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY=
-google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
+google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
+google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1062,12 +1061,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU=
-google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M=
-google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
+google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
+google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc=
+google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1096,9 +1095,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/go.work.sum b/go.work.sum
index b0c5ae268c..fca2569ad2 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -28,6 +28,8 @@ cloud.google.com/go/aiplatform v1.57.0 h1:WcZ6wDf/1qBWatmGM9Z+2BTiNjQQX54k2BekHU
cloud.google.com/go/aiplatform v1.57.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU=
cloud.google.com/go/aiplatform v1.58.2 h1:qu6n5nqCRntJOIjqs2/SztZp4KMuXTejyMRkPC0eGhM=
cloud.google.com/go/aiplatform v1.58.2/go.mod h1:c3kCiVmb6UC1dHAjZjcpDj6ZS0bHQ2slL88ZjC2LtlA=
+cloud.google.com/go/aiplatform v1.60.0 h1:0cSrii1ZeLr16MbBoocyy5KVnrSdiQ3KN/vtrTe7RqE=
+cloud.google.com/go/aiplatform v1.60.0/go.mod h1:eTlGuHOahHprZw3Hio5VKmtThIOak5/qy6pzdsqcQnM=
cloud.google.com/go/analytics v0.21.6 h1:fnV7B8lqyEYxCU0LKk+vUL7mTlqRAq4uFlIthIdr/iA=
cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w=
cloud.google.com/go/analytics v0.23.0 h1:Q+y94XH84jM8SK8O7qiY/PJRexb6n7dRbQ6PiUa4YGM=
@@ -60,6 +62,8 @@ cloud.google.com/go/asset v1.15.3 h1:uI8Bdm81s0esVWbWrTHcjFDFKNOa9aB7rI1vud1hO84
cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU=
cloud.google.com/go/asset v1.17.1 h1:xra2nJlExLat2rcpimofBw+SmPwgS78Xxhg4Lh/BcyA=
cloud.google.com/go/asset v1.17.1/go.mod h1:byvDw36UME5AzGNK7o4JnOnINkwOZ1yRrGrKIahHrng=
+cloud.google.com/go/asset v1.17.2 h1:xgFnBP3luSbUcC9RWJvb3Zkt+y/wW6PKwPHr3ssnIP8=
+cloud.google.com/go/asset v1.17.2/go.mod h1:SVbzde67ehddSoKf5uebOD1sYw8Ab/jD/9EIeWg99q4=
cloud.google.com/go/assuredworkloads v1.11.4 h1:FsLSkmYYeNuzDm8L4YPfLWV+lQaUrJmH5OuD37t1k20=
cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U=
cloud.google.com/go/assuredworkloads v1.11.5 h1:gCrN3IyvqY3cP0wh2h43d99CgH3G+WYs9CeuFVKChR8=
@@ -84,6 +88,8 @@ cloud.google.com/go/bigquery v1.57.1 h1:FiULdbbzUxWD0Y4ZGPSVCDLvqRSyCIO6zKV7E2nf
cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug=
cloud.google.com/go/bigquery v1.58.0 h1:drSd9RcPVLJP2iFMimvOB9SCSIrcl+9HD4II03Oy7A0=
cloud.google.com/go/bigquery v1.58.0/go.mod h1:0eh4mWNY0KrBTjUzLjoYImapGORq9gEPT7MWjCy9lik=
+cloud.google.com/go/bigquery v1.59.1 h1:CpT+/njKuKT3CEmswm6IbhNu9u35zt5dO4yPDLW+nG4=
+cloud.google.com/go/bigquery v1.59.1/go.mod h1:VP1UJYgevyTwsV7desjzNzDND5p6hZB+Z8gZJN1GQUc=
cloud.google.com/go/billing v1.18.0 h1:GvKy4xLy1zF1XPbwP5NJb2HjRxhnhxjjXxvyZ1S/IAo=
cloud.google.com/go/billing v1.18.0/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk=
cloud.google.com/go/billing v1.18.2 h1:oWUEQvuC4JvtnqLZ35zgzdbuHt4Itbftvzbe6aEyFdE=
@@ -121,6 +127,8 @@ cloud.google.com/go/compute v1.20.0/go.mod h1:kn5BhC++qUWR/AM3Dn21myV7QbgqejW04c
cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78=
+cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
+cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/contactcenterinsights v1.12.1 h1:EiGBeejtDDtr3JXt9W7xlhXyZ+REB5k2tBgVPVtmNb0=
cloud.google.com/go/contactcenterinsights v1.12.1/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis=
@@ -130,6 +138,8 @@ cloud.google.com/go/container v1.29.0 h1:jIltU529R2zBFvP8rhiG1mgeTcnT27KhU0H/1d6
cloud.google.com/go/container v1.29.0/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4=
cloud.google.com/go/container v1.30.1 h1:DbEwg6d9FggyNeSb+AiW6142m2YVPTSENzGx2INDv58=
cloud.google.com/go/container v1.30.1/go.mod h1:vkbfX0EnAKL/vgVECs5BZn24e1cJROzgszJirRKQ4Bg=
+cloud.google.com/go/container v1.31.0 h1:MAaNH7VRNPWEhvqOypq2j+7ONJKrKzon4v9nS3nLZe0=
+cloud.google.com/go/container v1.31.0/go.mod h1:7yABn5s3Iv3lmw7oMmyGbeV6tQj86njcTijkkGuvdZA=
cloud.google.com/go/containeranalysis v0.11.3 h1:5rhYLX+3a01drpREqBZVXR9YmWH45RnML++8NsCtuD8=
cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U=
cloud.google.com/go/containeranalysis v0.11.4 h1:doJ0M1ljS4hS0D2UbHywlHGwB7sQLNrt9vFk9Zyi7vY=
@@ -158,6 +168,8 @@ cloud.google.com/go/dataplex v1.13.0 h1:ACVOuxwe7gP0SqEso9SLyXbcZNk5l8hjcTX+XLnt
cloud.google.com/go/dataplex v1.13.0/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c=
cloud.google.com/go/dataplex v1.14.1 h1:7qrFI9Mz7wNpYjloi6BYVxV0deV09/RbajprVV+ni6Q=
cloud.google.com/go/dataplex v1.14.1/go.mod h1:bWxQAbg6Smg+sca2+Ex7s8D9a5qU6xfXtwmq4BVReps=
+cloud.google.com/go/dataplex v1.14.2 h1:fxIfdU8fxzR3clhOoNI7XFppvAmndxDu1AMH+qX9WKQ=
+cloud.google.com/go/dataplex v1.14.2/go.mod h1:0oGOSFlEKef1cQeAHXy4GZPB/Ife0fz/PxBf+ZymA2U=
cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
cloud.google.com/go/dataproc/v2 v2.3.0 h1:tTVP9tTxmc8fixxOd/8s6Q6Pz/+yzn7r7XdZHretQH0=
@@ -182,6 +194,8 @@ cloud.google.com/go/dialogflow v1.47.0 h1:tLCWad8HZhlyUNfDzDP5m+oH6h/1Uvw/ei7B9A
cloud.google.com/go/dialogflow v1.47.0/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ=
cloud.google.com/go/dialogflow v1.48.2 h1:KK9beiSJIqdrjdVHJoUuDDNSnWReY2e+7Cm6adq7moA=
cloud.google.com/go/dialogflow v1.48.2/go.mod h1:7A2oDf6JJ1/+hdpnFRfb/RjJUOh2X3rhIa5P8wQSEX4=
+cloud.google.com/go/dialogflow v1.49.0 h1:KqG0oxGE71qo0lRVyAoeBozefCvsMfcDzDjoLYSY0F4=
+cloud.google.com/go/dialogflow v1.49.0/go.mod h1:dhVrXKETtdPlpPhE7+2/k4Z8FRNUp6kMV3EW3oz/fe0=
cloud.google.com/go/dlp v1.11.1 h1:OFlXedmPP/5//X1hBEeq3D9kUVm9fb6ywYANlpv/EsQ=
cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI=
cloud.google.com/go/dlp v1.11.2 h1:lTipOuJaSjlYnnotPMbEhKURLC6GzCMDDzVbJAEbmYM=
@@ -190,6 +204,8 @@ cloud.google.com/go/documentai v1.23.6 h1:0/S3AhS23+0qaFe3tkgMmS3STxgDgmE1jg4Tva
cloud.google.com/go/documentai v1.23.6/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g=
cloud.google.com/go/documentai v1.23.8 h1:ZObcx0ia1XTj737+K9W8ngWFzghyf9c0/BvdJcADONk=
cloud.google.com/go/documentai v1.23.8/go.mod h1:Vd/y5PosxCpUHmwC+v9arZyeMfTqBR9VIwOwIqQYYfA=
+cloud.google.com/go/documentai v1.25.0 h1:lI62GMEEPO6vXJI9hj+G9WjOvnR0hEjvjokrnex4cxA=
+cloud.google.com/go/documentai v1.25.0/go.mod h1:ftLnzw5VcXkLItp6pw1mFic91tMRyfv6hHEY5br4KzY=
cloud.google.com/go/domains v0.9.4 h1:ua4GvsDztZ5F3xqjeLKVRDeOvJshf5QFgWGg1CKti3A=
cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY=
cloud.google.com/go/domains v0.9.5 h1:Mml/R6s3vQQvFPpi/9oX3O5dRirgjyJ8cksK8N19Y7g=
@@ -260,6 +276,8 @@ cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM=
cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI=
cloud.google.com/go/kms v1.15.6 h1:ktpEMQmsOAYj3VZwH020FcQlm23BVYg8T8O1woG2GcE=
cloud.google.com/go/kms v1.15.6/go.mod h1:yF75jttnIdHfGBoE51AKsD/Yqf+/jICzB9v1s1acsms=
+cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM=
+cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI=
cloud.google.com/go/language v1.12.2 h1:zg9uq2yS9PGIOdc0Kz/l+zMtOlxKWonZjjo5w5YPG2A=
cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc=
cloud.google.com/go/language v1.12.3 h1:iaJZg6K4j/2PvZZVcjeO/btcWWIllVRBhuTFjGO4LXs=
@@ -301,6 +319,8 @@ cloud.google.com/go/monitoring v1.16.3 h1:mf2SN9qSoBtIgiMA4R/y4VADPWZA7VCNJA079q
cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw=
cloud.google.com/go/monitoring v1.17.1 h1:xqcNr+JXmFMCPXnent/i1r0De6zrcqzgcMy5X1xa5vg=
cloud.google.com/go/monitoring v1.17.1/go.mod h1:SJzPMakCF0GHOuKEH/r4hxVKF04zl+cRPQyc3d/fqII=
+cloud.google.com/go/monitoring v1.18.0 h1:NfkDLQDG2UR3WYZVQE8kwSbUIEyIqJUPl+aOQdFH1T4=
+cloud.google.com/go/monitoring v1.18.0/go.mod h1:c92vVBCeq/OB4Ioyo+NbN2U7tlg5ZH41PZcdvfc+Lcg=
cloud.google.com/go/networkconnectivity v1.14.3 h1:e9lUkCe2BexsqsUc2bjV8+gFBpQa54J+/F3qKVtW+wA=
cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek=
cloud.google.com/go/networkconnectivity v1.14.4 h1:GBfXFhLyPspnaBE3nI/BRjdhW8vcbpT9QjE/4kDCDdc=
@@ -383,6 +403,8 @@ cloud.google.com/go/retail v1.14.4 h1:geqdX1FNqqL2p0ADXjPpw8lq986iv5GrVcieTYafuJ
cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg=
cloud.google.com/go/retail v1.15.1 h1:woH0EWW1IngTeyPqE95uVeMadJIB3N5VDYsRM4dJuzQ=
cloud.google.com/go/retail v1.15.1/go.mod h1:In9nSBOYhLbDGa87QvWlnE1XA14xBN2FpQRiRsUs9wU=
+cloud.google.com/go/retail v1.16.0 h1:Fn1GuAua1c6crCGqfJ1qMxG1Xh10Tg/x5EUODEHMqkw=
+cloud.google.com/go/retail v1.16.0/go.mod h1:LW7tllVveZo4ReWt68VnldZFWJRzsh9np+01J9dYWzE=
cloud.google.com/go/run v1.3.3 h1:qdfZteAm+vgzN1iXzILo3nJFQbzziudkJrvd9wCf3FQ=
cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4=
cloud.google.com/go/run v1.3.4 h1:m9WDA7DzTpczhZggwYlZcBWgCRb+kgSIisWn1sbw2rQ=
@@ -457,6 +479,8 @@ cloud.google.com/go/vision/v2 v2.7.5 h1:T/ujUghvEaTb+YnFY/jiYwVAkMbIC8EieK0CJo6B
cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM=
cloud.google.com/go/vision/v2 v2.7.6 h1:xunpR5DR3vaIvoaVSXBWpYc9uGrMxEdhhfYL+NKv84c=
cloud.google.com/go/vision/v2 v2.7.6/go.mod h1:ZkvWTVNPBU3YZYzgF9Y1jwEbD1NBOCyJn0KFdQfE6Bw=
+cloud.google.com/go/vision/v2 v2.8.0 h1:W52z1b6LdGI66MVhE70g/NFty9zCYYcjdKuycqmlhtg=
+cloud.google.com/go/vision/v2 v2.8.0/go.mod h1:ocqDiA2j97pvgogdyhoxiQp2ZkDCyr0HWpicywGGRhU=
cloud.google.com/go/vmmigration v1.7.4 h1:qPNdab4aGgtaRX+51jCOtJxlJp6P26qua4o1xxUDjpc=
cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70=
cloud.google.com/go/vmmigration v1.7.5 h1:5v9RT2vWyuw3pK2ox0HQpkoftO7Q7/8591dTxxQc79g=
@@ -491,6 +515,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0/
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1/go.mod h1:Bzf34hhAE9NSxailk8xVeLEZbUjOXcC+GnU1mMKdhLw=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/testdata/perf v0.0.0-20240208231215-981108a6de20 h1:45Ajiuhu6AeJTFdwxn2OWXZTQOHdXT1U/aezrVu6HIM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/testdata/perf v0.0.0-20240208231215-981108a6de20/go.mod h1:KMKhmwqL1TqoNRkQG2KGmDaVwT5Dte9d3PoADB38/UY=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Bose/minisentinel v0.0.0-20200130220412-917c5a9223bb h1:ZVN4Iat3runWOFLaBCDVU5a9X/XikSRBosye++6gojw=
@@ -613,6 +639,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu
github.com/digitalocean/godo v1.104.1/go.mod h1:VAI/L5YDzMuPRU01lEEUSQ/sp5Z//1HnnFv/RBTEdbg=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -846,6 +874,8 @@ github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8D
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg=
github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng=
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
@@ -936,6 +966,7 @@ github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
@@ -1004,6 +1035,8 @@ github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194 h1:Oho9ykiKXwOHkeq5jSAvlkBAcRwNqnrUca/5WacvH2E=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194 h1:YB6qJyCPuwtHFr54/GAfYj1VfwhiDHnwtOKu40OaG2M=
+github.com/thanos-io/objstore v0.0.0-20230727115635-d0c43443ecda h1:DtxaU/a7QRPiUhwtPrZFlS81y+9Mgny4KoLq65cu04U=
+github.com/thanos-io/objstore v0.0.0-20230727115635-d0c43443ecda/go.mod h1:IS7Z25+0KaknyU2P5PTP/5hwY6Yr/FzbInF88Yd5auU=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
@@ -1163,6 +1196,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
@@ -1184,8 +1218,6 @@ golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k=
@@ -1219,6 +1251,8 @@ google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7I
google.golang.org/api v0.160.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw=
google.golang.org/api v0.164.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1285,6 +1319,8 @@ google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c02
google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20240304161311-37d4d3c04a78 h1:YqFWYZXim8bG9v68xU8WjTZmYKb5M5dMeSOWIp6jogI=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:vh/N7795ftP0AkN1w8XKqN4w1OdUKXW5Eummda+ofv8=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240318140521-94a12d6c2237 h1:BGtl5+MtFriTFllRl3QPEPWZrD8nVhSTONzTkSin3+c=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240318140521-94a12d6c2237/go.mod h1:IN9OQUXZ0xT+26MDwZL8fJcYw+y99b0eYPA2U15Jt8o=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE=
@@ -1297,6 +1333,7 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.
google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240228224816-df926f6c8641/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
diff --git a/operations/pyroscope/helm/ct.yaml b/operations/pyroscope/helm/ct.yaml
index 6777e31fba..40a1a35017 100644
--- a/operations/pyroscope/helm/ct.yaml
+++ b/operations/pyroscope/helm/ct.yaml
@@ -1,7 +1,7 @@
# See https://github.com/helm/chart-testing#configuration
remote: origin
-target-branch: next
-kubeVersion: "1.22"
+target-branch: main
+kubeVersion: "1.23"
chart-dirs:
- operations/pyroscope/helm/
chart-repos:
diff --git a/operations/pyroscope/helm/pyroscope/Chart.yaml b/operations/pyroscope/helm/pyroscope/Chart.yaml
index 5263182e6a..05739c101c 100644
--- a/operations/pyroscope/helm/pyroscope/Chart.yaml
+++ b/operations/pyroscope/helm/pyroscope/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
name: pyroscope
description: 🔥 horizontally-scalable, highly-available, multi-tenant continuous profiling aggregation system
type: application
-version: 1.6.1
-appVersion: 1.6.1
+version: 1.7.1
+appVersion: 1.7.1
dependencies:
- name: grafana-agent
alias: agent
diff --git a/operations/pyroscope/helm/pyroscope/README.md b/operations/pyroscope/helm/pyroscope/README.md
index 74c7d5b26f..50baeb59e2 100644
--- a/operations/pyroscope/helm/pyroscope/README.md
+++ b/operations/pyroscope/helm/pyroscope/README.md
@@ -1,6 +1,6 @@
# pyroscope
-![Version: 1.6.1](https://img.shields.io/badge/Version-1.6.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.1](https://img.shields.io/badge/AppVersion-1.6.1-informational?style=flat-square)
+![Version: 1.7.1](https://img.shields.io/badge/Version-1.7.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.7.1](https://img.shields.io/badge/AppVersion-1.7.1-informational?style=flat-square)
🔥 horizontally-scalable, highly-available, multi-tenant continuous profiling aggregation system
@@ -86,4 +86,4 @@
| serviceMonitor.tlsConfig | string | `nil` | ServiceMonitor will use these tlsConfig settings to make the health check requests |
----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
+Autogenerated from chart metadata using [helm-docs v1.8.1](https://github.com/norwoodj/helm-docs/releases/v1.8.1)
diff --git a/operations/pyroscope/helm/pyroscope/ci/micro-services-values.yaml b/operations/pyroscope/helm/pyroscope/ci/micro-services-values.yaml
index 6b14ec7962..9bc6978bc3 100644
--- a/operations/pyroscope/helm/pyroscope/ci/micro-services-values.yaml
+++ b/operations/pyroscope/helm/pyroscope/ci/micro-services-values.yaml
@@ -62,6 +62,27 @@ pyroscope:
requests:
memory: 64Mi
cpu: 20m
+ initContainers:
+ - name: create-bucket
+ image: minio/mc
+ command:
+ - /bin/sh
+ - -c
+ - |
+ export MC_CONFIG_DIR="/tmp/mc"
+ mkdir -p "$MC_CONFIG_DIR"
+ until mc config host add myminio http://$MINIO_ENDPOINT:9000 grafana-pyroscope supersecret; do
+ echo "Waiting for Minio to be available..."
+ sleep 5
+ done
+ mc mb myminio/grafana-pyroscope-data --ignore-existing
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MINIO_ENDPOINT
+ value: $(POD_NAMESPACE)-minio
minio:
enabled: true
diff --git a/operations/pyroscope/helm/pyroscope/rendered/micro-services-hpa.yaml b/operations/pyroscope/helm/pyroscope/rendered/micro-services-hpa.yaml
index baaeb4e426..8a706e88ef 100644
--- a/operations/pyroscope/helm/pyroscope/rendered/micro-services-hpa.yaml
+++ b/operations/pyroscope/helm/pyroscope/rendered/micro-services-hpa.yaml
@@ -6,10 +6,10 @@ metadata:
name: pyroscope-dev-compactor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -27,10 +27,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -48,10 +48,10 @@ metadata:
name: pyroscope-dev-ingester
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -69,10 +69,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -90,10 +90,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -111,10 +111,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -132,10 +132,10 @@ metadata:
name: pyroscope-dev-store-gateway
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -176,10 +176,10 @@ metadata:
name: pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
---
# Source: pyroscope/charts/minio/templates/secrets.yaml
@@ -523,10 +523,10 @@ metadata:
name: alloy-config-pyroscope
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
config.alloy: |
@@ -1363,10 +1363,10 @@ metadata:
name: pyroscope-dev-overrides-config
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
overrides.yaml: |
@@ -1380,10 +1380,10 @@ metadata:
name: pyroscope-dev-config
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
config.yaml: |
@@ -1503,10 +1503,10 @@ metadata:
name: default-pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
@@ -1553,10 +1553,10 @@ metadata:
name: default-pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -1700,10 +1700,10 @@ metadata:
name: pyroscope-dev-memberlist
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
@@ -1727,10 +1727,10 @@ metadata:
name: pyroscope-dev-compactor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -1752,10 +1752,10 @@ metadata:
name: pyroscope-dev-compactor-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -1778,10 +1778,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -1803,10 +1803,10 @@ metadata:
name: pyroscope-dev-distributor-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -1829,10 +1829,10 @@ metadata:
name: pyroscope-dev-ingester
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -1854,10 +1854,10 @@ metadata:
name: pyroscope-dev-ingester-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -1880,10 +1880,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -1905,10 +1905,10 @@ metadata:
name: pyroscope-dev-querier-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -1931,10 +1931,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -1956,10 +1956,10 @@ metadata:
name: pyroscope-dev-query-frontend-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -1982,10 +1982,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2007,10 +2007,10 @@ metadata:
name: pyroscope-dev-query-scheduler-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2033,10 +2033,10 @@ metadata:
name: pyroscope-dev-store-gateway
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -2058,10 +2058,10 @@ metadata:
name: pyroscope-dev-store-gateway-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -2084,10 +2084,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -2099,7 +2099,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2122,7 +2122,7 @@ spec:
- name: "distributor"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=distributor"
@@ -2177,10 +2177,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -2192,7 +2192,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2215,7 +2215,7 @@ spec:
- name: "querier"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=querier"
@@ -2270,10 +2270,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -2285,7 +2285,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2308,7 +2308,7 @@ spec:
- name: "query-frontend"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=query-frontend"
@@ -2363,10 +2363,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2378,7 +2378,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2401,7 +2401,7 @@ spec:
- name: "query-scheduler"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=query-scheduler"
@@ -2456,10 +2456,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -2484,10 +2484,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -2515,10 +2515,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -2543,10 +2543,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2754,10 +2754,10 @@ metadata:
name: pyroscope-dev-compactor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -2772,7 +2772,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2795,7 +2795,7 @@ spec:
- name: "compactor"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=compactor"
@@ -2855,10 +2855,10 @@ metadata:
name: pyroscope-dev-ingester
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -2873,7 +2873,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2896,7 +2896,7 @@ spec:
- name: "ingester"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=ingester"
@@ -2952,10 +2952,10 @@ metadata:
name: pyroscope-dev-store-gateway
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -2970,7 +2970,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2993,7 +2993,7 @@ spec:
- name: "store-gateway"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=store-gateway"
diff --git a/operations/pyroscope/helm/pyroscope/rendered/micro-services.yaml b/operations/pyroscope/helm/pyroscope/rendered/micro-services.yaml
index 9f0cafee11..1a58cd3160 100644
--- a/operations/pyroscope/helm/pyroscope/rendered/micro-services.yaml
+++ b/operations/pyroscope/helm/pyroscope/rendered/micro-services.yaml
@@ -2,14 +2,35 @@
# Source: pyroscope/templates/deployments-statefulsets.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
+metadata:
+ name: pyroscope-dev-ad-hoc-profiles
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "ad-hoc-profiles"
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "ad-hoc-profiles"
+---
+# Source: pyroscope/templates/deployments-statefulsets.yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
metadata:
name: pyroscope-dev-compactor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -27,10 +48,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -48,10 +69,10 @@ metadata:
name: pyroscope-dev-ingester
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -69,10 +90,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -90,10 +111,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -111,10 +132,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -132,10 +153,10 @@ metadata:
name: pyroscope-dev-store-gateway
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -146,6 +167,27 @@ spec:
app.kubernetes.io/instance: pyroscope-dev
app.kubernetes.io/component: "store-gateway"
---
+# Source: pyroscope/templates/deployments-statefulsets.yaml
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: pyroscope-dev-tenant-settings
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "tenant-settings"
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "tenant-settings"
+---
# Source: pyroscope/charts/alloy/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
@@ -176,10 +218,10 @@ metadata:
name: pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
---
# Source: pyroscope/charts/minio/templates/secrets.yaml
@@ -523,10 +565,10 @@ metadata:
name: alloy-config-pyroscope
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
config.alloy: |
@@ -1363,10 +1405,10 @@ metadata:
name: pyroscope-dev-overrides-config
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
overrides.yaml: |
@@ -1380,10 +1422,10 @@ metadata:
name: pyroscope-dev-config
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
config.yaml: |
@@ -1503,10 +1545,10 @@ metadata:
name: default-pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
@@ -1553,10 +1595,10 @@ metadata:
name: default-pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -1700,10 +1742,10 @@ metadata:
name: pyroscope-dev-memberlist
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
@@ -1723,14 +1765,65 @@ spec:
# Source: pyroscope/templates/services.yaml
apiVersion: v1
kind: Service
+metadata:
+ name: pyroscope-dev-ad-hoc-profiles
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "ad-hoc-profiles"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 4040
+ targetPort: http2
+ protocol: TCP
+ name: http2
+ selector:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "ad-hoc-profiles"
+---
+# Source: pyroscope/templates/services.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: pyroscope-dev-ad-hoc-profiles-headless
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "ad-hoc-profiles"
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - port: 4040
+ targetPort: http2
+ protocol: TCP
+ name: http2
+ selector:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "ad-hoc-profiles"
+---
+# Source: pyroscope/templates/services.yaml
+apiVersion: v1
+kind: Service
metadata:
name: pyroscope-dev-compactor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -1752,10 +1845,10 @@ metadata:
name: pyroscope-dev-compactor-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -1778,10 +1871,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -1803,10 +1896,10 @@ metadata:
name: pyroscope-dev-distributor-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -1829,10 +1922,10 @@ metadata:
name: pyroscope-dev-ingester
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -1854,10 +1947,10 @@ metadata:
name: pyroscope-dev-ingester-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -1880,10 +1973,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -1905,10 +1998,10 @@ metadata:
name: pyroscope-dev-querier-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -1931,10 +2024,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -1956,10 +2049,10 @@ metadata:
name: pyroscope-dev-query-frontend-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -1982,10 +2075,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2007,10 +2100,10 @@ metadata:
name: pyroscope-dev-query-scheduler-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2033,10 +2126,10 @@ metadata:
name: pyroscope-dev-store-gateway
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -2058,10 +2151,10 @@ metadata:
name: pyroscope-dev-store-gateway-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -2077,6 +2170,151 @@ spec:
app.kubernetes.io/instance: pyroscope-dev
app.kubernetes.io/component: "store-gateway"
---
+# Source: pyroscope/templates/services.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: pyroscope-dev-tenant-settings
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "tenant-settings"
+spec:
+ type: ClusterIP
+ ports:
+ - port: 4040
+ targetPort: http2
+ protocol: TCP
+ name: http2
+ selector:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "tenant-settings"
+---
+# Source: pyroscope/templates/services.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: pyroscope-dev-tenant-settings-headless
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "tenant-settings"
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - port: 4040
+ targetPort: http2
+ protocol: TCP
+ name: http2
+ selector:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "tenant-settings"
+---
+# Source: pyroscope/templates/deployments-statefulsets.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pyroscope-dev-ad-hoc-profiles
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "ad-hoc-profiles"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "ad-hoc-profiles"
+ template:
+ metadata:
+ annotations:
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
+ profiles.grafana.com/cpu.port_name: http2
+ profiles.grafana.com/cpu.scrape: "true"
+ profiles.grafana.com/goroutine.port_name: http2
+ profiles.grafana.com/goroutine.scrape: "true"
+ profiles.grafana.com/memory.port_name: http2
+ profiles.grafana.com/memory.scrape: "true"
+ labels:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "ad-hoc-profiles"
+ name: "ad-hoc-profiles"
+ spec:
+ serviceAccountName: pyroscope-dev
+ securityContext:
+ fsGroup: 10001
+ runAsNonRoot: true
+ runAsUser: 10001
+ dnsPolicy: ClusterFirst
+ containers:
+ - name: "ad-hoc-profiles"
+ securityContext:
+ {}
+ image: "grafana/pyroscope:1.7.1"
+ imagePullPolicy: IfNotPresent
+ args:
+ - "-target=ad-hoc-profiles"
+ - "-self-profiling.disable-push=true"
+ - "-server.http-listen-port=4040"
+ - "-memberlist.cluster-label=default-pyroscope-dev"
+ - "-memberlist.join=dns+pyroscope-dev-memberlist.default.svc.cluster.local.:7946"
+ - "-config.file=/etc/pyroscope/config.yaml"
+ - "-runtime-config.file=/etc/pyroscope/overrides/overrides.yaml"
+ - "-log.level=debug"
+ - "-store-gateway.sharding-ring.replication-factor=3"
+ ports:
+ - name: http2
+ containerPort: 4040
+ protocol: TCP
+ - name: memberlist
+ containerPort: 7946
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: http2
+ scheme: HTTP
+ volumeMounts:
+ - name: config
+ mountPath: /etc/pyroscope/config.yaml
+ subPath: config.yaml
+ - name: overrides-config
+ mountPath: /etc/pyroscope/overrides/
+ - name: data
+ mountPath: /data
+ resources:
+ limits:
+ memory: 4Gi
+ requests:
+ cpu: 0.1
+ memory: 16Mi
+ volumes:
+ - name: config
+ configMap:
+ name: pyroscope-dev-config
+ - name: overrides-config
+ configMap:
+ name: pyroscope-dev-overrides-config
+ - name: data
+ emptyDir: {}
+---
# Source: pyroscope/templates/deployments-statefulsets.yaml
apiVersion: apps/v1
kind: Deployment
@@ -2084,10 +2322,10 @@ metadata:
name: pyroscope-dev-distributor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "distributor"
spec:
@@ -2100,7 +2338,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2123,7 +2361,7 @@ spec:
- name: "distributor"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=distributor"
@@ -2178,10 +2416,10 @@ metadata:
name: pyroscope-dev-querier
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "querier"
spec:
@@ -2194,7 +2432,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2217,7 +2455,7 @@ spec:
- name: "querier"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=querier"
@@ -2272,10 +2510,10 @@ metadata:
name: pyroscope-dev-query-frontend
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-frontend"
spec:
@@ -2288,7 +2526,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2311,7 +2549,7 @@ spec:
- name: "query-frontend"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=query-frontend"
@@ -2366,10 +2604,10 @@ metadata:
name: pyroscope-dev-query-scheduler
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "query-scheduler"
spec:
@@ -2382,7 +2620,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2405,7 +2643,7 @@ spec:
- name: "query-scheduler"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=query-scheduler"
@@ -2453,6 +2691,100 @@ spec:
- name: data
emptyDir: {}
---
+# Source: pyroscope/templates/deployments-statefulsets.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pyroscope-dev-tenant-settings
+ namespace: default
+ labels:
+ helm.sh/chart: pyroscope-1.7.1
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/version: "1.7.1"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: "tenant-settings"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "tenant-settings"
+ template:
+ metadata:
+ annotations:
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
+ profiles.grafana.com/cpu.port_name: http2
+ profiles.grafana.com/cpu.scrape: "true"
+ profiles.grafana.com/goroutine.port_name: http2
+ profiles.grafana.com/goroutine.scrape: "true"
+ profiles.grafana.com/memory.port_name: http2
+ profiles.grafana.com/memory.scrape: "true"
+ labels:
+ app.kubernetes.io/name: pyroscope
+ app.kubernetes.io/instance: pyroscope-dev
+ app.kubernetes.io/component: "tenant-settings"
+ name: "tenant-settings"
+ spec:
+ serviceAccountName: pyroscope-dev
+ securityContext:
+ fsGroup: 10001
+ runAsNonRoot: true
+ runAsUser: 10001
+ dnsPolicy: ClusterFirst
+ containers:
+ - name: "tenant-settings"
+ securityContext:
+ {}
+ image: "grafana/pyroscope:1.7.1"
+ imagePullPolicy: IfNotPresent
+ args:
+ - "-target=tenant-settings"
+ - "-self-profiling.disable-push=true"
+ - "-server.http-listen-port=4040"
+ - "-memberlist.cluster-label=default-pyroscope-dev"
+ - "-memberlist.join=dns+pyroscope-dev-memberlist.default.svc.cluster.local.:7946"
+ - "-config.file=/etc/pyroscope/config.yaml"
+ - "-runtime-config.file=/etc/pyroscope/overrides/overrides.yaml"
+ - "-log.level=debug"
+ - "-store-gateway.sharding-ring.replication-factor=3"
+ ports:
+ - name: http2
+ containerPort: 4040
+ protocol: TCP
+ - name: memberlist
+ containerPort: 7946
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /ready
+ port: http2
+ scheme: HTTP
+ volumeMounts:
+ - name: config
+ mountPath: /etc/pyroscope/config.yaml
+ subPath: config.yaml
+ - name: overrides-config
+ mountPath: /etc/pyroscope/overrides/
+ - name: data
+ mountPath: /data
+ resources:
+ limits:
+ memory: 4Gi
+ requests:
+ cpu: 0.1
+ memory: 16Mi
+ volumes:
+ - name: config
+ configMap:
+ name: pyroscope-dev-config
+ - name: overrides-config
+ configMap:
+ name: pyroscope-dev-overrides-config
+ - name: data
+ emptyDir: {}
+---
# Source: pyroscope/charts/alloy/templates/controllers/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
@@ -2643,10 +2975,10 @@ metadata:
name: pyroscope-dev-compactor
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "compactor"
spec:
@@ -2661,7 +2993,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2684,7 +3016,7 @@ spec:
- name: "compactor"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=compactor"
@@ -2744,10 +3076,10 @@ metadata:
name: pyroscope-dev-ingester
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "ingester"
spec:
@@ -2762,7 +3094,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2785,7 +3117,7 @@ spec:
- name: "ingester"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=ingester"
@@ -2841,10 +3173,10 @@ metadata:
name: pyroscope-dev-store-gateway
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "store-gateway"
spec:
@@ -2859,7 +3191,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: dde1479a6b672fba848d9db186796ead0de4454310cf8fdf8185339c47b35812
+ checksum/config: bda5e7377527f00be42d8f1af6aee522ee8f66d44079f0e684fe21731d983f7d
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -2882,7 +3214,7 @@ spec:
- name: "store-gateway"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=store-gateway"
diff --git a/operations/pyroscope/helm/pyroscope/rendered/single-binary.yaml b/operations/pyroscope/helm/pyroscope/rendered/single-binary.yaml
index 4f77e9a788..c85ac1b5d4 100644
--- a/operations/pyroscope/helm/pyroscope/rendered/single-binary.yaml
+++ b/operations/pyroscope/helm/pyroscope/rendered/single-binary.yaml
@@ -6,10 +6,10 @@ metadata:
name: pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "all"
spec:
@@ -43,10 +43,10 @@ metadata:
name: pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
---
# Source: pyroscope/templates/configmap-alloy.yaml
@@ -56,10 +56,10 @@ metadata:
name: alloy-config-pyroscope
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
config.alloy: |
@@ -896,10 +896,10 @@ metadata:
name: pyroscope-dev-overrides-config
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
overrides.yaml: |
@@ -913,10 +913,10 @@ metadata:
name: pyroscope-dev-config
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
data:
config.yaml: |
@@ -1029,10 +1029,10 @@ metadata:
name: default-pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
@@ -1079,10 +1079,10 @@ metadata:
name: default-pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -1158,10 +1158,10 @@ metadata:
name: pyroscope-dev-memberlist
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
@@ -1185,10 +1185,10 @@ metadata:
name: pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "all"
spec:
@@ -1210,10 +1210,10 @@ metadata:
name: pyroscope-dev-headless
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "all"
spec:
@@ -1324,10 +1324,10 @@ metadata:
name: pyroscope-dev
namespace: default
labels:
- helm.sh/chart: pyroscope-1.6.1
+ helm.sh/chart: pyroscope-1.7.1
app.kubernetes.io/name: pyroscope
app.kubernetes.io/instance: pyroscope-dev
- app.kubernetes.io/version: "1.6.1"
+ app.kubernetes.io/version: "1.7.1"
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: "all"
spec:
@@ -1342,7 +1342,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: 2e02966e7cf43f885ceda61ac06c802b5d711320b5f703275c7169ce21d1e92b
+ checksum/config: 83e85964754b7e0a989f7bc04af56b5994db1db5ac04cc51dbdddccdbeb9cc1a
profiles.grafana.com/cpu.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
profiles.grafana.com/goroutine.port_name: http2
@@ -1365,7 +1365,7 @@ spec:
- name: "pyroscope"
securityContext:
{}
- image: "grafana/pyroscope:1.6.1"
+ image: "grafana/pyroscope:1.7.1"
imagePullPolicy: IfNotPresent
args:
- "-target=all"
diff --git a/operations/pyroscope/helm/pyroscope/templates/ingress.yaml b/operations/pyroscope/helm/pyroscope/templates/ingress.yaml
index 840fcc4f49..b6d311c92a 100644
--- a/operations/pyroscope/helm/pyroscope/templates/ingress.yaml
+++ b/operations/pyroscope/helm/pyroscope/templates/ingress.yaml
@@ -89,5 +89,27 @@ spec:
number: {{ $.Values.pyroscope.service.port }}
path: /ingest
pathType: Prefix
+ - backend:
+ service:
+ {{- if gt (len $.Values.pyroscope.components) 1}}
+ name: {{ include "pyroscope.fullname" $ }}-tenant-settings
+ {{- else }}
+ name: {{ include "pyroscope.fullname" $ }}
+ {{- end }}
+ port:
+ number: {{ $.Values.pyroscope.service.port }}
+ path: /settings.v1.SettingsService/
+ pathType: ImplementationSpecific
+ - backend:
+ service:
+ {{- if gt (len $.Values.pyroscope.components) 1}}
+ name: {{ include "pyroscope.fullname" $ }}-ad-hoc-profiles
+ {{- else }}
+ name: {{ include "pyroscope.fullname" $ }}
+ {{- end }}
+ port:
+ number: {{ $.Values.pyroscope.service.port }}
+ path: /adhocprofiles.v1.AdHocProfileService/
+ pathType: ImplementationSpecific
{{- end }}
{{- end }}
diff --git a/operations/pyroscope/helm/pyroscope/values-micro-services.yaml b/operations/pyroscope/helm/pyroscope/values-micro-services.yaml
index b51e8c51d2..9759973ab1 100644
--- a/operations/pyroscope/helm/pyroscope/values-micro-services.yaml
+++ b/operations/pyroscope/helm/pyroscope/values-micro-services.yaml
@@ -84,5 +84,23 @@ pyroscope:
# Depending on this flag and the number of tenants + blocks that need to be synced on startup, pods can take
# some time to become ready. This value can be used to ensure Kubernetes waits long enough and reduce errors.
initialDelaySeconds: 60
+ tenant-settings:
+ kind: Deployment
+ replicaCount: 1
+ resources:
+ limits:
+ memory: 4Gi
+ requests:
+ memory: 16Mi
+ cpu: 0.1
+ ad-hoc-profiles:
+ kind: Deployment
+ replicaCount: 1
+ resources:
+ limits:
+ memory: 4Gi
+ requests:
+ memory: 16Mi
+ cpu: 0.1
minio:
enabled: true
diff --git a/operations/pyroscope/helm/pyroscope/values.yaml b/operations/pyroscope/helm/pyroscope/values.yaml
index 75814a0891..b01cd0cdc9 100644
--- a/operations/pyroscope/helm/pyroscope/values.yaml
+++ b/operations/pyroscope/helm/pyroscope/values.yaml
@@ -47,7 +47,7 @@ pyroscope:
name: ""
podAnnotations:
- # Scrapes itself see https://grafana.com/docs/phlare/latest/operators-guide/deploy-kubernetes/#optional-scrape-your-own-workloads-profiles
+ # Scrapes itself see https://grafana.com/docs/pyroscope/latest/deploy-kubernetes/helm/#optional-scrape-your-own-workloads-profiles
profiles.grafana.com/memory.scrape: "true"
profiles.grafana.com/memory.port_name: http2
profiles.grafana.com/cpu.scrape: "true"
@@ -189,7 +189,7 @@ alloy:
profiles.grafana.com/goroutine.scrape: "true"
profiles.grafana.com/goroutine.port_name: "http-metrics"
alloy:
- stabilityLevel: "public-preview" # This needs to be set for some of our resources until verison v1.2 is released
+ stabilityLevel: "public-preview" # This needs to be set for some of our resources until verison v1.2 is released
configMap:
create: false
name: alloy-config-pyroscope
diff --git a/operations/pyroscope/jsonnet/values-micro-services.json b/operations/pyroscope/jsonnet/values-micro-services.json
index 19df46387c..38eaa78aa8 100644
--- a/operations/pyroscope/jsonnet/values-micro-services.json
+++ b/operations/pyroscope/jsonnet/values-micro-services.json
@@ -4,6 +4,19 @@
},
"pyroscope": {
"components": {
+ "ad-hoc-profiles": {
+ "kind": "Deployment",
+ "replicaCount": 1,
+ "resources": {
+ "limits": {
+ "memory": "4Gi"
+ },
+ "requests": {
+ "cpu": 0.1,
+ "memory": "16Mi"
+ }
+ }
+ },
"compactor": {
"kind": "StatefulSet",
"persistence": {
@@ -105,6 +118,19 @@
"memory": "8Gi"
}
}
+ },
+ "tenant-settings": {
+ "kind": "Deployment",
+ "replicaCount": 1,
+ "resources": {
+ "limits": {
+ "memory": "4Gi"
+ },
+ "requests": {
+ "cpu": 0.1,
+ "memory": "16Mi"
+ }
+ }
}
},
"extraArgs": {
diff --git a/pkg/adhocprofiles/adhocprofiles.go b/pkg/adhocprofiles/adhocprofiles.go
index d947940f36..a9d49263c5 100644
--- a/pkg/adhocprofiles/adhocprofiles.go
+++ b/pkg/adhocprofiles/adhocprofiles.go
@@ -6,6 +6,7 @@ import (
"crypto/rand"
"encoding/base64"
"encoding/json"
+ "fmt"
"io"
"slices"
"strings"
@@ -41,6 +42,33 @@ type AdHocProfile struct {
UploadedAt time.Time `json:"uploadedAt"`
}
+func validRunes(r rune) bool {
+ if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '.' || r == '-' || r == '_' {
+ return true
+ }
+ return false
+}
+
+// check if the id is valid
+func validID(id string) bool {
+ for _, r := range id {
+ if !validRunes(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// replaces invalid runes in the id with underscores
+func replaceInvalidRunes(id string) string {
+ return strings.Map(func(r rune) rune {
+ if validRunes(r) {
+ return r
+ }
+ return '_'
+ }, id)
+}
+
func NewAdHocProfiles(bucket objstore.Bucket, logger log.Logger, limits frontend.Limits) *AdHocProfiles {
a := &AdHocProfiles{
logger: logger,
@@ -68,6 +96,9 @@ func (a *AdHocProfiles) Upload(ctx context.Context, c *connect.Request[v1.AdHocP
UploadedAt: time.Now().UTC(),
}
+ // replace runes outside of [a-zA-Z0-9_-.] with underscores
+ adHocProfile.Name = replaceInvalidRunes(adHocProfile.Name)
+
// TODO: Add per-tenant upload limits (number of files, total size, etc.)
maxNodes, err := validation.ValidateMaxNodes(a.limits, []string{tenantID}, c.Msg.GetMaxNodes())
@@ -118,7 +149,12 @@ func (a *AdHocProfiles) Get(ctx context.Context, c *connect.Request[v1.AdHocProf
bucket := a.getBucket(tenantID)
- reader, err := bucket.Get(ctx, c.Msg.GetId())
+ id := c.Msg.GetId()
+ if !validID(id) {
+ return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("id '%s' is invalid: can only contain [a-zA-Z0-9_-.]", id))
+ }
+
+ reader, err := bucket.Get(ctx, id)
if err != nil {
return nil, errors.Wrapf(err, "failed to get profile")
}
@@ -167,6 +203,11 @@ func (a *AdHocProfiles) List(ctx context.Context, c *connect.Request[v1.AdHocPro
profiles := make([]*v1.AdHocProfilesProfileMetadata, 0)
err = bucket.Iter(ctx, "", func(s string) error {
+ // do not list elements with invalid ids
+ if !validID(s) {
+ return nil
+ }
+
separatorIndex := strings.IndexRune(s, '-')
id, err := ulid.Parse(s[0:separatorIndex])
if err != nil {
diff --git a/pkg/adhocprofiles/adhocprofiles_test.go b/pkg/adhocprofiles/adhocprofiles_test.go
index d8f47a8559..3180331d9b 100644
--- a/pkg/adhocprofiles/adhocprofiles_test.go
+++ b/pkg/adhocprofiles/adhocprofiles_test.go
@@ -6,6 +6,7 @@ import (
"encoding/base64"
"encoding/json"
"os"
+ "strings"
"testing"
"connectrpc.com/connect"
@@ -125,9 +126,10 @@ func TestAdHocProfiles_Upload(t *testing.T) {
c *connect.Request[v1.AdHocProfilesUploadRequest]
}
tests := []struct {
- name string
- args args
- wantErr bool
+ name string
+ args args
+ wantErr bool
+ expectedSuffix string
}{
{
name: "reject requests with missing tenant id",
@@ -153,11 +155,24 @@ func TestAdHocProfiles_Upload(t *testing.T) {
args: args{
ctx: tenant.InjectTenantID(context.Background(), "tenant"),
c: connect.NewRequest(&v1.AdHocProfilesUploadRequest{
- Name: "test",
+ Name: "test.cpu.pb.gz",
Profile: encodedProfile,
}),
},
- wantErr: false,
+ wantErr: false,
+ expectedSuffix: "-test.cpu.pb.gz",
+ },
+ {
+ name: "should limit profile names to particular character set",
+ args: args{
+ ctx: tenant.InjectTenantID(context.Background(), "tenant"),
+ c: connect.NewRequest(&v1.AdHocProfilesUploadRequest{
+ Name: "test/../../../etc/passwd",
+ Profile: encodedProfile,
+ }),
+ },
+ wantErr: false,
+ expectedSuffix: "-test_.._.._.._etc_passwd",
},
}
for _, tt := range tests {
@@ -172,6 +187,18 @@ func TestAdHocProfiles_Upload(t *testing.T) {
t.Errorf("Upload() error = %v, wantErr %v", err, tt.wantErr)
return
}
+
+ if tt.expectedSuffix != "" {
+ found := false
+ err := bucket.Iter(tt.args.ctx, "tenant/adhoc", func(name string) error {
+ if strings.HasSuffix(name, tt.expectedSuffix) {
+ found = true
+ }
+ return nil
+ })
+ require.NoError(t, err)
+ require.True(t, found)
+ }
})
}
}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 7eb50241a9..1dcb39af16 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -126,6 +126,7 @@ type Limits interface {
MaxSessionsPerSeries(tenantID string) int
EnforceLabelsOrder(tenantID string) bool
IngestionRelabelingRules(tenantID string) []*relabel.Config
+ DistributorUsageGroups(tenantID string) *validation.UsageGroupConfig
validation.ProfileValidationLimits
aggregator.Limits
}
@@ -284,9 +285,13 @@ func (d *Distributor) PushParsed(ctx context.Context, req *distributormodel.Push
return nil, err
}
+ usageGroups := d.limits.DistributorUsageGroups(tenantID)
+
for _, series := range req.Series {
profName := phlaremodel.Labels(series.Labels).Get(ProfileName)
+ groups := usageGroups.GetUsageGroups(tenantID, phlaremodel.Labels(series.Labels))
profLanguage := d.GetProfileLanguage(series)
+
for _, raw := range series.Samples {
usagestats.NewCounter(fmt.Sprintf("distributor_profile_type_%s_received", profName)).Inc(1)
d.profileReceivedStats.Inc(1, profLanguage)
@@ -298,11 +303,14 @@ func (d *Distributor) PushParsed(ctx context.Context, req *distributormodel.Push
d.metrics.receivedDecompressedBytes.WithLabelValues(profName, tenantID).Observe(float64(decompressedSize))
d.metrics.receivedSamples.WithLabelValues(profName, tenantID).Observe(float64(len(p.Sample)))
d.profileSizeStats.Record(float64(decompressedSize), profLanguage)
+ groups.CountReceivedBytes(profName, int64(decompressedSize))
if err = validation.ValidateProfile(d.limits, tenantID, p.Profile, decompressedSize, series.Labels, now); err != nil {
_ = level.Debug(d.logger).Log("msg", "invalid profile", "err", err)
- validation.DiscardedProfiles.WithLabelValues(string(validation.ReasonOf(err)), tenantID).Add(float64(req.TotalProfiles))
- validation.DiscardedBytes.WithLabelValues(string(validation.ReasonOf(err)), tenantID).Add(float64(req.TotalBytesUncompressed))
+ reason := string(validation.ReasonOf(err))
+ validation.DiscardedProfiles.WithLabelValues(reason, tenantID).Add(float64(req.TotalProfiles))
+ validation.DiscardedBytes.WithLabelValues(reason, tenantID).Add(float64(req.TotalBytesUncompressed))
+ groups.CountDiscardedBytes(reason, req.TotalBytesUncompressed)
return nil, connect.NewError(connect.CodeInvalidArgument, err)
}
@@ -401,11 +409,12 @@ func (d *Distributor) sendRequests(ctx context.Context, req *distributormodel.Pu
for _, series := range req.Series {
series.Labels = d.limitMaxSessionsPerSeries(maxSessionsPerSeries, series.Labels)
}
+ usageGroups := d.limits.DistributorUsageGroups(tenantID)
// Next we split profiles by labels and apply relabel rules.
- profileSeries, bytesRelabelDropped, profilesRelabelDropped := extractSampleSeries(req, d.limits.IngestionRelabelingRules(tenantID))
- validation.DiscardedBytes.WithLabelValues(string(validation.RelabelRules), tenantID).Add(bytesRelabelDropped)
- validation.DiscardedProfiles.WithLabelValues(string(validation.RelabelRules), tenantID).Add(profilesRelabelDropped)
+ profileSeries, bytesRelabelDropped, profilesRelabelDropped := extractSampleSeries(req, tenantID, usageGroups, d.limits.IngestionRelabelingRules(tenantID))
+ validation.DiscardedBytes.WithLabelValues(string(validation.DroppedByRelabelRules), tenantID).Add(bytesRelabelDropped)
+ validation.DiscardedProfiles.WithLabelValues(string(validation.DroppedByRelabelRules), tenantID).Add(profilesRelabelDropped)
// Filter our series and profiles without samples.
for _, series := range profileSeries {
@@ -427,9 +436,13 @@ func (d *Distributor) sendRequests(ctx context.Context, req *distributormodel.Pu
if enforceLabelsOrder {
series.Labels = phlaremodel.Labels(series.Labels).InsertSorted(phlaremodel.LabelNameOrder, phlaremodel.LabelOrderEnforced)
}
+
+ groups := usageGroups.GetUsageGroups(tenantID, phlaremodel.Labels(series.Labels))
+
if err = validation.ValidateLabels(d.limits, tenantID, series.Labels); err != nil {
validation.DiscardedProfiles.WithLabelValues(string(validation.ReasonOf(err)), tenantID).Add(float64(req.TotalProfiles))
validation.DiscardedBytes.WithLabelValues(string(validation.ReasonOf(err)), tenantID).Add(float64(req.TotalBytesUncompressed))
+ groups.CountDiscardedBytes(string(validation.ReasonOf(err)), req.TotalBytesUncompressed)
return nil, connect.NewError(connect.CodeInvalidArgument, err)
}
keys[i] = TokenFor(tenantID, phlaremodel.LabelPairsString(series.Labels))
@@ -746,7 +759,7 @@ func (g *groupsWithFingerprints) add(stringTable []string, lbls phlaremodel.Labe
})
}
-func extractSampleSeries(req *distributormodel.PushRequest, relabelRules []*relabel.Config) (result []*distributormodel.ProfileSeries, bytesRelabelDropped, profilesRelabelDropped float64) {
+func extractSampleSeries(req *distributormodel.PushRequest, tenantID string, usageGroups *validation.UsageGroupConfig, relabelRules []*relabel.Config) (result []*distributormodel.ProfileSeries, bytesRelabelDropped, profilesRelabelDropped float64) {
var (
lblbuilder = phlaremodel.NewLabelsBuilder(phlaremodel.EmptyLabels())
)
@@ -757,6 +770,8 @@ func extractSampleSeries(req *distributormodel.PushRequest, relabelRules []*rela
Labels: series.Labels,
Samples: make([]*distributormodel.ProfileSample, 0, len(series.Samples)),
}
+ usageGroups := usageGroups.GetUsageGroups(tenantID, phlaremodel.Labels(series.Labels))
+
for _, raw := range series.Samples {
pprof.RenameLabel(raw.Profile.Profile, pprof.ProfileIDLabelName, pprof.SpanIDLabelName)
groups := pprof.GroupSamplesWithoutLabels(raw.Profile.Profile, pprof.SpanIDLabelName)
@@ -771,6 +786,7 @@ func extractSampleSeries(req *distributormodel.PushRequest, relabelRules []*rela
if !keep {
bytesRelabelDropped += float64(raw.Profile.SizeVT())
profilesRelabelDropped++ // in this case we dropped a whole profile
+ usageGroups.CountDiscardedBytes(string(validation.DroppedByRelabelRules), int64(raw.Profile.SizeVT()))
continue
}
}
@@ -792,7 +808,9 @@ func extractSampleSeries(req *distributormodel.PushRequest, relabelRules []*rela
if len(relabelRules) > 0 {
keep := relabel.ProcessBuilder(lblbuilder, relabelRules...)
if !keep {
- bytesRelabelDropped += float64(sampleSize(raw.Profile.Profile.StringTable, group.Samples))
+ droppedBytes := sampleSize(raw.Profile.Profile.StringTable, group.Samples)
+ bytesRelabelDropped += float64(droppedBytes)
+ usageGroups.CountDiscardedBytes(string(validation.DroppedByRelabelRules), droppedBytes)
continue
}
}
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index b3818a9a0d..7908b97833 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -733,6 +733,7 @@ func Test_SampleLabels(t *testing.T) {
series: []*distributormodel.ProfileSeries{
{
Labels: []*typesv1.LabelPair{
+ {Name: "__delta__", Value: "false"},
{Name: "__name__", Value: "memory"},
{Name: "__name_replaced__", Value: "godeltaprof_memory"},
},
@@ -996,8 +997,15 @@ func Test_SampleLabels(t *testing.T) {
for _, tc := range testCases {
tc := tc
+
+ // These are both required to be set to fulfill the usage group
+ // reporting. Neither are validated by the tests, nor do they influence
+ // test behavior in any way.
+ ug := &validation.UsageGroupConfig{}
+ const dummyTenantID = "tenant1"
+
t.Run(tc.description, func(t *testing.T) {
- series, actualBytesDropped, actualProfilesDropped := extractSampleSeries(tc.pushReq, tc.relabelRules)
+ series, actualBytesDropped, actualProfilesDropped := extractSampleSeries(tc.pushReq, dummyTenantID, ug, tc.relabelRules)
assert.Equal(t, tc.expectBytesDropped, actualBytesDropped)
assert.Equal(t, tc.expectProfilesDropped, actualProfilesDropped)
require.Len(t, series, len(tc.series))
diff --git a/pkg/frontend/frontend_diff_test.go b/pkg/frontend/frontend_diff_test.go
index 7405b1945e..4e5943e727 100644
--- a/pkg/frontend/frontend_diff_test.go
+++ b/pkg/frontend/frontend_diff_test.go
@@ -76,7 +76,7 @@ func Test_Frontend_Diff(t *testing.T) {
Left: &querierv1.SelectMergeStacktracesRequest{
ProfileTypeID: profileType,
LabelSelector: "{}",
- Start: 0000,
+ Start: 1,
End: 1000,
},
Right: &querierv1.SelectMergeStacktracesRequest{
diff --git a/pkg/frontend/frontendpb/frontend.pb.go b/pkg/frontend/frontendpb/frontend.pb.go
index 5741acb828..d9aa7cfb82 100644
--- a/pkg/frontend/frontendpb/frontend.pb.go
+++ b/pkg/frontend/frontendpb/frontend.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: frontend/frontendpb/frontend.proto
@@ -181,7 +181,7 @@ func file_frontend_frontendpb_frontend_proto_rawDescGZIP() []byte {
}
var file_frontend_frontendpb_frontend_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_frontend_frontendpb_frontend_proto_goTypes = []interface{}{
+var file_frontend_frontendpb_frontend_proto_goTypes = []any{
(*QueryResultRequest)(nil), // 0: frontendpb.QueryResultRequest
(*QueryResultResponse)(nil), // 1: frontendpb.QueryResultResponse
(*httpgrpc.HTTPResponse)(nil), // 2: httpgrpc.HTTPResponse
@@ -205,7 +205,7 @@ func file_frontend_frontendpb_frontend_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_frontend_frontendpb_frontend_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_frontend_frontendpb_frontend_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*QueryResultRequest); i {
case 0:
return &v.state
@@ -217,7 +217,7 @@ func file_frontend_frontendpb_frontend_proto_init() {
return nil
}
}
- file_frontend_frontendpb_frontend_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_frontend_frontendpb_frontend_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*QueryResultResponse); i {
case 0:
return &v.state
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 118e9460da..de9887528b 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -253,7 +253,11 @@ func (i *Ingester) evictBlock(tenantID string, b ulid.ULID, fn func() error) (er
func (i *Ingester) Push(ctx context.Context, req *connect.Request[pushv1.PushRequest]) (*connect.Response[pushv1.PushResponse], error) {
return forInstanceUnary(ctx, i, func(instance *instance) (*connect.Response[pushv1.PushResponse], error) {
+ usageGroups := i.limits.DistributorUsageGroups(instance.tenantID)
+
for _, series := range req.Msg.Series {
+ groups := usageGroups.GetUsageGroups(instance.tenantID, series.Labels)
+
for _, sample := range series.Samples {
err := pprof.FromBytes(sample.RawProfile, func(p *profilev1.Profile, size int) error {
id, err := uuid.Parse(sample.ID)
@@ -265,6 +269,8 @@ func (i *Ingester) Push(ctx context.Context, req *connect.Request[pushv1.PushReq
if reason != validation.Unknown {
validation.DiscardedProfiles.WithLabelValues(string(reason), instance.tenantID).Add(float64(1))
validation.DiscardedBytes.WithLabelValues(string(reason), instance.tenantID).Add(float64(size))
+ groups.CountDiscardedBytes(string(reason), int64(size))
+
switch validation.ReasonOf(err) {
case validation.SeriesLimit:
return connect.NewError(connect.CodeResourceExhausted, err)
diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
index 3802f0dac9..116938f578 100644
--- a/pkg/ingester/limiter.go
+++ b/pkg/ingester/limiter.go
@@ -26,6 +26,7 @@ type Limits interface {
MaxLocalSeriesPerTenant(tenantID string) int
MaxGlobalSeriesPerTenant(tenantID string) int
IngestionTenantShardSize(tenantID string) int
+ DistributorUsageGroups(tenantID string) *validation.UsageGroupConfig
}
type Limiter interface {
diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go
index f27f5070de..ddb4c476d0 100644
--- a/pkg/ingester/limiter_test.go
+++ b/pkg/ingester/limiter_test.go
@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
phlaremodel "github.com/grafana/pyroscope/pkg/model"
+ "github.com/grafana/pyroscope/pkg/validation"
)
type fakeLimits struct {
@@ -31,6 +32,10 @@ func (f *fakeLimits) IngestionTenantShardSize(userID string) int {
return f.ingestionTenantShardSize
}
+func (f *fakeLimits) DistributorUsageGroups(userID string) *validation.UsageGroupConfig {
+ return &validation.UsageGroupConfig{}
+}
+
type fakeRingCount struct {
healthyInstancesCount int
}
diff --git a/pkg/objstore/providers/filesystem/bucket_client.go b/pkg/objstore/providers/filesystem/bucket_client.go
index 0ae39e73f4..e30565c1e5 100644
--- a/pkg/objstore/providers/filesystem/bucket_client.go
+++ b/pkg/objstore/providers/filesystem/bucket_client.go
@@ -2,13 +2,9 @@ package filesystem
import (
"context"
- "io"
"os"
"path/filepath"
- "strings"
- "github.com/grafana/dskit/runutil"
- "github.com/pkg/errors"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/filesystem"
@@ -48,95 +44,6 @@ func (b *Bucket) ReaderAt(ctx context.Context, filename string) (phlareobjstore.
return &FileReaderAt{File: f}, nil
}
-func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
- params := objstore.ApplyIterOptions(options...)
- if !params.WithoutAppendDirDelim || strings.HasSuffix(dir, objstore.DirDelim) {
- if dir != "" {
- dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim
- }
- return b.Bucket.Iter(ctx, dir, f, options...)
- }
- relDir := filepath.Dir(dir)
- prefix := dir
- return b.iterPrefix(ctx, filepath.Join(b.rootDir, relDir), relDir, prefix, f, options...)
-}
-
-// iterPrefix calls f for each entry in the given directory matching the prefix.
-func (b *Bucket) iterPrefix(ctx context.Context, absDir string, relDir string, prefix string, f func(string) error, options ...objstore.IterOption) error {
- if ctx.Err() != nil {
- return ctx.Err()
- }
-
- params := objstore.ApplyIterOptions(options...)
- info, err := os.Stat(absDir)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
- return errors.Wrapf(err, "stat %s", absDir)
- }
- if !info.IsDir() {
- return nil
- }
-
- files, err := os.ReadDir(absDir)
- if err != nil {
- return err
- }
- for _, file := range files {
- name := filepath.Join(relDir, file.Name())
- if prefix != "" && !strings.HasPrefix(name, prefix) {
- continue
- }
-
- if file.IsDir() {
- empty, err := isDirEmpty(filepath.Join(absDir, file.Name()))
- if err != nil {
- return err
- }
-
- if empty {
- // Skip empty directories.
- continue
- }
-
- name += objstore.DirDelim
-
- if params.Recursive {
- // Recursively list files in the subdirectory.
- if err := b.iterPrefix(ctx, filepath.Join(absDir, file.Name()), name, prefix, f, options...); err != nil {
- return err
- }
-
- // The callback f() has already been called for the subdirectory
- // files so we should skip to next filesystem entry.
- continue
- }
- }
- if err := f(name); err != nil {
- return err
- }
- }
- return nil
-}
-
-func isDirEmpty(name string) (ok bool, err error) {
- f, err := os.Open(filepath.Clean(name))
- if os.IsNotExist(err) {
- // The directory doesn't exist. We don't consider it an error and we treat it like empty.
- return true, nil
- }
- if err != nil {
- return false, err
- }
- defer runutil.CloseWithErrCapture(&err, f, "isDirEmpty")
-
- if _, err = f.Readdir(1); err == io.EOF || os.IsNotExist(err) {
- return true, nil
- }
- return false, err
-}
-
// ReaderWithExpectedErrs implements objstore.Bucket.
func (b *Bucket) ReaderWithExpectedErrs(fn phlareobjstore.IsOpFailureExpectedFunc) phlareobjstore.BucketReader {
return b.WithExpectedErrs(fn)
diff --git a/pkg/objstore/providers/filesystem/bucket_client_test.go b/pkg/objstore/providers/filesystem/bucket_client_test.go
index f24d0a24da..7dfe748e9d 100644
--- a/pkg/objstore/providers/filesystem/bucket_client_test.go
+++ b/pkg/objstore/providers/filesystem/bucket_client_test.go
@@ -79,31 +79,6 @@ func TestIter(t *testing.T) {
expected: []string{"foo/ba/buzz3", "foo/bar/buz1", "foo/bar/buz2", "foo/buzz4", "foo/buzz5", "foo6"},
options: []objstore.IterOption{objstore.WithRecursiveIter},
},
- {
- prefix: "foo",
- expected: []string{"foo/", "foo6"},
- options: []objstore.IterOption{objstore.WithoutApendingDirDelim},
- },
- {
- prefix: "f",
- expected: []string{"foo/", "foo6"},
- options: []objstore.IterOption{objstore.WithoutApendingDirDelim},
- },
- {
- prefix: "foo/ba",
- expected: []string{"foo/ba/", "foo/bar/"},
- options: []objstore.IterOption{objstore.WithoutApendingDirDelim},
- },
- {
- prefix: "foo/ba",
- expected: []string{"foo/ba/buzz3", "foo/bar/buz1", "foo/bar/buz2"},
- options: []objstore.IterOption{objstore.WithoutApendingDirDelim, objstore.WithRecursiveIter},
- },
- {
- prefix: "fo",
- expected: []string{"foo/ba/buzz3", "foo/bar/buz1", "foo/bar/buz2", "foo/buzz4", "foo/buzz5", "foo6"},
- options: []objstore.IterOption{objstore.WithoutApendingDirDelim, objstore.WithRecursiveIter},
- },
} {
tc := tc
t.Run(tc.prefix, func(t *testing.T) {
diff --git a/pkg/og/storage/segment/debug_vis.go b/pkg/og/storage/segment/debug_vis.go
deleted file mode 100644
index c206ca1fae..0000000000
--- a/pkg/og/storage/segment/debug_vis.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package segment
-
-import (
- "encoding/json"
- "math/big"
- "os"
- "text/template"
- "time"
-)
-
-var visDebuggingEnabled = false
-
-type visualizeNode2 struct {
- T1 time.Time
- T2 time.Time
- Depth int
- HasTrie bool
- Samples uint64
- M int
- D int
- Used bool
-}
-
-type vis struct {
- nodes []*visualizeNode2
-}
-
-// This is here for debugging
-func newVis() *vis {
- return &vis{nodes: []*visualizeNode2{}}
-}
-
-func (v *vis) add(n *streeNode, r *big.Rat, used bool) {
- if !visDebuggingEnabled {
- return
- }
- v.nodes = append(v.nodes, &visualizeNode2{
- T1: n.time.UTC(),
- T2: n.time.Add(durations[n.depth]).UTC(),
- Depth: n.depth,
- HasTrie: n.present,
- Samples: n.samples,
- M: int(r.Num().Int64()),
- D: int(r.Denom().Int64()),
- Used: used,
- })
-}
-
-type TmpltVars struct {
- Data string
-}
-
-func (v *vis) print(name string) {
- if !visDebuggingEnabled {
- return
- }
- vizTmplt, _ := template.New("viz").Parse(vizTmplt)
-
- jsonBytes, _ := json.MarshalIndent(v.nodes, "", " ")
- jsonStr := string(jsonBytes)
- w, _ := os.Create(name)
- vizTmplt.Execute(w, TmpltVars{Data: jsonStr})
-}
-
-var vizTmplt = `
-
-
-
-
-
-
-
-
-
-
-
-
-
-`
diff --git a/pkg/og/storage/segment/fuzz_test.go b/pkg/og/storage/segment/fuzz_test.go
deleted file mode 100644
index 3df605eb56..0000000000
--- a/pkg/og/storage/segment/fuzz_test.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package segment
-
-import (
- "log"
- "math/big"
- "math/rand"
- "sync"
- "time"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-type datapoint struct {
- t time.Time
- samples uint64
- r *big.Rat
-}
-
-type storageMock struct {
- resolution time.Duration
- data []datapoint
-}
-
-func newMock(resolution time.Duration) *storageMock {
- return &storageMock{
- resolution: resolution,
- data: []datapoint{},
- }
-}
-
-func (sm *storageMock) Put(st, et time.Time, samples uint64) {
- st, et = normalize(st, et)
- fullDur := et.Sub(st) / sm.resolution
- for t := st; t.Before(et); t = t.Add(sm.resolution) {
- d := datapoint{
- t: t,
- samples: samples,
- r: big.NewRat(int64(samples), int64(fullDur)),
- }
-
- sm.data = append(sm.data, d)
- }
-}
-
-func (sm *storageMock) Get(st, et time.Time, cb func(depth int, samples, writes uint64, t time.Time, r *big.Rat)) {
- st, et = normalize(st, et)
- for _, d := range sm.data {
- if !d.t.Before(st) && !d.t.Add(sm.resolution).After(et) {
- cb(0, 1, 1, d.t, d.r)
- }
- }
-}
-
-// if you change something in this test make sure it doesn't change test coverage.
-func fuzzTest(testWrites bool, writeSize func() int) {
- s := New()
- m := newMock(10 * time.Second)
-
- r := rand.New(rand.NewSource(1213))
-
- for k := 0; k < 20; k++ {
- maxStartTime := r.Intn(5000)
- // for i := 0; i < 10; i++ {
- for i := 0; i < r.Intn(200); i++ {
- sti := r.Intn(maxStartTime) * 10
- st := testing.SimpleTime(sti)
- et := testing.SimpleTime(sti + writeSize())
- dur := et.Sub(st)
-
- // samples := uint64(1+r.Intn(10)) * uint64(dur/(10*time.Second))
- samples := uint64(20)
-
- m.Put(st, et, samples)
- s.Put(st, et, samples, func(depth int, t time.Time, r *big.Rat, addons []Addon) {
- log.Println(depth, r, dur)
- })
- }
- mSum := big.NewRat(0, 1)
- mWrites := big.NewRat(0, 1)
- sSum := big.NewRat(0, 1)
- sWrites := big.NewRat(0, 1)
- for i := 0; i < r.Intn(100); i++ {
- sti := r.Intn(100) * 10
- st := testing.SimpleTime(sti)
- et := testing.SimpleTime(sti + r.Intn(100)*10)
-
- m.Get(st, et, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
- rClone := big.NewRat(r.Num().Int64(), r.Denom().Int64())
- mSum.Add(mSum, rClone.Mul(rClone, big.NewRat(int64(samples), 1)))
- log.Println("mWrites", samples, writes, r)
- // if r.Num().Int64() > 0 {
- // r = r.Inv(r)
- w := big.NewRat(int64(writes), 1)
- // mWrites.Add(mWrites, r.Mul(r, w))
- mWrites.Add(mWrites, w)
- // }
- })
-
- s.Get(st, et, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
- rClone := big.NewRat(r.Num().Int64(), r.Denom().Int64())
- sSum.Add(sSum, rClone.Mul(rClone, big.NewRat(int64(samples), 1)))
- log.Println("sWrites", samples, writes, r)
- // if r.Num().Int64() > 0 {
- // r = r.Inv(r)
- w := big.NewRat(int64(writes), 1)
- // sWrites.Add(sWrites, r.Mul(r, w))
- sWrites.Add(sWrites, w)
- // }
- })
- }
- mSumF, _ := mSum.Float64()
- mWritesF, _ := mWrites.Float64()
- log.Println("m:", mSum, mSumF, mWrites, mWritesF)
-
- sSumF, _ := sSum.Float64()
- sWritesF, _ := sWrites.Float64()
- log.Println("s:", sSum, sSumF, sWrites, sWritesF)
-
- Expect(mSum.Cmp(sSum)).To(Equal(0))
- if testWrites {
- Expect(mWrites.Cmp(sWrites)).To(Equal(0))
- }
- }
-}
-
-// See https://github.com/pyroscope-io/pyroscope/issues/28 for more context
-var _ = Describe("segment", func() {
- Context("fuzz tests", func() {
- Context("writes are 10 second long", func() {
- It("works as expected", func() {
- done := make(chan interface{})
- go func() {
- fuzzTest(true, func() int {
- return 10
- })
- close(done)
- }()
- Eventually(done, 5).Should(BeClosed())
- })
- })
- Context("writes are different lengths", func() {
- It("works as expected", func() {
- done := make(chan interface{})
- go func() {
- fuzzTest(false, func() int {
- return 20
- // return 1 + rand.Intn(10)*10
- })
- close(done)
- }()
- Eventually(done, 5).Should(BeClosed())
- })
- })
- Context("retention and sampling randomized test", func() {
- It("works as expected", func() {
- var (
- seed = 7332
- n = 1
- wg sync.WaitGroup
- )
- wg.Add(n)
- for i := 0; i < n; i++ {
- go func(i int) {
- fuzzDeleteNodesBefore(seed + i)
- wg.Done()
- }(i)
- }
- wg.Wait()
- })
- })
- })
-})
-
-func fuzzDeleteNodesBefore(seed int) {
- defer GinkgoRecover()
-
- s := New()
- r := rand.New(rand.NewSource(int64(seed)))
- w := testSegWriter{
- n: 10e3, // Number of writes
- r: r,
-
- samplesPerWrite: 100,
- writeTimeSpanSec: 10,
- startTimeMin: randInt(1000, 3000),
- startTimeMax: randInt(7000, 100000),
-
- buckets: make([]*bucket, 10),
- }
-
- w.write(s)
-
- for _, b := range w.buckets {
- // Delete samples that fall within the time span of the bucket.
- removed, err := s.DeleteNodesBefore(&RetentionPolicy{AbsoluteTime: b.time})
- Expect(err).ToNot(HaveOccurred())
- Expect(removed).To(BeFalse())
- // Ensure we have removed expected number of samples from the segment.
- samples, writes := totalSamplesWrites(s, time.Time{}, testing.SimpleTime(w.startTimeMax*10))
- Expect(samples).To(Equal(b.samples))
- Expect(writes).To(Equal(b.writes))
- // Ensure no samples left outside the retention period.
- samples, writes = totalSamplesWrites(s, b.time, testing.SimpleTime(w.startTimeMax*10))
- Expect(samples).To(Equal(b.samples))
- Expect(writes).To(Equal(b.writes))
- }
-
- st := testing.SimpleTime(w.startTimeMax * 10)
- samples, writes := totalSamplesWrites(s, st, st.Add(time.Hour))
- Expect(samples).To(BeZero())
- Expect(writes).To(BeZero())
-}
-
-// testSegWriter inserts randomized data into the segment recording the
-// samples distribution by time. Every bucket indicates the number of
-// writes and samples that had been written before the bucket time mark.
-type testSegWriter struct {
- r *rand.Rand
- n int
-
- samplesPerWrite int
- writeTimeSpanSec int
- expectedWrites int
-
- startTimeMin int
- startTimeMax int
-
- buckets []*bucket
-}
-
-type bucket struct {
- time time.Time
- samples int
- writes int
-}
-
-func (f testSegWriter) putStartEndTime() (st time.Time, et time.Time) {
- st = testing.SimpleTime(randInt(f.startTimeMin, f.startTimeMax) * 10)
- et = st.Add(time.Second * time.Duration(f.writeTimeSpanSec))
- return st, et
-}
-
-func randInt(min, max int) int { return rand.Intn(max-min) + min }
-
-func (f testSegWriter) expectedSamples() int { return f.n * f.samplesPerWrite }
-
-func (f testSegWriter) write(s *Segment) {
- // Initialize time buckets, if required: the whole time
- // span is divided proportionally to the number of buckets.
- if len(f.buckets) > 0 {
- step := (f.startTimeMax - f.startTimeMin) / len(f.buckets) * 10
- for i := 0; i < len(f.buckets); i++ {
- f.buckets[i] = &bucket{time: testing.SimpleTime(f.startTimeMin + step*i)}
- }
- }
- for i := 0; i < f.n; i++ {
- st, et := f.putStartEndTime()
- err := s.Put(st, et, uint64(f.samplesPerWrite), putNoOp)
- Expect(err).ToNot(HaveOccurred())
- for _, b := range f.buckets {
- if et.After(b.time) {
- b.samples += f.samplesPerWrite
- b.writes++
- }
- }
- }
-}
-
-func totalSamplesWrites(s *Segment, st, et time.Time) (samples, writes int) {
- v := big.NewRat(0, 1)
- s.Get(st, et, func(depth int, s, w uint64, t time.Time, r *big.Rat) {
- x := big.NewRat(r.Num().Int64(), r.Denom().Int64())
- v.Add(v, x.Mul(x, big.NewRat(int64(s), 1)))
- writes += int(w)
- })
- return int(v.Num().Int64()), writes
-}
diff --git a/pkg/og/storage/segment/key_bech_test.go b/pkg/og/storage/segment/key_bech_test.go
index b254b1c0e0..b1c56c5531 100644
--- a/pkg/og/storage/segment/key_bech_test.go
+++ b/pkg/og/storage/segment/key_bech_test.go
@@ -31,6 +31,8 @@ func BenchmarkKey_Parse(b *testing.B) {
}
}
+func randInt(min, max int) int { return rand.Intn(max-min) + min }
+
// TODO(kolesnikovae): This is not near perfect way of generating strings.
// It makes sense to create a package for util functions like this.
diff --git a/pkg/og/storage/segment/key_test.go b/pkg/og/storage/segment/key_test.go
index e5f6d8cedc..b1fbfc9f9a 100644
--- a/pkg/og/storage/segment/key_test.go
+++ b/pkg/og/storage/segment/key_test.go
@@ -7,6 +7,7 @@ import (
"github.com/grafana/pyroscope/pkg/og/flameql"
)
+// todo port from ginko
var _ = Describe("segment key", func() {
Context("ParseKey", func() {
It("no tags version works", func() {
diff --git a/pkg/og/storage/segment/overlap.go b/pkg/og/storage/segment/overlap.go
deleted file mode 100644
index 53d300f078..0000000000
--- a/pkg/og/storage/segment/overlap.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package segment
-
-import (
- "math/big"
- "time"
-)
-
-func tmin(a, b time.Time) time.Time {
- if a.Before(b) {
- return a
- }
- return b
-}
-
-func tmax(a, b time.Time) time.Time {
- if a.After(b) {
- return a
- }
- return b
-}
-
-func dmax(a, b time.Duration) time.Duration {
- if a > b {
- return a
- }
- return b
-}
-
-// relationship overlap read overlap write
-// inside rel = iota // | S E | <1 1/1
-// match // matching ranges 1/1 1/1
-// outside // | | S E 0/1 0/1
-// overlap // | S | E <1 <1
-// contain // S | | E 1/1 <1
-
-// t1, t2 represent segment node, st, et represent the read query time range
-func overlapRead(t1, t2, st, et time.Time, dur time.Duration) *big.Rat {
- m := int64(dmax(0, tmin(t2, et).Sub(tmax(t1, st))) / dur)
- d := int64(t2.Sub(t1) / dur)
- return big.NewRat(m, d)
-}
-
-// t1, t2 represent segment node, st, et represent the write query time range
-func overlapWrite(t1, t2, st, et time.Time, dur time.Duration) *big.Rat {
- m := int64(dmax(0, tmin(t2, et).Sub(tmax(t1, st))) / dur)
- d := int64(et.Sub(st) / dur)
- return big.NewRat(m, d)
-}
diff --git a/pkg/og/storage/segment/overlap_test.go b/pkg/og/storage/segment/overlap_test.go
deleted file mode 100644
index 8e2c2837e3..0000000000
--- a/pkg/og/storage/segment/overlap_test.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package segment
-
-import (
- "math/big"
- "time"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
- "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-// relationship overlap read overlap write
-// inside rel = iota // | S E | <1 1/1
-// match // matching ranges 1/1 1/1
-// outside // | | S E 0/1 0/1
-// overlap // | S | E <1 <1
-// contain // S | | E 1/1 <1
-
-var _ = Describe("segment", func() {
- Context("overlapRead", func() {
- Context("match", func() {
- It("returns correct values", func() {
- Expect(overlapRead(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(0), testing.SimpleTime(100), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- })
- })
- Context("inside", func() {
- It("returns correct values", func() {
- Expect(overlapRead(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(10), testing.SimpleTime(90), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(8, 10).String()))
- Expect(overlapRead(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(0), testing.SimpleTime(90), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(9, 10).String()))
- Expect(overlapRead(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(10), testing.SimpleTime(100), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(9, 10).String()))
- })
- })
- Context("contain", func() {
- It("returns correct values", func() {
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(100), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(200), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- })
- })
- Context("overlap", func() {
- It("returns correct values", func() {
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(110), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 10).String()))
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(190), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 10).String()))
- })
- })
- Context("outside", func() {
- It("returns correct values", func() {
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(100), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(80), testing.SimpleTime(90), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(200), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- Expect(overlapRead(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(210), testing.SimpleTime(220), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- })
- })
- })
-
- Context("overlapWrite", func() {
- Context("match", func() {
- It("returns correct values", func() {
- Expect(overlapWrite(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(0), testing.SimpleTime(100), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- })
- })
- Context("inside", func() {
- It("returns correct values", func() {
- Expect(overlapWrite(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(10), testing.SimpleTime(90), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- Expect(overlapWrite(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(0), testing.SimpleTime(90), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- Expect(overlapWrite(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(10), testing.SimpleTime(100), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 1).String()))
- })
- })
- Context("contain", func() {
- It("returns correct values", func() {
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(10, 12).String()))
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(100), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(10, 11).String()))
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(200), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(10, 11).String()))
- })
- })
- Context("overlap", func() {
- It("returns correct values", func() {
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(110), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 2).String()))
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(190), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(1, 2).String()))
- })
- })
- Context("outside", func() {
- It("returns correct values", func() {
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(100), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(80), testing.SimpleTime(90), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(200), testing.SimpleTime(210), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- Expect(overlapWrite(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(210), testing.SimpleTime(220), // st et
- 10*time.Second,
- ).String()).To(Equal(big.NewRat(0, 1).String()))
- })
- })
- })
-})
diff --git a/pkg/og/storage/segment/relationship.go b/pkg/og/storage/segment/relationship.go
deleted file mode 100644
index 627d6f5674..0000000000
--- a/pkg/og/storage/segment/relationship.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package segment
-
-import (
- "time"
-)
-
-type rel int
-
-const (
- // relationship overlap read overlap write
- inside rel = iota // | S E | <1 1/1
- match // matching ranges 1/1 1/1
- outside // | | S E 0/1 0/1
- overlap // | S | E <1 <1
- contain // S | | E 1/1 <1
-)
-
-var overlapStrings map[rel]string
-
-// TODO: I bet there's a better way
-func init() {
- overlapStrings = make(map[rel]string)
- overlapStrings[inside] = "inside"
- overlapStrings[outside] = "outside"
- overlapStrings[match] = "match"
- overlapStrings[overlap] = "overlap"
- overlapStrings[contain] = "contain"
-}
-
-func (r rel) String() string {
- return overlapStrings[r]
-}
-
-// t1, t2 represent segment node, st, et represent the read/write query time range
-func relationship(t1, t2, st, et time.Time) rel {
- if t1.Equal(st) && t2.Equal(et) {
- return match
- }
- if !t1.After(st) && !t2.Before(et) {
- return inside
- }
- if !t1.Before(st) && !t2.After(et) {
- return contain
- }
- if !t1.After(st) && !t2.After(st) {
- return outside
- }
- if !t1.Before(et) && !t2.Before(et) {
- return outside
- }
-
- return overlap
-}
diff --git a/pkg/og/storage/segment/relationship_test.go b/pkg/og/storage/segment/relationship_test.go
deleted file mode 100644
index d03137a5f3..0000000000
--- a/pkg/og/storage/segment/relationship_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package segment
-
-import (
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
- "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-// relationship overlap read overlap write
-// inside rel = iota // | S E | <1 1/1
-// match // matching ranges 1/1 1/1
-// outside // | | S E 0/1 0/1
-// overlap // | S | E <1 <1
-// contain // S | | E 1/1 <1
-
-var _ = Describe("stree", func() {
- Context("relationship", func() {
- Context("match", func() {
- It("returns correct values", func() {
- Expect(relationship(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(0), testing.SimpleTime(100), // st et
- ).String()).To(Equal("match"))
- })
- })
- Context("inside", func() {
- It("returns correct values", func() {
- Expect(relationship(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(10), testing.SimpleTime(90), // st et
- ).String()).To(Equal("inside"))
- Expect(relationship(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(0), testing.SimpleTime(90), // st et
- ).String()).To(Equal("inside"))
- Expect(relationship(
- testing.SimpleTime(0), testing.SimpleTime(100), // t1 t2
- testing.SimpleTime(10), testing.SimpleTime(100), // st et
- ).String()).To(Equal("inside"))
- })
- })
- Context("contain", func() {
- It("returns correct values", func() {
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(210), // st et
- ).String()).To(Equal("contain"))
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(100), testing.SimpleTime(210), // st et
- ).String()).To(Equal("contain"))
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(200), // st et
- ).String()).To(Equal("contain"))
- })
- })
- Context("overlap", func() {
- It("returns correct values", func() {
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(110), // st et
- ).String()).To(Equal("overlap"))
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(190), testing.SimpleTime(210), // st et
- ).String()).To(Equal("overlap"))
- })
- })
- Context("outside", func() {
- It("returns correct values", func() {
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(90), testing.SimpleTime(100), // st et
- ).String()).To(Equal("outside"))
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(80), testing.SimpleTime(90), // st et
- ).String()).To(Equal("outside"))
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(200), testing.SimpleTime(210), // st et
- ).String()).To(Equal("outside"))
- Expect(relationship(
- testing.SimpleTime(100), testing.SimpleTime(200), // t1 t2
- testing.SimpleTime(210), testing.SimpleTime(220), // st et
- ).String()).To(Equal("outside"))
- })
- })
- })
-})
diff --git a/pkg/og/storage/segment/retention.go b/pkg/og/storage/segment/retention.go
deleted file mode 100644
index 63b47fa2da..0000000000
--- a/pkg/og/storage/segment/retention.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package segment
-
-import (
- "time"
-)
-
-type RetentionPolicy struct {
- now time.Time
-
- AbsoluteTime time.Time
- Levels map[int]time.Time
-
- ExemplarsRetentionTime time.Time
-}
-
-func NewRetentionPolicy() *RetentionPolicy {
- return &RetentionPolicy{now: time.Now()}
-}
-
-func (r RetentionPolicy) LowerTimeBoundary() time.Time {
- if len(r.Levels) == 0 {
- return r.AbsoluteTime
- }
- return r.Levels[0]
-}
-
-func (r *RetentionPolicy) SetAbsolutePeriod(period time.Duration) *RetentionPolicy {
- r.AbsoluteTime = r.periodToTime(period)
- return r
-}
-
-func (r *RetentionPolicy) SetExemplarsRetentionPeriod(period time.Duration) *RetentionPolicy {
- r.ExemplarsRetentionTime = r.periodToTime(period)
- return r
-}
-
-func (r *RetentionPolicy) SetLevelPeriod(level int, period time.Duration) *RetentionPolicy {
- if r.Levels == nil {
- r.Levels = make(map[int]time.Time)
- }
- r.Levels[level] = r.periodToTime(period)
- return r
-}
-
-func (r *RetentionPolicy) SetLevels(levels ...time.Duration) *RetentionPolicy {
- if r.Levels == nil {
- r.Levels = make(map[int]time.Time)
- }
- for level, period := range levels {
- if period != 0 {
- r.Levels[level] = r.periodToTime(period)
- }
- }
- return r
-}
-
-func (r RetentionPolicy) isToBeDeleted(sn *streeNode) bool {
- return sn.isBefore(r.AbsoluteTime) || sn.isBefore(r.levelMaxTime(sn.depth))
-}
-
-func (r RetentionPolicy) periodToTime(age time.Duration) time.Time {
- if age == 0 {
- return time.Time{}
- }
- return r.now.Add(-1 * age)
-}
-
-func (r *RetentionPolicy) normalize() *RetentionPolicy {
- r.AbsoluteTime = normalizeTime(r.AbsoluteTime)
- for k, v := range r.Levels {
- r.Levels[k] = normalizeTime(v)
- }
- return r
-}
-
-func (r RetentionPolicy) levelMaxTime(depth int) time.Time {
- if r.Levels == nil {
- return time.Time{}
- }
- return r.Levels[depth]
-}
diff --git a/pkg/og/storage/segment/segment.go b/pkg/og/storage/segment/segment.go
deleted file mode 100644
index b0eb4f961d..0000000000
--- a/pkg/og/storage/segment/segment.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package segment
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
- "os"
- "path/filepath"
- "runtime/trace"
- "sync"
- "time"
-
- "github.com/grafana/pyroscope/pkg/og/storage/metadata"
-)
-
-type streeNode struct {
- depth int
- time time.Time
- present bool
- samples uint64
- writes uint64
- children []*streeNode
-}
-
-func (sn *streeNode) replace(child *streeNode) {
- i := child.time.Sub(sn.time) / durations[child.depth]
- sn.children[i] = child
-}
-
-func (sn *streeNode) relationship(st, et time.Time) rel {
- t2 := sn.time.Add(durations[sn.depth])
- return relationship(sn.time, t2, st, et)
-}
-
-func (sn *streeNode) isBefore(rt time.Time) bool {
- t2 := sn.time.Add(durations[sn.depth])
- return !t2.After(rt)
-}
-
-func (sn *streeNode) isAfter(rt time.Time) bool {
- return sn.time.After(rt)
-}
-
-func (sn *streeNode) endTime() time.Time {
- return sn.time.Add(durations[sn.depth])
-}
-
-func (sn *streeNode) overlapRead(st, et time.Time) *big.Rat {
- t2 := sn.time.Add(durations[sn.depth])
- return overlapRead(sn.time, t2, st, et, durations[0])
-}
-
-func (sn *streeNode) overlapWrite(st, et time.Time) *big.Rat {
- t2 := sn.time.Add(durations[sn.depth])
- return overlapWrite(sn.time, t2, st, et, durations[0])
-}
-
-func (sn *streeNode) findAddons() []Addon {
- res := []Addon{}
- if sn.present {
- res = append(res, Addon{
- Depth: sn.depth,
- T: sn.time,
- })
- } else {
- for _, child := range sn.children {
- if child != nil {
- res = append(res, child.findAddons()...)
- }
- }
- }
- return res
-}
-
-func (sn *streeNode) put(st, et time.Time, samples uint64, cb func(n *streeNode, depth int, dt time.Time, r *big.Rat, addons []Addon)) {
- nodes := []*streeNode{sn}
-
- for len(nodes) > 0 {
- sn = nodes[0]
- nodes = nodes[1:]
-
- rel := sn.relationship(st, et)
- if rel != outside {
- childrenCount := 0
- createNewChildren := rel == inside || rel == overlap
- for i, v := range sn.children {
- if createNewChildren && v == nil { // maybe create a new child
- childT := sn.time.Truncate(durations[sn.depth]).Add(time.Duration(i) * durations[sn.depth-1])
-
- rel2 := relationship(childT, childT.Add(durations[sn.depth-1]), st, et)
- if rel2 != outside {
- sn.children[i] = newNode(childT, sn.depth-1, 10)
- }
- }
-
- if sn.children[i] != nil {
- childrenCount++
- nodes = append(nodes, sn.children[i])
- }
- }
- var addons []Addon
-
- r := sn.overlapWrite(st, et)
- fv, _ := r.Float64()
- sn.samples += uint64(float64(samples) * fv)
- sn.writes += uint64(1)
-
- // relationship overlap read overlap write
- // inside rel = iota // | S E | <1 1/1
- // match // matching ranges 1/1 1/1
- // outside // | | S E 0/1 0/1
- // overlap // | S | E <1 <1
- // contain // S | | E 1/1 <1
-
- if rel == match || rel == contain || childrenCount > 1 || sn.present {
- if !sn.present {
- addons = sn.findAddons()
- }
-
- cb(sn, sn.depth, sn.time, r, addons)
- sn.present = true
- }
- }
- }
-}
-
-func normalize(st, et time.Time) (time.Time, time.Time) {
- st = st.Truncate(durations[0])
- et2 := et.Truncate(durations[0])
- if et2.Equal(et) && !st.Equal(et2) {
- return st, et
- }
- return st, et2.Add(durations[0])
-}
-
-func normalizeTime(t time.Time) time.Time {
- return t.Truncate(durations[0])
-}
-
-// get traverses through the tree searching for the nodes satisfying
-// the given time range. If no nodes were found, the most precise
-// down-sampling root node will be passed to the callback function,
-// and relationship r will be proportional to the down-sampling factor.
-//
-// relationship overlap read overlap write
-// inside rel = iota // | S E | <1 1/1
-// match // matching ranges 1/1 1/1
-// outside // | | S E 0/1 0/1
-// overlap // | S | E <1 <1
-// contain // S | | E 1/1 <1
-func (sn *streeNode) get(ctx context.Context, s *Segment, st, et time.Time, cb func(*streeNode, *big.Rat)) {
- r := sn.relationship(st, et)
- trace.Logf(ctx, traceCatNodeGet, "D=%d T=%v P=%v R=%v", sn.depth, sn.time.Unix(), sn.present, r)
- switch r {
- case outside:
- return
- case inside, overlap:
- // Defer to children.
- case contain, match:
- // Take the node as is.
- if sn.present {
- cb(sn, big.NewRat(1, 1))
- return
- }
- }
- trace.Log(ctx, traceCatNodeGet, "drill down")
- // Whether child nodes are outside the retention period.
- if sn.time.Before(s.watermarks.levels[sn.depth-1]) && sn.present {
- trace.Log(ctx, traceCatNodeGet, "sampled")
- // Create a sampled tree from the current node.
- cb(sn, sn.overlapRead(st, et))
- return
- }
- // Traverse nodes recursively.
- for _, v := range sn.children {
- if v != nil {
- v.get(ctx, s, st, et, cb)
- }
- }
-}
-
-// deleteDataBefore returns true if the node should be deleted.
-func (sn *streeNode) deleteNodesBefore(t *RetentionPolicy) (bool, error) {
- if sn.isAfter(t.AbsoluteTime) && t.Levels == nil {
- return false, nil
- }
- remove := t.isToBeDeleted(sn)
- for i, v := range sn.children {
- if v == nil {
- continue
- }
- ok, err := v.deleteNodesBefore(t)
- if err != nil {
- return false, err
- }
- if ok {
- sn.children[i] = nil
- }
- }
- return remove, nil
-}
-
-func (sn *streeNode) walkNodesToDelete(t *RetentionPolicy, cb func(depth int, t time.Time) error) (bool, error) {
- if sn.isAfter(t.AbsoluteTime) && t.Levels == nil {
- return false, nil
- }
- var err error
- remove := t.isToBeDeleted(sn)
- if remove {
- if err = cb(sn.depth, sn.time); err != nil {
- return false, err
- }
- }
- for _, v := range sn.children {
- if v == nil {
- continue
- }
- if _, err = v.walkNodesToDelete(t, cb); err != nil {
- return false, err
- }
- }
- return remove, nil
-}
-
-type Segment struct {
- m sync.RWMutex
- root *streeNode
-
- spyName string
- sampleRate uint32
- units metadata.Units
- aggregationType metadata.AggregationType
-
- watermarks
-}
-
-type watermarks struct {
- absoluteTime time.Time
- levels map[int]time.Time
-}
-
-func newNode(t time.Time, depth, multiplier int) *streeNode {
- sn := &streeNode{
- depth: depth,
- time: t,
- }
- if depth > 0 {
- sn.children = make([]*streeNode, multiplier)
- }
- return sn
-}
-
-func New() *Segment {
- return &Segment{watermarks: watermarks{
- levels: make(map[int]time.Time),
- }}
-}
-
-// TODO: DRY
-func maxTime(a, b time.Time) time.Time {
- if a.After(b) {
- return a
- }
- return b
-}
-
-func minTime(a, b time.Time) time.Time {
- if a.Before(b) {
- return a
- }
- return b
-}
-
-func (s *Segment) growTree(st, et time.Time) bool {
- var prevVal *streeNode
- if s.root != nil {
- st = minTime(st, s.root.time)
- et = maxTime(et, s.root.endTime())
- } else {
- st = st.Truncate(durations[0])
- s.root = newNode(st, 0, multiplier)
- }
-
- for {
- rel := s.root.relationship(st, et)
-
- if rel == inside || rel == match {
- break
- }
-
- prevVal = s.root
- newDepth := prevVal.depth + 1
- if newDepth >= len(durations) {
- return false
- }
- s.root = newNode(prevVal.time.Truncate(durations[newDepth]), newDepth, multiplier)
- if prevVal != nil {
- s.root.samples = prevVal.samples
- s.root.writes = prevVal.writes
- s.root.replace(prevVal)
- }
- }
- return true
-}
-
-type Addon struct {
- Depth int
- T time.Time
-}
-
-var errStartTimeBeforeEndTime = errors.New("start time cannot be after end time")
-var errTreeMaxSize = errors.New("segment tree reached max size, check start / end time parameters")
-
-// TODO: simplify arguments
-// TODO: validate st < et
-func (s *Segment) Put(st, et time.Time, samples uint64, cb func(depth int, t time.Time, r *big.Rat, addons []Addon)) error {
- s.m.Lock()
- defer s.m.Unlock()
-
- st, et = normalize(st, et)
- if st.After(et) {
- return errStartTimeBeforeEndTime
- }
-
- if !s.growTree(st, et) {
- return errTreeMaxSize
- }
- v := newVis()
- s.root.put(st, et, samples, func(sn *streeNode, depth int, tm time.Time, r *big.Rat, addons []Addon) {
- v.add(sn, r, true)
- cb(depth, tm, r, addons)
- })
- v.print(filepath.Join(os.TempDir(), fmt.Sprintf("0-put-%s-%s.html", st.String(), et.String())))
- return nil
-}
-
-const (
- traceRegionGet = "segment.Get"
- traceCatGet = traceRegionGet
- traceCatNodeGet = "node.get"
-)
-
-//revive:disable-next-line:get-return callback
-func (s *Segment) Get(st, et time.Time, cb func(depth int, samples, writes uint64, t time.Time, r *big.Rat)) {
- // TODO: simplify arguments
- // TODO: validate st < et
- s.GetContext(context.Background(), st, et, cb)
-}
-
-//revive:disable-next-line:get-return callback
-func (s *Segment) GetContext(ctx context.Context, st, et time.Time, cb func(depth int, samples, writes uint64, t time.Time, r *big.Rat)) {
- defer trace.StartRegion(ctx, traceRegionGet).End()
- s.m.RLock()
- defer s.m.RUnlock()
- if st.Before(s.watermarks.absoluteTime) {
- trace.Logf(ctx, traceCatGet, "start time %s is outside the retention period; set to %s", st, s.watermarks.absoluteTime)
- st = s.watermarks.absoluteTime
- }
- st, et = normalize(st, et)
- if s.root == nil {
- trace.Log(ctx, traceCatGet, "empty")
- return
- }
- // divider := int(et.Sub(st) / durations[0])
- v := newVis()
- s.root.get(ctx, s, st, et, func(sn *streeNode, r *big.Rat) {
- // TODO: pass m / d from .get() ?
- v.add(sn, r, true)
- cb(sn.depth, sn.samples, sn.writes, sn.time, r)
- })
- v.print(filepath.Join(os.TempDir(), fmt.Sprintf("0-get-%s-%s.html", st.String(), et.String())))
-}
-
-func (s *Segment) DeleteNodesBefore(t *RetentionPolicy) (bool, error) {
- s.m.Lock()
- defer s.m.Unlock()
- if s.root == nil {
- return true, nil
- }
- ok, err := s.root.deleteNodesBefore(t.normalize())
- if err != nil {
- return false, err
- }
- if ok {
- s.root = nil
- }
- s.updateWatermarks(t)
- return ok, nil
-}
-
-func (s *Segment) updateWatermarks(t *RetentionPolicy) {
- if t.AbsoluteTime.After(s.watermarks.absoluteTime) {
- s.watermarks.absoluteTime = t.AbsoluteTime
- }
- for k, v := range t.Levels {
- if level, ok := s.watermarks.levels[k]; ok && v.Before(level) {
- continue
- }
- s.watermarks.levels[k] = v
- }
-}
-
-func (s *Segment) WalkNodesToDelete(t *RetentionPolicy, cb func(depth int, t time.Time) error) (bool, error) {
- s.m.RLock()
- defer s.m.RUnlock()
- if s.root == nil {
- return true, nil
- }
- return s.root.walkNodesToDelete(t.normalize(), cb)
-}
-
-func (s *Segment) SetMetadata(md metadata.Metadata) {
- s.m.Lock()
- s.spyName = md.SpyName
- s.sampleRate = md.SampleRate
- s.units = md.Units
- s.aggregationType = md.AggregationType
- s.m.Unlock()
-}
-
-func (s *Segment) GetMetadata() metadata.Metadata {
- s.m.Lock()
- md := metadata.Metadata{
- SpyName: s.spyName,
- SampleRate: s.sampleRate,
- Units: s.units,
- AggregationType: s.aggregationType,
- }
- s.m.Unlock()
- return md
-}
-
-var zeroTime time.Time
-
-func (s *Segment) StartTime() time.Time {
- if s.root == nil {
- return zeroTime
- }
- n := s.root
-
- for {
- if len(n.children) == 0 {
- return n.time
- }
-
- oldN := n
-
- for _, child := range n.children {
- if child != nil {
- n = child
- break
- }
- }
-
- if n == oldN {
- return n.time
- }
- }
-}
diff --git a/pkg/og/storage/segment/segment_test.go b/pkg/og/storage/segment/segment_test.go
deleted file mode 100644
index a3573d23b1..0000000000
--- a/pkg/og/storage/segment/segment_test.go
+++ /dev/null
@@ -1,482 +0,0 @@
-package segment
-
-import (
- "bufio"
- "log"
- "math/big"
- "math/rand"
- "os"
- "strconv"
- "strings"
- "time"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
-
- "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-var putNoOp = func(depth int, t time.Time, r *big.Rat, addons []Addon) {}
-
-func doGet(s *Segment, st, et time.Time) []time.Time {
- res := []time.Time{}
- s.Get(st, et, func(d int, samples, writes uint64, t time.Time, r *big.Rat) {
- res = append(res, t)
- })
- return res
-}
-
-func strip(val string) string {
- ret := ""
- scanner := bufio.NewScanner(strings.NewReader(val))
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if len(line) > 0 {
- ret += line + "\n"
- }
- }
- return ret
-}
-
-func expectChildrenSamplesAddUpToParentSamples(tn *streeNode) {
- childrenSum := uint64(0)
- if len(tn.children) == 0 {
- return
- }
- for _, v := range tn.children {
- if v != nil {
- expectChildrenSamplesAddUpToParentSamples(v)
- childrenSum += v.samples
- }
- }
- Expect(childrenSum).To(Equal(tn.samples))
-}
-
-var _ = Describe("stree", func() {
- Context("Get", func() {
- Context("When there's no root", func() {
- It("get doesn't fail", func() {
- s := New()
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(39))).To(HaveLen(0))
- })
- })
- })
-
- Context("StartTime", func() {
- Context("empty segment", func() {
- It("returns zero time", func() {
- s := New()
- Expect(s.StartTime().IsZero()).To(BeTrue())
- })
- })
-
- Context("fuzz test", func() {
- It("always returns the right values", func() {
- r := rand.New(rand.NewSource(6231912))
-
- // doesn't work with minTime = 0
- minTime := 1023886146
- maxTime := 1623886146
-
- runs := 100
- maxInsertionsPerTree := 100
-
- for i := 0; i < runs; i++ {
- s := New()
- minSt := maxTime
- for j := 0; j < 1+r.Intn(maxInsertionsPerTree); j++ {
- st := (minTime + r.Intn(maxTime-minTime)) / 10 * 10
- if st < minSt {
- minSt = st
- }
- et := st + 10 + r.Intn(1000)
- s.Put(testing.SimpleTime(st), testing.SimpleTime(et), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- }
-
- Expect(s.StartTime()).To(Equal(testing.SimpleTime(minSt)))
- }
- })
- })
- })
-
- Context("DeleteDataBefore", func() {
- Context("empty segment", func() {
- It("returns true and no keys", func() {
- s := New()
-
- keys := []string{}
- rp := &RetentionPolicy{AbsoluteTime: testing.SimpleTime(19)}
- r, _ := s.WalkNodesToDelete(rp, func(depth int, t time.Time) error {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix())))
- return nil
- })
-
- Expect(r).To(BeTrue())
- Expect(keys).To(BeEmpty())
- })
- })
-
- Context("simple test 1", func() {
- It("correctly deletes data", func() {
- s := New()
- s.Put(testing.SimpleUTime(10), testing.SimpleUTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleUTime(20), testing.SimpleUTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- keys := []string{}
- rp := &RetentionPolicy{AbsoluteTime: testing.SimpleUTime(21)}
- r, _ := s.WalkNodesToDelete(rp, func(depth int, t time.Time) error {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix())))
- return nil
- })
-
- Expect(r).To(BeFalse())
- Expect(keys).To(ConsistOf([]string{
- "0:10",
- }))
- })
- })
-
- Context("simple test 3", func() {
- It("correctly deletes data", func() {
- s := New()
- s.Put(testing.SimpleUTime(10), testing.SimpleUTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleUTime(1020), testing.SimpleUTime(1029), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- keys := []string{}
- rp := &RetentionPolicy{AbsoluteTime: testing.SimpleUTime(21)}
- r, _ := s.WalkNodesToDelete(rp, func(depth int, t time.Time) error {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix())))
- return nil
- })
-
- Expect(r).To(BeFalse())
- Expect(keys).To(ConsistOf([]string{
- "0:10",
- }))
- })
- })
-
- Context("simple test 2", func() {
- It("correctly deletes data", func() {
- s := New()
- s.Put(testing.SimpleUTime(10), testing.SimpleUTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleUTime(20), testing.SimpleUTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- keys := []string{}
- rp := &RetentionPolicy{AbsoluteTime: testing.SimpleUTime(200)}
- r, _ := s.WalkNodesToDelete(rp, func(depth int, t time.Time) error {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix())))
- return nil
- })
-
- Expect(r).To(BeTrue())
- Expect(keys).To(ConsistOf([]string{
- "1:0",
- "0:10",
- "0:20",
- }))
- })
- })
-
- Context("level-based retention", func() {
- It("correctly deletes data partially", func() {
- s := New()
- s.Put(testing.SimpleUTime(10), testing.SimpleUTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleUTime(20), testing.SimpleUTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- keys := []string{}
- rp := &RetentionPolicy{Levels: map[int]time.Time{0: time.Now()}}
- r, _ := s.WalkNodesToDelete(rp, func(depth int, t time.Time) error {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix())))
- return nil
- })
-
- Expect(r).To(BeFalse())
- Expect(s.root).ToNot(BeNil())
- Expect(keys).To(ConsistOf([]string{
- "0:10",
- "0:20",
- }))
-
- removed, err := s.DeleteNodesBefore(rp)
- Expect(err).ToNot(HaveOccurred())
- Expect(removed).To(BeFalse())
- })
-
- It("correctly deletes data completely", func() {
- s := New()
- s.Put(testing.SimpleUTime(10), testing.SimpleUTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleUTime(20), testing.SimpleUTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- var keys []string
- rp := &RetentionPolicy{Levels: map[int]time.Time{0: time.Now(), 1: time.Now()}}
- r, _ := s.WalkNodesToDelete(rp, func(depth int, t time.Time) error {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix())))
- return nil
- })
-
- Expect(r).To(BeTrue())
- Expect(keys).To(ConsistOf([]string{
- "1:0",
- "0:10",
- "0:20",
- }))
-
- removed, err := s.DeleteNodesBefore(rp)
- Expect(err).ToNot(HaveOccurred())
- Expect(removed).To(BeTrue())
- })
-
- Context("Issue 715", func() {
- // See https://github.com/pyroscope-io/pyroscope/issues/715
- It("does not return nodes affected by retention policy", func() {
- b, err := os.Open("testdata/issue_715")
- Expect(err).ToNot(HaveOccurred())
- s, err := Deserialize(b)
- Expect(err).ToNot(HaveOccurred())
-
- var keys []string
- st := time.Date(2022, time.January, 12, 9, 40, 0, 0, time.UTC)
- et := time.Date(2022, time.January, 12, 10, 40, 0, 0, time.UTC)
- s.Get(st, et, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix()))+":"+r.String())
- })
-
- Expect(keys).To(BeEmpty())
- })
-
- It("correctly samples data", func() {
- s := New()
- st := time.Date(2021, time.December, 1, 0, 0, 0, 0, time.UTC)
- et := time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
- rp := &RetentionPolicy{AbsoluteTime: et}
-
- c := st
- for c.Before(et) {
- e := c.Add(time.Second * time.Duration(10))
- err := s.Put(c, e, 100, func(int, time.Time, *big.Rat, []Addon) {})
- Expect(err).ToNot(HaveOccurred())
- c = e
- }
-
- r, err := s.DeleteNodesBefore(rp)
- Expect(r).To(BeFalse())
- Expect(err).ToNot(HaveOccurred())
-
- gSt := st.Add(-time.Hour)
- gEt := et.Add(time.Hour)
-
- var keys []string
- s.Get(gSt, gEt, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix()))+":"+r.String())
- })
-
- Expect(keys).To(BeEmpty())
- })
-
- It("correctly samples data with level retention period", func() {
- s := New()
- st := time.Date(2021, time.December, 1, 0, 0, 0, 0, time.UTC)
- et := time.Date(2021, time.December, 2, 0, 0, 0, 0, time.UTC)
-
- c := st
- for c.Before(et) {
- e := c.Add(time.Second * time.Duration(10))
- err := s.Put(c, e, 100, func(int, time.Time, *big.Rat, []Addon) {})
- Expect(err).ToNot(HaveOccurred())
- c = e
- }
-
- r, err := s.DeleteNodesBefore(&RetentionPolicy{Levels: map[int]time.Time{0: et}})
- Expect(r).To(BeFalse())
- Expect(err).ToNot(HaveOccurred())
-
- gSt := time.Date(2021, time.December, 1, 10, 0, 0, 0, time.UTC)
- gEt := gSt.Add(time.Second * 30)
-
- var keys []string
- s.Get(gSt, gEt, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
- keys = append(keys, strconv.Itoa(depth)+":"+strconv.Itoa(int(t.Unix()))+":"+r.String())
- })
-
- Expect(keys).To(ConsistOf([]string{
- "1:1638352800:3/10",
- }))
- })
- })
- })
- })
-
- Context("Put", func() {
- Context("When inserts are far apart", func() {
- Context("When second insert is far in the future", func() {
- It("sets root properly", func() {
- log.Println("---")
- s := New()
- s.Put(testing.SimpleTime(1330),
- testing.SimpleTime(1339), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
- s.Put(testing.SimpleTime(1110),
- testing.SimpleTime(1119), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- expectChildrenSamplesAddUpToParentSamples(s.root)
- })
- })
- Context("When second insert is far in the past", func() {
- It("sets root properly", func() {
- log.Println("---")
- s := New()
- s.Put(testing.SimpleTime(2030),
- testing.SimpleTime(2039), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- expectChildrenSamplesAddUpToParentSamples(s.root)
- })
- })
- })
-
- Context("When empty", func() {
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(49), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(109), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(2))
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- expectChildrenSamplesAddUpToParentSamples(s.root)
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
- expectChildrenSamplesAddUpToParentSamples(s.root)
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 10, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(39), 10, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
- expectChildrenSamplesAddUpToParentSamples(s.root)
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
-
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
-
- s.Put(testing.SimpleTime(30),
- testing.SimpleTime(39), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
- expectChildrenSamplesAddUpToParentSamples(s.root)
- })
-
- It("sets root properly", func() {
- s := New()
- s.Put(testing.SimpleTime(30),
- testing.SimpleTime(39), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
-
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
-
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
-
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(39))).To(HaveLen(3))
- })
-
- It("works with 3 mins", func() {
- s := New()
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(70), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(1))
- // Expect(doGet(s, testing.SimpleTime(20, testing.SimpleTime(49))).To(HaveLen(3))
- })
-
- It("sets trie properly, gets work", func() {
- s := New()
-
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(0))
-
- s.Put(testing.SimpleTime(100),
- testing.SimpleTime(109), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- expectChildrenSamplesAddUpToParentSamples(s.root)
- Expect(s.root).ToNot(BeNil())
- Expect(s.root.depth).To(Equal(2))
- Expect(s.root.present).To(BeTrue())
- Expect(s.root.children[0]).ToNot(BeNil())
- Expect(s.root.children[0].present).ToNot(BeTrue())
- Expect(s.root.children[1]).ToNot(BeNil())
- Expect(s.root.children[1].present).ToNot(BeTrue())
- Expect(s.root.children[0].children[0].present).To(BeTrue())
- Expect(s.root.children[1].children[0].present).To(BeTrue())
-
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(9))).To(HaveLen(1))
- Expect(doGet(s, testing.SimpleTime(10), testing.SimpleTime(19))).To(HaveLen(0))
- Expect(doGet(s, testing.SimpleTime(100), testing.SimpleTime(109))).To(HaveLen(1))
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(109))).To(HaveLen(2))
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(999))).To(HaveLen(1))
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(1000))).To(HaveLen(1))
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(1001))).To(HaveLen(1))
- Expect(doGet(s, testing.SimpleTime(0), testing.SimpleTime(989))).To(HaveLen(2))
- })
- })
- })
-})
diff --git a/pkg/og/storage/segment/serialization.go b/pkg/og/storage/segment/serialization.go
deleted file mode 100644
index e484915b72..0000000000
--- a/pkg/og/storage/segment/serialization.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package segment
-
-import (
- "bufio"
- "bytes"
- "errors"
- "io"
- "time"
-
- "github.com/grafana/pyroscope/pkg/og/storage/metadata"
- "github.com/grafana/pyroscope/pkg/og/util/serialization"
- "github.com/grafana/pyroscope/pkg/og/util/varint"
-)
-
-// serialization format version. it's not very useful right now, but it will be in the future
-const currentVersion = 3
-
-func (s *Segment) populateFromMetadata(mdata map[string]interface{}) {
- if v, ok := mdata["sampleRate"]; ok {
- s.sampleRate = uint32(v.(float64))
- }
- if v, ok := mdata["spyName"]; ok {
- s.spyName = v.(string)
- }
- if v, ok := mdata["units"]; ok {
- s.units = metadata.Units(v.(string))
- }
- if v, ok := mdata["aggregationType"]; ok {
- s.aggregationType = metadata.AggregationType(v.(string))
- }
-}
-
-func (s *Segment) generateMetadata() map[string]interface{} {
- return map[string]interface{}{
- "sampleRate": s.sampleRate,
- "spyName": s.spyName,
- "units": s.units,
- "aggregationType": s.aggregationType,
- }
-}
-
-func (s *Segment) Serialize(w io.Writer) error {
- s.m.RLock()
- defer s.m.RUnlock()
-
- vw := varint.NewWriter()
- if _, err := vw.Write(w, currentVersion); err != nil {
- return err
- }
- if err := serialization.WriteMetadata(w, s.generateMetadata()); err != nil {
- return err
- }
-
- if s.root == nil {
- return nil
- }
-
- s.serialize(w, vw, s.root)
-
- return s.watermarks.serialize(w)
-}
-
-func (s *Segment) serialize(w io.Writer, vw varint.Writer, n *streeNode) {
- vw.Write(w, uint64(n.depth))
- vw.Write(w, uint64(n.time.Unix()))
- vw.Write(w, n.samples)
- vw.Write(w, n.writes)
- p := uint64(0)
- if n.present {
- p = 1
- }
- vw.Write(w, p)
-
- // depth
- // time
- // keyInChunks
- // children
- l := 0
- for _, v := range n.children {
- if v != nil {
- l++
- }
- }
-
- vw.Write(w, uint64(l))
- for _, v := range n.children {
- if v != nil {
- s.serialize(w, vw, v)
- }
- }
-}
-
-var errMaxDepth = errors.New("depth is too high")
-
-func Deserialize(r io.Reader) (*Segment, error) {
- s := New()
- br := bufio.NewReader(r) // TODO if it's already a bytereader skip
-
- // reads serialization format version, see comment at the top
- version, err := varint.Read(br)
- if err != nil {
- return nil, err
- }
-
- mdata, err := serialization.ReadMetadata(br)
- if err != nil {
- return nil, err
- }
- s.populateFromMetadata(mdata)
-
- // In some cases, there can be no nodes.
- if br.Buffered() == 0 {
- return s, nil
- }
-
- parents := []*streeNode{nil}
- for len(parents) > 0 {
- depth, err := varint.Read(br)
- if err != nil {
- return nil, err
- }
- if int(depth) >= len(durations) {
- return nil, errMaxDepth
- }
- timeVal, err := varint.Read(br)
- if err != nil {
- return nil, err
- }
- samplesVal, err := varint.Read(br)
- if err != nil {
- return nil, err
- }
- var writesVal uint64
- if version >= 2 {
- writesVal, err = varint.Read(br)
- if err != nil {
- return nil, err
- }
- }
- presentVal, err := varint.Read(br)
- if err != nil {
- return nil, err
- }
- node := newNode(time.Unix(int64(timeVal), 0), int(depth), multiplier)
- if presentVal == 1 {
- node.present = true
- }
- node.samples = samplesVal
- node.writes = writesVal
- if s.root == nil {
- s.root = node
- }
-
- parent := parents[0]
- parents = parents[1:]
- if parent != nil {
- parent.replace(node)
- }
- childrenLen, err := varint.Read(br)
- if err != nil {
- return nil, err
- }
-
- r := []*streeNode{}
- for i := 0; i < int(childrenLen); i++ {
- r = append(r, node)
- }
- parents = append(r, parents...)
- }
-
- if version >= 3 {
- if err = deserializeWatermarks(br, &s.watermarks); err != nil {
- return nil, err
- }
- }
-
- return s, nil
-}
-
-func (s *Segment) Bytes() ([]byte, error) {
- b := bytes.Buffer{}
- if err := s.Serialize(&b); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-func FromBytes(p []byte) (*Segment, error) {
- return Deserialize(bytes.NewReader(p))
-}
-
-func (w watermarks) serialize(dst io.Writer) error {
- vw := varint.NewWriter()
- if _, err := vw.Write(dst, uint64(w.absoluteTime.UTC().Unix())); err != nil {
- return err
- }
- if _, err := vw.Write(dst, uint64(len(w.levels))); err != nil {
- return err
- }
- for k, v := range w.levels {
- if _, err := vw.Write(dst, uint64(k)); err != nil {
- return err
- }
- if _, err := vw.Write(dst, uint64(v.UTC().Unix())); err != nil {
- return err
- }
- }
- return nil
-}
-
-func deserializeWatermarks(r io.ByteReader, w *watermarks) error {
- a, err := varint.Read(r)
- if err != nil {
- return err
- }
- w.absoluteTime = time.Unix(int64(a), 0).UTC()
- l, err := varint.Read(r)
- if err != nil {
- return err
- }
- levels := int(l)
- for i := 0; i < levels; i++ {
- k, err := varint.Read(r)
- if err != nil {
- return err
- }
- v, err := varint.Read(r)
- if err != nil {
- return err
- }
- w.levels[int(k)] = time.Unix(int64(v), 0).UTC()
- }
- return nil
-}
diff --git a/pkg/og/storage/segment/serialization_bench_test.go b/pkg/og/storage/segment/serialization_bench_test.go
deleted file mode 100644
index b4ae513397..0000000000
--- a/pkg/og/storage/segment/serialization_bench_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package segment
-
-import (
- "bytes"
- "fmt"
- "math/big"
- "math/rand"
- "testing"
- "time"
-
- ptesting "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-func serialize(s *Segment) []byte {
- var buf bytes.Buffer
- s.Serialize(&buf)
- return buf.Bytes()
-}
-
-func BenchmarkSerialize(b *testing.B) {
- for k := 10; k <= 1000000; k *= 10 {
- s := New()
- for i := 0; i < k; i++ {
- s.Put(ptesting.SimpleTime(i*10), ptesting.SimpleTime(i*10+9), uint64(rand.Intn(100)), func(de int, t time.Time, r *big.Rat, a []Addon) {})
- }
- b.ResetTimer()
- b.Run(fmt.Sprintf("serialize %d", k), func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- _ = serialize(s)
- }
- })
- }
-}
diff --git a/pkg/og/storage/segment/serialization_test.go b/pkg/og/storage/segment/serialization_test.go
deleted file mode 100644
index 9408fe3f6b..0000000000
--- a/pkg/og/storage/segment/serialization_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package segment
-
-import (
- "bytes"
- "log"
- "math/big"
- "time"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
- "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-var serializedExampleV1 = "\x01({\"sampleRate\":0,\"spyName\":\"\",\"units\":\"\"}" +
- "\x01\x80\x92\xb8Ø\xfe\xff\xff\xff\x01\x03\x01\x03\x00\x80\x92\xb8Ø\xfe\xff\xff" +
- "\xff\x01\x01\x01\x00\x00\x8a\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x00\x00\x94\x92" +
- "\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x00"
-
-var serializedExampleV2 = "\x02={\"aggregationType\":\"\",\"sampleRate\":0,\"spyName\":\"\",\"units\":\"\"}" +
- "\x01\x80\x92\xb8Ø\xfe\xff\xff\xff\x01\x03\x03\x01\x03\x00\x80\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x01\x00" +
- "\x00\x8a\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x01\x00\x00\x94\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x01\x00"
-
-var serializedExampleV3 = "\x03={\"aggregationType\":\"\",\"sampleRate\":0,\"spyName\":\"\",\"units\":\"\"}" +
- "\x01\x80\x92\xb8Ø\xfe\xff\xff\xff\x01\x03\x03\x01\x03\x00\x80\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x01\x00" +
- "\x00\x8a\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x01\x00\x00\x94\x92\xb8Ø\xfe\xff\xff\xff\x01\x01\x01\x01\x00" +
- "\x80\x92\xb8Ø\xfe\xff\xff\xff\x01\x00"
-
-var _ = Describe("stree", func() {
- Context("Serialize / Deserialize", func() {
- It("both functions work properly", func() {
- s := New()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- s.watermarks = watermarks{absoluteTime: testing.SimpleTime(100)}
-
- var buf bytes.Buffer
- s.Serialize(&buf)
- serialized := buf.Bytes()
- log.Printf("%q", serialized)
-
- s, err := Deserialize(bytes.NewReader(serialized))
- Expect(err).ToNot(HaveOccurred())
- var buf2 bytes.Buffer
- s.Serialize(&buf2)
- serialized2 := buf2.Bytes()
- Expect(string(serialized2)).To(Equal(string(serialized)))
- })
- })
-
- Context("Serialize", func() {
- It("serializes segment tree properly", func() {
- s := New()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 1, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- var buf bytes.Buffer
- s.Serialize(&buf)
- serialized := buf.Bytes()
- log.Printf("q: %q", string(serialized))
- Expect(string(serialized)).To(Equal(serializedExampleV3))
- })
- })
-
- Context("Deserialize", func() {
- Context("v1", func() {
- It("deserializes v1 data", func() {
- s, err := Deserialize(bytes.NewReader([]byte(serializedExampleV1)))
- Expect(err).ToNot(HaveOccurred())
- Expect(s.root.children[0]).ToNot(BeNil())
- Expect(s.root.children[1]).ToNot(BeNil())
- Expect(s.root.children[2]).ToNot(BeNil())
- Expect(s.root.children[3]).To(BeNil())
- })
- })
- Context("v2", func() {
- It("deserializes v2 data", func() {
- s, err := Deserialize(bytes.NewReader([]byte(serializedExampleV2)))
- Expect(err).ToNot(HaveOccurred())
- Expect(s.root.children[0]).ToNot(BeNil())
- Expect(s.root.children[1]).ToNot(BeNil())
- Expect(s.root.children[2]).ToNot(BeNil())
- Expect(s.root.children[3]).To(BeNil())
- Expect(s.root.writes).To(Equal(uint64(3)))
- })
- })
- Context("v3", func() {
- It("deserializes v3 data", func() {
- s, err := Deserialize(bytes.NewReader([]byte(serializedExampleV3)))
- Expect(err).ToNot(HaveOccurred())
- Expect(s.root.children[0]).ToNot(BeNil())
- Expect(s.root.children[1]).ToNot(BeNil())
- Expect(s.root.children[2]).ToNot(BeNil())
- Expect(s.root.children[3]).To(BeNil())
- Expect(s.root.writes).To(Equal(uint64(3)))
- })
- })
- })
-
- Context("watermarks serialize / deserialize", func() {
- It("both functions work properly", func() {
- w := watermarks{
- absoluteTime: testing.SimpleTime(100),
- levels: map[int]time.Time{
- 0: testing.SimpleTime(100),
- 1: testing.SimpleTime(1000),
- },
- }
-
- var buf bytes.Buffer
- err := w.serialize(&buf)
- Expect(err).ToNot(HaveOccurred())
-
- s := New()
- err = deserializeWatermarks(bytes.NewReader(buf.Bytes()), &s.watermarks)
- Expect(err).ToNot(HaveOccurred())
- Expect(w).To(Equal(s.watermarks))
- })
- })
-})
diff --git a/pkg/og/storage/segment/timeline.go b/pkg/og/storage/segment/timeline.go
index cfb35d11fb..8d204868e6 100644
--- a/pkg/og/storage/segment/timeline.go
+++ b/pkg/og/storage/segment/timeline.go
@@ -32,107 +32,3 @@ type Timeline struct {
// 4. Data before 1635506200 has resolution 10000s
Watermarks map[int]int64 `json:"watermarks"`
}
-
-func GenerateTimeline(st, et time.Time) *Timeline {
- st, et = normalize(st, et)
- totalDuration := et.Sub(st)
- minDuration := totalDuration / time.Duration(1024)
- delta := durations[0]
- for _, d := range durations {
- if d < 0 {
- break
- }
- if d < minDuration {
- delta = d
- }
- }
- return &Timeline{
- st: st,
- et: et,
- StartTime: st.Unix(),
- Samples: make([]uint64, totalDuration/delta),
- durationDelta: delta,
- DurationDeltaNormalized: int64(delta / time.Second),
- Watermarks: make(map[int]int64),
- }
-}
-
-func (tl *Timeline) PopulateTimeline(s *Segment) {
- s.m.Lock()
- if s.root != nil {
- s.root.populateTimeline(tl, s)
- }
- s.m.Unlock()
-}
-
-func (sn streeNode) populateTimeline(tl *Timeline, s *Segment) {
- if sn.relationship(tl.st, tl.et) == outside {
- return
- }
-
- var (
- currentDuration = durations[sn.depth]
- levelWatermark time.Time
- hasDataBefore bool
- )
-
- if sn.depth > 0 {
- levelWatermark = s.watermarks.levels[sn.depth-1]
- }
-
- if len(sn.children) > 0 && currentDuration >= tl.durationDelta {
- for i, v := range sn.children {
- if v != nil {
- v.populateTimeline(tl, s)
- hasDataBefore = true
- continue
- }
- if hasDataBefore || levelWatermark.IsZero() || sn.isBefore(s.watermarks.absoluteTime) {
- continue
- }
- if c := sn.createSampledChild(i); c.isBefore(levelWatermark) && c.isAfter(s.watermarks.absoluteTime) {
- c.populateTimeline(tl, s)
- if m := c.time.Add(durations[c.depth]); m.After(tl.st) {
- tl.Watermarks[c.depth+1] = c.time.Add(durations[c.depth]).Unix()
- }
- }
- }
- return
- }
-
- nodeTime := sn.time
- if currentDuration < tl.durationDelta {
- currentDuration = tl.durationDelta
- nodeTime = nodeTime.Truncate(currentDuration)
- }
-
- i := int(nodeTime.Sub(tl.st) / tl.durationDelta)
- rightBoundary := i + int(currentDuration/tl.durationDelta)
-
- l := len(tl.Samples)
- for i < rightBoundary {
- if i >= 0 && i < l {
- if tl.Samples[i] == 0 {
- tl.Samples[i] = 1
- }
- tl.Samples[i] += sn.samples
- }
- i++
- }
-}
-
-func (sn *streeNode) createSampledChild(i int) *streeNode {
- s := &streeNode{
- depth: sn.depth - 1,
- time: sn.time.Add(time.Duration(i) * durations[sn.depth-1]),
- samples: sn.samples / multiplier,
- writes: sn.samples / multiplier,
- }
- if s.depth > 0 {
- s.children = make([]*streeNode, multiplier)
- for j := range s.children {
- s.children[j] = s.createSampledChild(j)
- }
- }
- return s
-}
diff --git a/pkg/og/storage/segment/timeline_test.go b/pkg/og/storage/segment/timeline_test.go
deleted file mode 100644
index cffa425c82..0000000000
--- a/pkg/og/storage/segment/timeline_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package segment
-
-import (
- "math/big"
- "time"
-
- . "github.com/onsi/ginkgo/v2"
- . "github.com/onsi/gomega"
- "github.com/grafana/pyroscope/pkg/og/testing"
-)
-
-var _ = Describe("timeline", func() {
- var (
- timeline *Timeline
- st int
- et int
- )
-
- BeforeEach(func() {
- st = 0
- et = 40
- })
- JustBeforeEach(func() {
- timeline = GenerateTimeline(
- testing.SimpleTime(st),
- testing.SimpleTime(et),
- )
- })
-
- Describe("PopulateTimeline", func() {
- Context("empty segment", func() {
- It("works as expected", func(done Done) {
- s := New()
- timeline.PopulateTimeline(s)
- Expect(timeline.Samples).To(Equal([]uint64{
- 0,
- 0,
- 0,
- 0,
- }))
- close(done)
- })
- })
- Context("one level", func() {
- It("works as expected", func() {
- done := make(chan interface{})
- go func() {
- s := New()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 2, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 5, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 0, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- timeline.PopulateTimeline(s)
- Expect(timeline.Samples).To(Equal([]uint64{
- 3,
- 6,
- 1,
- 0,
- }))
-
- close(done)
- }()
- Eventually(done, 5).Should(BeClosed())
- })
- })
- Context("multiple Levels", func() {
- BeforeEach(func() {
- st = 0
- et = 365 * 24 * 60 * 60
- })
-
- It("works as expected", func() {
- done := make(chan interface{})
- go func() {
- s := New()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 2, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 5, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(20),
- testing.SimpleTime(29), 0, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- timeline.PopulateTimeline(s)
- expected := make([]uint64, 3153)
- expected[0] = 8
- Expect(timeline.Samples).To(Equal(expected))
-
- close(done)
- }()
- Eventually(done, 5).Should(BeClosed())
- })
- })
-
- Context("with threshold", func() {
- BeforeEach(func() {
- st = 0
- et = 365 * 24 * 60 * 60
- })
-
- It("removed nodes are down-sampled", func() {
- done := make(chan interface{})
- go func() {
- s := New()
- now := time.Now()
- s.Put(testing.SimpleTime(0),
- testing.SimpleTime(9), 2, func(de int, t time.Time, r *big.Rat, a []Addon) {})
- s.Put(testing.SimpleTime(10),
- testing.SimpleTime(19), 5, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- // To prevent segment root removal.
- s.Put(now.Add(-10*time.Second),
- now, 0, func(de int, t time.Time, r *big.Rat, a []Addon) {})
-
- threshold := NewRetentionPolicy().
- SetLevelPeriod(0, time.Second).
- SetLevelPeriod(1, time.Minute)
-
- _, err := s.DeleteNodesBefore(threshold)
- Expect(err).ToNot(HaveOccurred())
- timeline.PopulateTimeline(s)
- expected := make([]uint64, 3153)
- expected[0] = 8
- Expect(timeline.Samples).To(Equal(expected))
-
- close(done)
- }()
- Eventually(done, 5).Should(BeClosed())
- })
- })
- })
-})
diff --git a/pkg/og/storage/segment/visualize.go b/pkg/og/storage/segment/visualize.go
deleted file mode 100644
index 81990fea75..0000000000
--- a/pkg/og/storage/segment/visualize.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package segment
-
-import (
- "time"
-)
-
-// var highchartsTemplate *template.Template
-
-func init() {
-}
-
-type visualizeNode struct {
- T1 time.Time
- T2 time.Time
- Depth int
- HasTrie bool
-}
-
-// This is here for debugging
-func (s *Segment) Visualize() {
- res := []*visualizeNode{}
- if s.root != nil {
- nodes := []*streeNode{s.root}
- for len(nodes) != 0 {
- n := nodes[0]
- nodes = nodes[1:]
- // log.Debug("node:", durations[n.depth])
- res = append(res, &visualizeNode{
- T1: n.time.UTC(),
- T2: n.time.Add(durations[n.depth]).UTC(),
- Depth: n.depth,
- HasTrie: n.present,
- })
- for _, v := range n.children {
- if v != nil {
- nodes = append(nodes, v)
- }
- }
- }
- }
-
- // jsonBytes, _ := json.MarshalIndent(res, "", " ")
- // log.Debug(string(jsonBytes))
-}
diff --git a/pkg/og/storage/tree/profile.pb.go b/pkg/og/storage/tree/profile.pb.go
index fdf226a6f0..14d4e5728b 100644
--- a/pkg/og/storage/tree/profile.pb.go
+++ b/pkg/og/storage/tree/profile.pb.go
@@ -38,7 +38,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: og/storage/tree/profile.proto
@@ -966,7 +966,7 @@ func file_og_storage_tree_profile_proto_rawDescGZIP() []byte {
}
var file_og_storage_tree_profile_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_og_storage_tree_profile_proto_goTypes = []interface{}{
+var file_og_storage_tree_profile_proto_goTypes = []any{
(*Profile)(nil), // 0: perftools.profiles.tree.Profile
(*ValueType)(nil), // 1: perftools.profiles.tree.ValueType
(*Sample)(nil), // 2: perftools.profiles.tree.Sample
@@ -998,7 +998,7 @@ func file_og_storage_tree_profile_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_og_storage_tree_profile_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Profile); i {
case 0:
return &v.state
@@ -1010,7 +1010,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ValueType); i {
case 0:
return &v.state
@@ -1022,7 +1022,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Sample); i {
case 0:
return &v.state
@@ -1034,7 +1034,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Label); i {
case 0:
return &v.state
@@ -1046,7 +1046,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Mapping); i {
case 0:
return &v.state
@@ -1058,7 +1058,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Location); i {
case 0:
return &v.state
@@ -1070,7 +1070,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*Line); i {
case 0:
return &v.state
@@ -1082,7 +1082,7 @@ func file_og_storage_tree_profile_proto_init() {
return nil
}
}
- file_og_storage_tree_profile_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_og_storage_tree_profile_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Function); i {
case 0:
return &v.state
diff --git a/pkg/og/testing/time.go b/pkg/og/testing/time.go
index 11ca446a35..0ba46ddcf8 100644
--- a/pkg/og/testing/time.go
+++ b/pkg/og/testing/time.go
@@ -12,9 +12,6 @@ func ParseTime(str string) time.Time {
return r.UTC()
}
-func SimpleTime(i int) time.Time {
- return time.Time{}.Add(time.Duration(i) * time.Second).UTC()
-}
func SimpleUTime(i int) time.Time {
return time.Unix(int64(i), 0)
diff --git a/pkg/phlare/modules.go b/pkg/phlare/modules.go
index ffa651a606..38aa02fce7 100644
--- a/pkg/phlare/modules.go
+++ b/pkg/phlare/modules.go
@@ -109,6 +109,7 @@ func (f *Phlare) initQueryFrontend() (services.Service, error) {
f.API.RegisterPyroscopeHandlers(frontendSvc)
f.API.RegisterQueryFrontend(frontendSvc)
f.API.RegisterQuerier(frontendSvc)
+ f.frontend = frontendSvc
return frontendSvc, nil
}
@@ -394,6 +395,7 @@ func (f *Phlare) initIngester() (_ services.Service, err error) {
}
f.API.RegisterIngester(svc)
+ f.ingester = svc
return svc, nil
}
diff --git a/pkg/phlare/phlare.go b/pkg/phlare/phlare.go
index ca12ce448b..df60bbd64b 100644
--- a/pkg/phlare/phlare.go
+++ b/pkg/phlare/phlare.go
@@ -239,7 +239,9 @@ type Phlare struct {
grpcGatewayMux *grpcgw.ServeMux
- auth connect.Option
+ auth connect.Option
+ ingester *ingester.Ingester
+ frontend *frontend.Frontend
}
func New(cfg Config) (*Phlare, error) {
@@ -502,6 +504,20 @@ func (f *Phlare) readyHandler(sm *services.Manager) http.HandlerFunc {
return
}
+ if f.ingester != nil {
+ if err := f.ingester.CheckReady(r.Context()); err != nil {
+ http.Error(w, "Ingester not ready: "+err.Error(), http.StatusServiceUnavailable)
+ return
+ }
+ }
+
+ if f.frontend != nil {
+ if err := f.frontend.CheckReady(r.Context()); err != nil {
+ http.Error(w, "Query Frontend not ready: "+err.Error(), http.StatusServiceUnavailable)
+ return
+ }
+ }
+
util.WriteTextResponse(w, "ready")
}
}
diff --git a/pkg/phlaredb/block/list.go b/pkg/phlaredb/block/list.go
index 1312926c74..76327caae8 100644
--- a/pkg/phlaredb/block/list.go
+++ b/pkg/phlaredb/block/list.go
@@ -1,21 +1,12 @@
package block
import (
- "context"
- "fmt"
"os"
- "path"
"path/filepath"
"sort"
"time"
- "github.com/go-kit/log/level"
"github.com/oklog/ulid"
- "github.com/thanos-io/objstore"
- "golang.org/x/sync/errgroup"
-
- phlareobj "github.com/grafana/pyroscope/pkg/objstore"
- "github.com/grafana/pyroscope/pkg/util"
)
func ListBlocks(path string, ulidMinTime time.Time) (map[ulid.ULID]*Meta, error) {
@@ -45,112 +36,6 @@ func ListBlocks(path string, ulidMinTime time.Time) (map[ulid.ULID]*Meta, error)
return result, nil
}
-// IterBlockMetas iterates over all block metas in the given time range.
-// It calls the given function for each block meta.
-// It returns the first error returned by the function.
-// It returns nil if all calls succeed.
-// The function is called concurrently.
-func IterBlockMetas(ctx context.Context, bkt phlareobj.Bucket, from, to time.Time, fn func(*Meta)) error {
- allIDs, err := listAllBlockByPrefixes(ctx, bkt, from, to)
- if err != nil {
- return err
- }
- g, ctx := errgroup.WithContext(ctx)
- g.SetLimit(128)
-
- // fetch all meta.json
- for _, ids := range allIDs {
- for _, id := range ids {
- id := id
- g.Go(func() error {
- r, err := bkt.Get(ctx, path.Join(id, MetaFilename))
- if err != nil {
- if bkt.IsObjNotFoundErr(err) {
- level.Info(util.Logger).Log("msg", "skipping block as meta.json not found", "id", id)
- return nil
- }
- return err
- }
-
- m, err := Read(r)
- if err != nil {
- return err
- }
- fn(m)
- return nil
- })
- }
- }
- return g.Wait()
-}
-
-func listAllBlockByPrefixes(ctx context.Context, bkt phlareobj.Bucket, from, to time.Time) ([][]string, error) {
- // todo: We should cache prefixes listing per tenants.
- blockPrefixes, err := blockPrefixesFromTo(from, to, 4)
- if err != nil {
- return nil, err
- }
- ids := make([][]string, len(blockPrefixes))
- g, ctx := errgroup.WithContext(ctx)
- g.SetLimit(64)
-
- for i, prefix := range blockPrefixes {
- prefix := prefix
- i := i
- g.Go(func() error {
- level.Debug(util.Logger).Log("msg", "listing blocks", "prefix", prefix, "i", i)
- prefixIds := []string{}
- err := bkt.Iter(ctx, prefix, func(name string) error {
- if _, ok := IsBlockDir(name); ok {
- prefixIds = append(prefixIds, name)
- }
- return nil
- }, objstore.WithoutApendingDirDelim)
- if err != nil {
- return err
- }
- ids[i] = prefixIds
- return nil
- })
- }
- if err := g.Wait(); err != nil {
- return nil, err
- }
- return ids, nil
-}
-
-// orderOfSplit is the number of bytes of the ulid id used for the split. The duration of the split is:
-// 0: 1114y
-// 1: 34.8y
-// 2: 1y
-// 3: 12.4d
-// 4: 9h19m
-// TODO: To needs to be adapted based on the MaxBlockDuration.
-func blockPrefixesFromTo(from, to time.Time, orderOfSplit uint8) (prefixes []string, err error) {
- var id ulid.ULID
-
- if orderOfSplit > 9 {
- return nil, fmt.Errorf("order of split must be between 0 and 9")
- }
-
- byteShift := (9 - orderOfSplit) * 5
-
- ms := uint64(from.UnixMilli()) >> byteShift
- ms = ms << byteShift
- for ms <= uint64(to.UnixMilli()) {
- if err := id.SetTime(ms); err != nil {
- return nil, err
- }
- prefixes = append(prefixes, id.String()[:orderOfSplit+1])
-
- ms = ms >> byteShift
- ms += 1
- ms = ms << byteShift
- }
-
- return prefixes, nil
-}
-
func SortBlocks(metas map[ulid.ULID]*Meta) []*Meta {
var blocks []*Meta
diff --git a/pkg/phlaredb/block/list_test.go b/pkg/phlaredb/block/list_test.go
deleted file mode 100644
index 8f93da44d3..0000000000
--- a/pkg/phlaredb/block/list_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package block
-
-import (
- "bytes"
- "context"
- "crypto/rand"
- "path"
- "testing"
- "time"
-
- "github.com/oklog/ulid"
- "github.com/prometheus/common/model"
- "github.com/stretchr/testify/require"
-
- objstore_testutil "github.com/grafana/pyroscope/pkg/objstore/testutil"
-)
-
-func TestIterBlockMetas(t *testing.T) {
- bucketClient, _ := objstore_testutil.NewFilesystemBucket(t, context.Background(), t.TempDir())
-
- u := ulid.MustNew(uint64(model.Now()), rand.Reader).String()
- err := bucketClient.Upload(context.Background(), path.Join(u, "index"), bytes.NewBufferString("foo"))
- require.NoError(t, err)
- meta := Meta{
- Version: MetaVersion3,
- ULID: ulid.MustNew(ulid.Now(), rand.Reader),
- }
- buf := bytes.NewBuffer(nil)
- _, err = meta.WriteTo(buf)
- require.NoError(t, err)
-
- err = bucketClient.Upload(context.Background(), path.Join(meta.ULID.String(), MetaFilename), buf)
- require.NoError(t, err)
- found := false
- err = IterBlockMetas(context.Background(), bucketClient, time.Now().Add(-24*time.Hour), time.Now().Add(24*time.Hour), func(m *Meta) {
- found = true
- require.Equal(t, meta.ULID, m.ULID)
- })
- require.NoError(t, err)
- require.True(t, found, "expected to find block meta")
-}
diff --git a/pkg/phlaredb/downsample/downsample.go b/pkg/phlaredb/downsample/downsample.go
index 88a4d2bfae..bb19f885cc 100644
--- a/pkg/phlaredb/downsample/downsample.go
+++ b/pkg/phlaredb/downsample/downsample.go
@@ -33,6 +33,7 @@ type state struct {
currentRow parquet.Row
currentTime int64
currentFp model.Fingerprint
+ currentPartition uint64
totalValue int64
profileCount int64
stackTraceIds []uint64
@@ -208,14 +209,14 @@ func (d *Downsampler) AddRow(row schemav1.ProfileRow, fp model.Fingerprint) erro
s := d.states[i]
aggregationTime := rowTimeSeconds / c.interval.durationSeconds * c.interval.durationSeconds
if len(d.states[i].currentRow) == 0 {
- d.initStateFromRow(s, row, aggregationTime, fp)
+ s.init(row, aggregationTime, fp)
}
- if s.currentTime != aggregationTime || s.currentFp != fp {
+ if !s.matches(aggregationTime, fp, row.StacktracePartitionID()) {
err := d.flush(s, d.profileWriters[i], c)
if err != nil {
return err
}
- d.initStateFromRow(s, row, aggregationTime, fp)
+ s.init(row, aggregationTime, fp)
}
s.profileCount++
row.ForStacktraceIdsAndValues(func(stacktraceIds []parquet.Value, values []parquet.Value) {
@@ -255,9 +256,10 @@ func (d *Downsampler) Close() error {
return nil
}
-func (d *Downsampler) initStateFromRow(s *state, row schemav1.ProfileRow, aggregationTime int64, fp model.Fingerprint) {
+func (s *state) init(row schemav1.ProfileRow, aggregationTime int64, fp model.Fingerprint) {
s.currentTime = aggregationTime
s.currentFp = fp
+ s.currentPartition = row.StacktracePartitionID()
s.totalValue = 0
s.profileCount = 0
if s.values == nil {
@@ -290,3 +292,7 @@ func (d *Downsampler) initStateFromRow(s *state, row schemav1.ProfileRow, aggreg
s.currentRow = append(s.currentRow, parquet.Int32Value(int32(row.SeriesIndex())).Level(0, 0, newCol()))
s.currentRow = append(s.currentRow, parquet.Int64Value(int64(row.StacktracePartitionID())).Level(0, 0, newCol()))
}
+
+func (s *state) matches(t int64, fp model.Fingerprint, sp uint64) bool {
+ return s.currentTime == t && s.currentFp == fp && s.currentPartition == sp
+}
diff --git a/pkg/phlaredb/downsample/downsample_test.go b/pkg/phlaredb/downsample/downsample_test.go
index 3a7e725c54..59432ad231 100644
--- a/pkg/phlaredb/downsample/downsample_test.go
+++ b/pkg/phlaredb/downsample/downsample_test.go
@@ -8,6 +8,7 @@ import (
"github.com/go-kit/log"
"github.com/parquet-go/parquet-go"
"github.com/prometheus/common/model"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
phlareparquet "github.com/grafana/pyroscope/pkg/parquet"
@@ -39,8 +40,8 @@ func TestDownsampler_ProfileCounts(t *testing.T) {
err = d.Close()
require.NoError(t, err)
- verifyProfileCount(t, outDir, "profiles_5m_sum.parquet", 1867)
- verifyProfileCount(t, outDir, "profiles_1h_sum.parquet", 1)
+ verifyProfileCount(t, outDir, "profiles_5m_sum.parquet", 1869)
+ verifyProfileCount(t, outDir, "profiles_1h_sum.parquet", 50)
}
func TestDownsampler_Aggregation(t *testing.T) {
@@ -134,6 +135,53 @@ func TestDownsampler_VaryingFingerprints(t *testing.T) {
verifyProfileCount(t, outDir, "profiles_1h_sum.parquet", 5)
}
+func TestDownsampler_VaryingPartition(t *testing.T) {
+ profiles := make([]schemav1.InMemoryProfile, 0)
+ builder := testhelper.NewProfileBuilder(1703853310000000000).CPUProfile()
+ builder.ForStacktraceString("a", "b", "c").AddSamples(30)
+ builder.ForStacktraceString("a", "b", "c", "d").AddSamples(20)
+ batch, _ := schemav1testhelper.NewProfileSchema(builder.Profile, "cpu")
+ profiles = append(profiles, batch...)
+
+ builder = testhelper.NewProfileBuilder(1703853311000000000).CPUProfile()
+ builder.ForStacktraceString("a", "b", "c").AddSamples(30)
+ builder.ForStacktraceString("a", "b", "c", "d").AddSamples(20)
+ batch, _ = schemav1testhelper.NewProfileSchema(builder.Profile, "cpu")
+ profiles = append(profiles, batch...)
+
+ reader := schemav1.NewInMemoryProfilesRowReader(profiles)
+ rows, err := phlareparquet.ReadAllWithBufferSize(reader, 5)
+ require.NoError(t, err)
+
+ outDir := t.TempDir()
+ d, err := NewDownsampler(outDir, log.NewNopLogger())
+ require.NoError(t, err)
+
+ for i, row := range rows {
+ r := schemav1.ProfileRow(row)
+ r.SetStacktracePartitionID(uint64(i))
+ err = d.AddRow(r, 1)
+ require.NoError(t, err)
+ }
+
+ err = d.Close()
+ require.NoError(t, err)
+
+ downsampledRows := readDownsampledRows(t, filepath.Join(outDir, "profiles_5m_sum.parquet"), 2)
+ schemav1.DownsampledProfileRow(downsampledRows[0]).ForValues(func(values []parquet.Value) {
+ assert.Equal(t, 2, len(values))
+ assert.Equal(t, int64(30), values[0].Int64()) // a, b, c
+ assert.Equal(t, int64(20), values[1].Int64()) // a, b, c, d
+ })
+
+ downsampledRows = readDownsampledRows(t, filepath.Join(outDir, "profiles_1h_sum.parquet"), 2)
+ schemav1.DownsampledProfileRow(downsampledRows[0]).ForValues(func(values []parquet.Value) {
+ assert.Equal(t, 2, len(values))
+ assert.Equal(t, int64(30), values[0].Int64()) // a, b, c
+ assert.Equal(t, int64(20), values[1].Int64()) // a, b, c, d
+ })
+}
+
func BenchmarkDownsampler_AddRow(b *testing.B) {
f, err := os.Open("../testdata/01HHYG6245NWHZWVP27V8WJRT7/profiles.parquet")
require.NoError(b, err)
diff --git a/pkg/phlaredb/profile_store.go b/pkg/phlaredb/profile_store.go
index 931e3acca7..f639dafdd2 100644
--- a/pkg/phlaredb/profile_store.go
+++ b/pkg/phlaredb/profile_store.go
@@ -284,6 +284,8 @@ func (s *profileStore) cutRowGroup(count int) (err error) {
// held for long as it only performs in-memory operations,
// although blocking readers.
s.rowsLock.Lock()
+ // After the lock is released, rows/profiles should be read from the disk.
+ defer s.rowsLock.Unlock()
s.rowsFlushed += uint64(n)
s.rowGroups = append(s.rowGroups, rowGroup)
// Cutting the index is relatively quick op (no I/O).
@@ -303,8 +305,6 @@ func (s *profileStore) cutRowGroup(count int) (err error) {
level.Debug(s.logger).Log("msg", "cut row group segment", "path", path, "numProfiles", n)
s.metrics.sizeBytes.WithLabelValues(s.Name()).Set(float64(currentSize))
- // After the lock is released, rows/profiles should be read from the disk.
- s.rowsLock.Unlock()
return nil
}
@@ -381,6 +381,9 @@ func (s *profileStore) writeRowGroups(path string, rowGroups []parquet.RowGroup)
readers[i] = rg.Rows()
}
n, numRowGroups, err = phlareparquet.CopyAsRowGroups(s.writer, schemav1.NewMergeProfilesRowReader(readers), s.cfg.MaxBufferRowCount)
+ if err != nil {
+ return 0, 0, err
+ }
if err := s.writer.Close(); err != nil {
return 0, 0, err
diff --git a/pkg/phlaredb/schemas/v1/profiles.go b/pkg/phlaredb/schemas/v1/profiles.go
index 78db2261a6..d03f78f6fc 100644
--- a/pkg/phlaredb/schemas/v1/profiles.go
+++ b/pkg/phlaredb/schemas/v1/profiles.go
@@ -503,8 +503,9 @@ func deconstructMemoryProfile(imp InMemoryProfile, row parquet.Row) parquet.Row
col++
return col
}
- totalCols = 8 + (7 * len(imp.Samples.StacktraceIDs)) + len(imp.Comments)
+ totalCols = profileColumnCount(imp)
)
+
if cap(row) < totalCols {
row = make(parquet.Row, 0, totalCols)
}
@@ -614,6 +615,17 @@ func deconstructMemoryProfile(imp InMemoryProfile, row parquet.Row) parquet.Row
return row
}
+func profileColumnCount(imp InMemoryProfile) int {
+ var totalCols = 10 + (7 * len(imp.Samples.StacktraceIDs)) + len(imp.Comments)
+ if len(imp.Comments) == 0 {
+ totalCols++
+ }
+ if len(imp.Samples.StacktraceIDs) == 0 {
+ totalCols += 7
+ }
+ return totalCols
+}
+
func NewMergeProfilesRowReader(rowGroups []parquet.RowReader) parquet.RowReader {
if len(rowGroups) == 0 {
return phlareparquet.EmptyRowReader
@@ -672,6 +684,10 @@ func (p ProfileRow) SetSeriesIndex(v uint32) {
p[seriesIndexColIndex] = parquet.Int32Value(int32(v)).Level(0, 0, seriesIndexColIndex)
}
+func (p ProfileRow) SetStacktracePartitionID(v uint64) {
+ p[stacktracePartitionColIndex] = parquet.Int64Value(int64(v)).Level(0, 0, stacktracePartitionColIndex)
+}
+
func (p ProfileRow) ForStacktraceIDsValues(fn func([]parquet.Value)) {
start := -1
var i int
diff --git a/pkg/phlaredb/schemas/v1/profiles_test.go b/pkg/phlaredb/schemas/v1/profiles_test.go
index ce53493f54..80b84f33c4 100644
--- a/pkg/phlaredb/schemas/v1/profiles_test.go
+++ b/pkg/phlaredb/schemas/v1/profiles_test.go
@@ -542,3 +542,41 @@ func Test_SamplesRange(t *testing.T) {
})
}
}
+
+func TestColumnCount(t *testing.T) {
+ profiles := []InMemoryProfile{{
+ SeriesIndex: 1,
+ TimeNanos: 2,
+ Samples: Samples{
+ StacktraceIDs: []uint32{1, 2, 3},
+ Values: []uint64{1, 2, 3},
+ },
+ },
+ {
+ SeriesIndex: 1,
+ TimeNanos: 2,
+ Samples: Samples{
+ StacktraceIDs: []uint32{1, 2, 3},
+ Values: []uint64{1, 2, 3},
+ Spans: []uint64{1, 2, 3},
+ },
+ },
+ {
+ SeriesIndex: 1,
+ TimeNanos: 2,
+ Samples: Samples{
+ StacktraceIDs: []uint32{1, 2, 3},
+ Values: []uint64{1, 2, 3},
+ Spans: []uint64{1, 2, 3},
+ },
+ Comments: []int64{1, 2, 3},
+ }}
+ for _, profile := range profiles {
+ count := profileColumnCount(profile)
+
+ row := deconstructMemoryProfile(profile, nil)
+ assert.Equal(t, len(row), count)
+ assert.Equal(t, cap(row), count)
+ }
+
+}
diff --git a/pkg/phlaredb/symdb/resolver_pprof.go b/pkg/phlaredb/symdb/resolver_pprof.go
index c71e0bfe1b..ba1dd4639e 100644
--- a/pkg/phlaredb/symdb/resolver_pprof.go
+++ b/pkg/phlaredb/symdb/resolver_pprof.go
@@ -39,7 +39,7 @@ func buildPprof(
// limit on the number of the nodes in the profile, or
// if stack traces should be filtered by the call site.
case maxNodes > 0 || len(selection.callSite) > 0:
- b = &pprofTree{maxNodes: maxNodes, callSite: selection.callSite}
+ b = &pprofTree{maxNodes: maxNodes, selection: selection}
}
b.init(symbols, samples)
if err := symbols.Stacktraces.ResolveStacktraceLocations(ctx, b, samples.StacktraceIDs); err != nil {
diff --git a/pkg/phlaredb/symdb/resolver_pprof_test.go b/pkg/phlaredb/symdb/resolver_pprof_test.go
index 70817deffa..f67707e5a0 100644
--- a/pkg/phlaredb/symdb/resolver_pprof_test.go
+++ b/pkg/phlaredb/symdb/resolver_pprof_test.go
@@ -141,6 +141,79 @@ func Test_Pprof_subtree(t *testing.T) {
require.Equal(t, expected, actual)
}
+func Test_Pprof_subtree_multiple_versions(t *testing.T) {
+ profile := &googlev1.Profile{
+ StringTable: []string{"", "a", "b", "c", "d"},
+ Function: []*googlev1.Function{
+ {Id: 1, Name: 1}, // a
+ {Id: 2, Name: 2}, // b
+ {Id: 3, Name: 3}, // c
+ {Id: 4, Name: 4, StartLine: 1}, // d
+ {Id: 5, Name: 4, StartLine: 2}, // d(2)
+ },
+ Mapping: []*googlev1.Mapping{{Id: 1}},
+ Location: []*googlev1.Location{
+ {Id: 1, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 1, Line: 1}}}, // a
+ {Id: 2, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 2, Line: 1}}}, // b:1
+ {Id: 3, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 2, Line: 2}}}, // b:2
+ {Id: 4, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 3, Line: 1}}}, // c
+ {Id: 5, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 4, Line: 1}}}, // d
+ {Id: 6, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 5, Line: 1}}}, // d(2)
+ },
+ Sample: []*googlev1.Sample{
+ {LocationId: []uint64{5, 4, 2, 1}, Value: []int64{1}}, // a, b:1, c, d
+ {LocationId: []uint64{6, 4, 3, 1}, Value: []int64{1}}, // a, b:2, c, d(2)
+ {LocationId: []uint64{3, 1}, Value: []int64{1}}, // a, b:2
+ {LocationId: []uint64{4, 1}, Value: []int64{1}}, // a, c
+ {LocationId: []uint64{5}, Value: []int64{1}}, // d
+ {LocationId: []uint64{6}, Value: []int64{1}}, // d (2)
+ },
+ }
+
+ db := NewSymDB(DefaultConfig().WithDirectory(t.TempDir()))
+ w := db.WriteProfileSymbols(0, profile)
+ r := NewResolver(context.Background(), db,
+ WithResolverStackTraceSelector(&typesv1.StackTraceSelector{
+ CallSite: []*typesv1.Location{{Name: "a"}, {Name: "b"}, {Name: "c"}, {Name: "d"}},
+ }))
+
+ r.AddSamples(0, w[0].Samples)
+ actual, err := r.Pprof()
+ require.NoError(t, err)
+ // Sample order is not deterministic.
+ sort.Slice(actual.Sample, func(i, j int) bool {
+ return slices.Compare(actual.Sample[i].LocationId, actual.Sample[j].LocationId) >= 0
+ })
+
+ expected := &googlev1.Profile{
+ PeriodType: &googlev1.ValueType{},
+ SampleType: []*googlev1.ValueType{{}},
+ StringTable: []string{"", "a", "b", "c", "d"},
+ Function: []*googlev1.Function{
+ {Id: 1, Name: 1}, // a
+ {Id: 2, Name: 2}, // b
+ {Id: 3, Name: 3}, // c
+ {Id: 4, Name: 4, StartLine: 1}, // d
+ {Id: 5, Name: 4, StartLine: 2}, // d(2)
+ },
+ Mapping: []*googlev1.Mapping{{Id: 1}},
+ Location: []*googlev1.Location{
+ {Id: 1, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 1, Line: 1}}}, // a
+ {Id: 2, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 2, Line: 1}}}, // b:1
+ {Id: 3, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 2, Line: 2}}}, // b:2
+ {Id: 4, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 3, Line: 1}}}, // c
+ {Id: 5, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 4, Line: 1}}}, // d
+ {Id: 6, MappingId: 1, Line: []*googlev1.Line{{FunctionId: 5, Line: 1}}}, // d(2)
+ },
+ Sample: []*googlev1.Sample{
+ {LocationId: []uint64{6, 4, 3, 1}, Value: []int64{1}}, // a, b:2, c, d(2)
+ {LocationId: []uint64{5, 4, 2, 1}, Value: []int64{1}}, // a, b:1, c, d
+ },
+ }
+
+ require.Equal(t, expected, actual)
+}
+
func Test_Resolver_pprof_options(t *testing.T) {
s := newMemSuite(t, [][]string{{"testdata/profile.pb.gz"}})
samples := s.indexed[0][0].Samples
diff --git a/pkg/phlaredb/symdb/resolver_pprof_tree.go b/pkg/phlaredb/symdb/resolver_pprof_tree.go
index 4a54e49054..da7de651a4 100644
--- a/pkg/phlaredb/symdb/resolver_pprof_tree.go
+++ b/pkg/phlaredb/symdb/resolver_pprof_tree.go
@@ -31,8 +31,8 @@ type pprofTree struct {
functionsBuf []int32
locationsBuf []uint64
- callSite []uint32
- fnNames func(locations []int32) ([]int32, bool)
+ selection *SelectedStackTraces
+ fnNames func(locations []int32) ([]int32, bool)
// After truncation many samples will have the same stack trace.
// The map is used to deduplicate them. The key is sample.LocationId
@@ -57,7 +57,7 @@ func (r *pprofTree) init(symbols *Symbols, samples schemav1.Samples) {
r.functionTree = model.NewStacktraceTree(samples.Len() * 2)
r.stacktraces = make([]truncatedStacktraceSample, 0, samples.Len())
r.sampleMap = make(map[string]*googlev1.Sample, samples.Len())
- if len(r.callSite) > 0 {
+ if r.selection != nil && len(r.selection.callSite) > 0 {
r.fnNames = r.locFunctionsFiltered
} else {
r.fnNames = r.locFunctions
@@ -92,7 +92,7 @@ func (r *pprofTree) locFunctions(locations []int32) ([]int32, bool) {
func (r *pprofTree) locFunctionsFiltered(locations []int32) ([]int32, bool) {
r.functionsBuf = r.functionsBuf[:0]
var pos int
- pathLen := len(r.callSite)
+ pathLen := int(r.selection.depth)
// Even if len(locations) < pathLen, we still
// need to inspect locations line by line.
for i := len(locations) - 1; i >= 0; i-- {
@@ -100,7 +100,7 @@ func (r *pprofTree) locFunctionsFiltered(locations []int32) ([]int32, bool) {
for j := len(lines) - 1; j >= 0; j-- {
f := lines[j].FunctionId
if pos < pathLen {
- if r.callSite[pos] != f {
+ if r.selection.callSite[pos] != r.selection.funcNames[f] {
return nil, false
}
pos++
diff --git a/pkg/phlaredb/symdb/stacktrace_selection.go b/pkg/phlaredb/symdb/stacktrace_selection.go
index 1aca45fc57..02413e2599 100644
--- a/pkg/phlaredb/symdb/stacktrace_selection.go
+++ b/pkg/phlaredb/symdb/stacktrace_selection.go
@@ -44,13 +44,18 @@ type SelectedStackTraces struct {
symbols *Symbols
// Go PGO filter.
gopgo *typesv1.GoPGO
- // call_site filter
+ // Call site filter
relations map[uint32]stackTraceLocationRelation
callSiteSelector []*typesv1.Location
- callSite []uint32 // stack trace of the call site
- location uint32 // stack trace leaf
+ callSite []string // call site strings in the original order.
+ location string // stack trace leaf function.
depth uint32
buf []uint64
+ // Function ID => name. The lookup table is used to
+ // avoid unnecessary indirect accesses through the
+ // strings[functions[id].Name] path. Instead, the
+ // name can be resolved directly funcNames[id].
+ funcNames []string
}
func SelectStackTraces(symbols *Symbols, selector *typesv1.StackTraceSelector) *SelectedStackTraces {
@@ -59,10 +64,14 @@ func SelectStackTraces(symbols *Symbols, selector *typesv1.StackTraceSelector) *
callSiteSelector: selector.GetCallSite(),
gopgo: selector.GetGoPgo(),
}
- x.callSite = findCallSite(symbols, x.callSiteSelector)
+ x.callSite = callSiteFunctions(x.callSiteSelector)
if x.depth = uint32(len(x.callSite)); x.depth > 0 {
x.location = x.callSite[x.depth-1]
}
+ x.funcNames = make([]string, len(symbols.Functions))
+ for i, f := range symbols.Functions {
+ x.funcNames[i] = symbols.Strings[f.Name]
+ }
return x
}
@@ -144,9 +153,11 @@ func (x *SelectedStackTraces) appendStackTrace(locations []uint64) stackTraceLoc
lines := x.symbols.Locations[locations[i]].Line
for j := len(lines) - 1; j >= 0; j-- {
f := lines[j].FunctionId
- n += eq(x.location, f)
- if pos < x.depth && pos == l {
- pos += eq(x.callSite[pos], f)
+ if x.location == x.funcNames[f] {
+ n++
+ }
+ if pos < x.depth && pos == l && x.callSite[pos] == x.funcNames[f] {
+ pos++
}
l++
}
@@ -154,54 +165,22 @@ func (x *SelectedStackTraces) appendStackTrace(locations []uint64) stackTraceLoc
if n == 0 {
return 0
}
+ var isLeaf uint32
leaf := x.symbols.Locations[locations[0]].Line[0]
- isLeaf := eq(x.location, leaf.FunctionId)
- inSubtree := ge(pos, x.depth)
- return stackTraceLocationRelation(inSubtree | isLeaf<<1 | (1-isLeaf)<<2)
-}
-
-func eq(a, b uint32) uint32 {
- if a == b {
- return 1
+ if x.location == x.funcNames[leaf.FunctionId] {
+ isLeaf = 1
}
- return 0
-}
-
-func ge(a, b uint32) uint32 {
- if a >= b {
- return 1
+ var inSubtree uint32
+ if pos >= x.depth {
+ inSubtree = 1
}
- return 0
+ return stackTraceLocationRelation(inSubtree | isLeaf<<1 | (1-isLeaf)<<2)
}
-// findCallSite returns the stack trace of the call site
-// where each element in the stack trace is represented by
-// the function ID. Call site is the last element.
-// TODO(kolesnikovae): Location should also include the line number.
-func findCallSite(symbols *Symbols, locations []*typesv1.Location) []uint32 {
- if len(locations) == 0 {
- return nil
- }
- m := make(map[string]uint32, len(locations))
- for _, loc := range locations {
- m[loc.Name] = 0
- }
- c := len(m) // Only count unique names.
- for f := 0; f < len(symbols.Functions) && c > 0; f++ {
- s := symbols.Strings[symbols.Functions[f].Name]
- if _, ok := m[s]; ok {
- // We assume that no functions have the same name.
- // Otherwise, the last one takes precedence.
- m[s] = uint32(f) // f is FunctionId
- c--
- }
- }
- if c > 0 {
- return nil
- }
- callSite := make([]uint32, len(locations))
+func callSiteFunctions(locations []*typesv1.Location) []string {
+ callSite := make([]string, len(locations))
for i, loc := range locations {
- callSite[i] = m[loc.Name]
+ callSite[i] = loc.Name
}
return callSite
}
diff --git a/pkg/phlaredb/symdb/stacktrace_tree.go b/pkg/phlaredb/symdb/stacktrace_tree.go
index cb610fe0fe..38e1236c6c 100644
--- a/pkg/phlaredb/symdb/stacktrace_tree.go
+++ b/pkg/phlaredb/symdb/stacktrace_tree.go
@@ -153,10 +153,10 @@ func newParentPointerTree(size uint32) *parentPointerTree {
}
func (t *parentPointerTree) resolve(dst []int32, id uint32) []int32 {
+ dst = dst[:0]
if id >= uint32(len(t.nodes)) {
return dst
}
- dst = dst[:0]
n := t.nodes[id]
for n.p >= 0 {
dst = append(dst, n.r)
@@ -166,10 +166,10 @@ func (t *parentPointerTree) resolve(dst []int32, id uint32) []int32 {
}
func (t *parentPointerTree) resolveUint64(dst []uint64, id uint32) []uint64 {
+ dst = dst[:0]
if id >= uint32(len(t.nodes)) {
return dst
}
- dst = dst[:0]
n := t.nodes[id]
for n.p >= 0 {
dst = append(dst, uint64(n.r))
@@ -186,6 +186,37 @@ func (t *parentPointerTree) Nodes() []Node {
return dst
}
+func (t *parentPointerTree) toStacktraceTree() *stacktraceTree {
+ l := int32(len(t.nodes))
+ x := stacktraceTree{nodes: make([]node, l)}
+ x.nodes[0] = node{
+ p: sentinel,
+ fc: sentinel,
+ ns: sentinel,
+ }
+ lc := make([]int32, len(t.nodes))
+ var s int32
+ for i := int32(1); i < l; i++ {
+ n := t.nodes[i]
+ x.nodes[i] = node{
+ p: n.p,
+ r: n.r,
+ fc: sentinel,
+ ns: sentinel,
+ }
+ // Swap the last child of the parent with self.
+ // If this is the first child, update the parent.
+ // Otherwise, update the sibling.
+ s, lc[n.p] = lc[n.p], i
+ if s == 0 {
+ x.nodes[n.p].fc = i
+ } else {
+ x.nodes[s].ns = i
+ }
+ }
+ return &x
+}
+
// ReadFrom decodes parent pointer tree from the reader.
// The tree must have enough nodes.
func (t *parentPointerTree) ReadFrom(r io.Reader) (int64, error) {
diff --git a/pkg/phlaredb/symdb/stacktrace_tree_test.go b/pkg/phlaredb/symdb/stacktrace_tree_test.go
index 55eef46a2f..e3db486b5a 100644
--- a/pkg/phlaredb/symdb/stacktrace_tree_test.go
+++ b/pkg/phlaredb/symdb/stacktrace_tree_test.go
@@ -115,6 +115,16 @@ func Test_stacktrace_tree_encoding_rand(t *testing.T) {
}
}
+func Test_stacktrace_tree_pprof_locations_(t *testing.T) {
+ x := newStacktraceTree(0)
+ assert.Len(t, x.resolve([]int32{0, 1, 2, 3}, 42), 0)
+ assert.Len(t, x.resolveUint64([]uint64{0, 1, 2, 3}, 42), 0)
+
+ p := newParentPointerTree(0)
+ assert.Len(t, p.resolve([]int32{0, 1, 2, 3}, 42), 0)
+ assert.Len(t, p.resolveUint64([]uint64{0, 1, 2, 3}, 42), 0)
+}
+
func Test_stacktrace_tree_pprof_locations(t *testing.T) {
p, err := pprof.OpenFile("testdata/profile.pb.gz")
require.NoError(t, err)
@@ -164,6 +174,44 @@ func Test_stacktrace_tree_pprof_locations(t *testing.T) {
}
}
+// The test is helpful for debugging.
+func Test_parentPointerTree_toStacktraceTree(t *testing.T) {
+ x := newStacktraceTree(10)
+ for _, stack := range [][]uint64{
+ {5, 4, 3, 2, 1},
+ {6, 4, 3, 2, 1},
+ {4, 3, 2, 1},
+ {3, 2, 1},
+ {4, 2, 1},
+ {7, 2, 1},
+ {2, 1},
+ {1},
+ } {
+ x.insert(stack)
+ }
+ assertRestoredStacktraceTree(t, x)
+}
+
+func Test_parentPointerTree_toStacktraceTree_profile(t *testing.T) {
+ p, err := pprof.OpenFile("testdata/profile.pb.gz")
+ require.NoError(t, err)
+ x := newStacktraceTree(defaultStacktraceTreeSize)
+ for _, s := range p.Sample {
+ x.insert(s.LocationId)
+ }
+ assertRestoredStacktraceTree(t, x)
+}
+
+func assertRestoredStacktraceTree(t *testing.T, x *stacktraceTree) {
+ var b bytes.Buffer
+ _, _ = x.WriteTo(&b)
+ ppt := newParentPointerTree(x.len())
+ _, err := ppt.ReadFrom(bytes.NewBuffer(b.Bytes()))
+ require.NoError(t, err)
+ restored := ppt.toStacktraceTree()
+ assert.Equal(t, x.nodes, restored.nodes)
+}
+
func Benchmark_stacktrace_tree_insert(b *testing.B) {
p, err := pprof.OpenFile("testdata/profile.pb.gz")
require.NoError(b, err)
diff --git a/pkg/pprof/pprof.go b/pkg/pprof/pprof.go
index 434e89d296..ef4fda5b01 100644
--- a/pkg/pprof/pprof.go
+++ b/pkg/pprof/pprof.go
@@ -662,6 +662,7 @@ func GroupSamplesWithoutLabelsByKey(p *profilev1.Profile, keys []int64) []Sample
// We hide labels matching the keys to the end
// of the slice, after len() boundary.
s.Label = LabelsWithout(s.Label, keys)
+ sort.Sort(LabelsByKeyValue(s.Label)) // TODO: Find a way to avoid this.
}
// Sorting and grouping accounts only for labels kept.
sort.Sort(SamplesByLabels(p.Sample))
@@ -677,7 +678,7 @@ func GroupSamplesWithoutLabelsByKey(p *profilev1.Profile, keys []int64) []Sample
func restoreRemovedLabels(labels []*profilev1.Label) []*profilev1.Label {
labels = labels[len(labels):cap(labels)]
for i, l := range labels {
- if l == nil {
+ if l == nil { // labels had extra capacity in sample labels
labels = labels[:i]
break
}
diff --git a/pkg/pprof/pprof_test.go b/pkg/pprof/pprof_test.go
index c22263a9f6..615fbc6d22 100644
--- a/pkg/pprof/pprof_test.go
+++ b/pkg/pprof/pprof_test.go
@@ -931,16 +931,16 @@ func Test_GroupSamplesWithout(t *testing.T) {
},
},
{
- Labels: []*profilev1.Label{{Key: 1, Str: 3}},
+ Labels: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 2, Str: 2}},
Samples: []*profilev1.Sample{
{Label: []*profilev1.Label{}},
+ {Label: []*profilev1.Label{}},
},
},
{
- Labels: []*profilev1.Label{{Key: 2, Str: 2}, {Key: 1, Str: 1}},
+ Labels: []*profilev1.Label{{Key: 1, Str: 3}},
Samples: []*profilev1.Sample{
{Label: []*profilev1.Label{}},
- {Label: []*profilev1.Label{}},
},
},
},
@@ -971,14 +971,14 @@ func Test_GroupSamplesWithout(t *testing.T) {
},
},
{
- Labels: []*profilev1.Label{{Key: 3, Str: 3}, {Key: 1, Str: 1}},
+ Labels: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 3, Str: 3}},
Samples: []*profilev1.Sample{
{Label: []*profilev1.Label{{Key: 2, Str: 100}}},
{Label: []*profilev1.Label{{Key: 2, Str: 101}}},
},
},
{
- Labels: []*profilev1.Label{{Key: 3, Str: 4}, {Key: 1, Str: 1}},
+ Labels: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 3, Str: 4}},
Samples: []*profilev1.Sample{
{Label: []*profilev1.Label{{Key: 2, Str: 102}}},
},
@@ -1009,7 +1009,7 @@ func Test_GroupSamplesWithout(t *testing.T) {
},
},
{
- Labels: []*profilev1.Label{{Key: 3, Str: 3}, {Key: 2, Str: 2}, {Key: 1, Str: 1}},
+ Labels: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 2, Str: 2}, {Key: 3, Str: 3}},
Samples: []*profilev1.Sample{
{Label: []*profilev1.Label{}},
},
@@ -1048,12 +1048,57 @@ func Test_GroupSamplesWithout(t *testing.T) {
},
},
},
+ {
+ description: "without single existent, single group",
+ input: &profilev1.Profile{
+ Sample: []*profilev1.Sample{
+ {Label: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 2, Str: 100}, {Key: 3, Str: 3}}},
+ {Label: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 3, Str: 3}}},
+ },
+ },
+ without: []int64{2},
+ expected: []SampleGroup{
+ {
+ Labels: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 3, Str: 3}},
+ Samples: []*profilev1.Sample{
+ {Label: []*profilev1.Label{{Key: 2, Str: 100}}},
+ {Label: []*profilev1.Label{}},
+ },
+ },
+ },
+ },
+ {
+ description: "Testcase for extra labels capacity (restoreRemovedLabels nil check)",
+ input: &profilev1.Profile{
+ Sample: []*profilev1.Sample{
+ {Label: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 2, Str: 100}, {Key: 3, Str: 3}, nil, nil}[:3]},
+ {Label: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 3, Str: 3}}},
+ }[:2],
+ },
+ without: []int64{2},
+ expected: []SampleGroup{
+ {
+ Labels: []*profilev1.Label{{Key: 1, Str: 1}, {Key: 3, Str: 3}},
+ Samples: []*profilev1.Sample{
+ {Label: []*profilev1.Label{{Key: 2, Str: 100}}},
+ {Label: []*profilev1.Label{}},
+ },
+ },
+ },
+ },
}
for _, tc := range testCases {
tc := tc
t.Run(tc.description, func(t *testing.T) {
require.Equal(t, tc.expected, GroupSamplesWithoutLabelsByKey(tc.input, tc.without))
+ for _, g := range tc.expected {
+ for _, sample := range g.Samples {
+ for _, label := range sample.Label {
+ assert.NotNil(t, label)
+ }
+ }
+ }
})
}
}
@@ -1325,10 +1370,10 @@ func Test_GroupSamplesWithout_Go_CPU_profile(t *testing.T) {
assert.Equal(t, groups[0].Labels, []*profilev1.Label{{Key: 18, Str: 19}})
assert.Equal(t, len(groups[0].Samples), 5)
- assert.Equal(t, groups[1].Labels, []*profilev1.Label{{Key: 22, Str: 23}, {Key: 18, Str: 19}})
+ assert.Equal(t, groups[1].Labels, []*profilev1.Label{{Key: 18, Str: 19}, {Key: 22, Str: 23}})
assert.Equal(t, len(groups[1].Samples), 325)
- assert.Equal(t, groups[2].Labels, []*profilev1.Label{{Key: 22, Str: 27}, {Key: 18, Str: 19}})
+ assert.Equal(t, groups[2].Labels, []*profilev1.Label{{Key: 18, Str: 19}, {Key: 22, Str: 27}})
assert.Equal(t, len(groups[2].Samples), 150)
}
@@ -1338,7 +1383,22 @@ func Test_GroupSamplesWithout_dotnet_profile(t *testing.T) {
groups := GroupSamplesWithoutLabels(p.Profile, ProfileIDLabelName)
require.Len(t, groups, 1)
- assert.Equal(t, groups[0].Labels, []*profilev1.Label{{Key: 66, Str: 67}, {Key: 64, Str: 65}})
+ assert.Equal(t, groups[0].Labels, []*profilev1.Label{{Key: 64, Str: 65}, {Key: 66, Str: 67}})
+}
+
+func Test_GroupSamplesWithout_single_group_with_optional_span_id(t *testing.T) {
+ // pprof.Do(context.Background(), pprof.Labels("function", "slow", "qwe", "asd", "asdasd", "zxczxc"), func(c context.Context) {
+ // work(40000)
+ // pprof.Do(c, pprof.Labels("span_id", "239"), func(c context.Context) {
+ // work(40000)
+ // })
+ // })
+ p, err := OpenFile("testdata/single_group_with_optional_span_id.pb.gz")
+ require.NoError(t, err)
+
+ groups := GroupSamplesWithoutLabels(p.Profile, SpanIDLabelName)
+ require.Len(t, groups, 1)
+ assert.Equal(t, groups[0].Labels, []*profilev1.Label{{Key: 5, Str: 6}, {Key: 7, Str: 8}, {Key: 9, Str: 10}})
}
func Test_GetProfileLanguage_go_cpu_profile(t *testing.T) {
diff --git a/pkg/pprof/testdata/single_group_with_optional_span_id.pb.gz b/pkg/pprof/testdata/single_group_with_optional_span_id.pb.gz
new file mode 100644
index 0000000000..45928c6b2b
Binary files /dev/null and b/pkg/pprof/testdata/single_group_with_optional_span_id.pb.gz differ
diff --git a/pkg/querier/stats/stats.pb.go b/pkg/querier/stats/stats.pb.go
index 8919f1e9fa..2865e4d86e 100644
--- a/pkg/querier/stats/stats.pb.go
+++ b/pkg/querier/stats/stats.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: querier/stats/stats.proto
@@ -175,7 +175,7 @@ func file_querier_stats_stats_proto_rawDescGZIP() []byte {
}
var file_querier_stats_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_querier_stats_stats_proto_goTypes = []interface{}{
+var file_querier_stats_stats_proto_goTypes = []any{
(*Stats)(nil), // 0: stats.Stats
}
var file_querier_stats_stats_proto_depIdxs = []int32{
@@ -192,7 +192,7 @@ func file_querier_stats_stats_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_querier_stats_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_querier_stats_stats_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Stats); i {
case 0:
return &v.state
diff --git a/pkg/scheduler/schedulerpb/scheduler.pb.go b/pkg/scheduler/schedulerpb/scheduler.pb.go
index e717d927cf..a52a439c98 100644
--- a/pkg/scheduler/schedulerpb/scheduler.pb.go
+++ b/pkg/scheduler/schedulerpb/scheduler.pb.go
@@ -5,7 +5,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: scheduler/schedulerpb/scheduler.proto
@@ -605,7 +605,7 @@ func file_scheduler_schedulerpb_scheduler_proto_rawDescGZIP() []byte {
var file_scheduler_schedulerpb_scheduler_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_scheduler_schedulerpb_scheduler_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
-var file_scheduler_schedulerpb_scheduler_proto_goTypes = []interface{}{
+var file_scheduler_schedulerpb_scheduler_proto_goTypes = []any{
(FrontendToSchedulerType)(0), // 0: schedulerpb.FrontendToSchedulerType
(SchedulerToFrontendStatus)(0), // 1: schedulerpb.SchedulerToFrontendStatus
(*QuerierToScheduler)(nil), // 2: schedulerpb.QuerierToScheduler
@@ -640,7 +640,7 @@ func file_scheduler_schedulerpb_scheduler_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_scheduler_schedulerpb_scheduler_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_scheduler_schedulerpb_scheduler_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*QuerierToScheduler); i {
case 0:
return &v.state
@@ -652,7 +652,7 @@ func file_scheduler_schedulerpb_scheduler_proto_init() {
return nil
}
}
- file_scheduler_schedulerpb_scheduler_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_scheduler_schedulerpb_scheduler_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*SchedulerToQuerier); i {
case 0:
return &v.state
@@ -664,7 +664,7 @@ func file_scheduler_schedulerpb_scheduler_proto_init() {
return nil
}
}
- file_scheduler_schedulerpb_scheduler_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_scheduler_schedulerpb_scheduler_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*FrontendToScheduler); i {
case 0:
return &v.state
@@ -676,7 +676,7 @@ func file_scheduler_schedulerpb_scheduler_proto_init() {
return nil
}
}
- file_scheduler_schedulerpb_scheduler_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_scheduler_schedulerpb_scheduler_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*SchedulerToFrontend); i {
case 0:
return &v.state
@@ -688,7 +688,7 @@ func file_scheduler_schedulerpb_scheduler_proto_init() {
return nil
}
}
- file_scheduler_schedulerpb_scheduler_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_scheduler_schedulerpb_scheduler_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*NotifyQuerierShutdownRequest); i {
case 0:
return &v.state
@@ -700,7 +700,7 @@ func file_scheduler_schedulerpb_scheduler_proto_init() {
return nil
}
}
- file_scheduler_schedulerpb_scheduler_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_scheduler_schedulerpb_scheduler_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*NotifyQuerierShutdownResponse); i {
case 0:
return &v.state
diff --git a/pkg/test/integration/helper.go b/pkg/test/integration/helper.go
index 5d22c00a6a..dd95e619af 100644
--- a/pkg/test/integration/helper.go
+++ b/pkg/test/integration/helper.go
@@ -88,6 +88,7 @@ func (p *PyroscopeTest) Start(t *testing.T) {
p.config.MemberlistKV.AdvertisePort = p.memberlistPort
p.config.MemberlistKV.TCPTransport.BindPort = p.memberlistPort
p.config.Ingester.LifecyclerConfig.Addr = address
+ p.config.Ingester.LifecyclerConfig.MinReadyDuration = 0
p.config.QueryScheduler.ServiceDiscovery.SchedulerRing.InstanceAddr = address
p.config.Frontend.Addr = address
@@ -305,7 +306,7 @@ func (b *RequestBuilder) Render(metric string) *flamebearer.FlamebearerProfile {
return fb
}
-func (b *RequestBuilder) PushPPROFRequest(file string, metric string) *connect.Request[pushv1.PushRequest] {
+func (b *RequestBuilder) PushPPROFRequestFromFile(file string, metric string) *connect.Request[pushv1.PushRequest] {
updateTimestamp := func(rawProfile []byte) []byte {
expectedProfile, err := pprof.RawFromBytes(rawProfile)
require.NoError(b.t, err)
@@ -337,6 +338,19 @@ func (b *RequestBuilder) PushPPROFRequest(file string, metric string) *connect.R
return req
}
+func (b *RequestBuilder) PushPPROFRequestFromBytes(rawProfile []byte, name string) *connect.Request[pushv1.PushRequest] {
+ req := connect.NewRequest(&pushv1.PushRequest{
+ Series: []*pushv1.RawProfileSeries{{
+ Labels: []*typesv1.LabelPair{
+ {Name: "__name__", Value: name},
+ {Name: "service_name", Value: b.AppName},
+ },
+ Samples: []*pushv1.RawSample{{RawProfile: rawProfile}},
+ }},
+ })
+ return req
+}
+
func (b *RequestBuilder) QueryClient() querierv1connect.QuerierServiceClient {
return querierv1connect.NewQuerierServiceClient(
http.DefaultClient,
@@ -403,7 +417,7 @@ func (b *RequestBuilder) SelectMergeProfile(metric string, query map[string]stri
qc := b.QueryClient()
resp, err := qc.SelectMergeProfile(context.Background(), connect.NewRequest(&querierv1.SelectMergeProfileRequest{
ProfileTypeID: metric,
- Start: time.Unix(0, 0).UnixMilli(),
+ Start: time.Unix(1, 0).UnixMilli(),
End: time.Now().UnixMilli(),
LabelSelector: selector.String(),
}))
diff --git a/pkg/test/integration/ingest_pprof_test.go b/pkg/test/integration/ingest_pprof_test.go
index 0cc8133444..1ffe6b125a 100644
--- a/pkg/test/integration/ingest_pprof_test.go
+++ b/pkg/test/integration/ingest_pprof_test.go
@@ -4,6 +4,9 @@ import (
"fmt"
"os"
"testing"
+ "time"
+
+ "github.com/grafana/pyroscope/pkg/pprof/testhelper"
"connectrpc.com/connect"
"github.com/stretchr/testify/assert"
@@ -318,6 +321,37 @@ func TestIngestPPROFFixPythonLinenumbers(t *testing.T) {
assert.Equal(t, expected, actual)
}
+func TestGodeltaprofRelabelPush(t *testing.T) {
+ const blockSize = 1024
+ const metric = "godeltaprof_memory"
+
+ p := PyroscopeTest{}
+ p.Start(t)
+ defer p.Stop(t)
+
+ p1, _ := testhelper.NewProfileBuilder(time.Now().Add(-time.Second).UnixNano()).
+ MemoryProfile().
+ ForStacktraceString("my", "other").
+ AddSamples(239, 239*blockSize, 1000, 1000*blockSize).
+ Profile.MarshalVT()
+
+ p2, _ := testhelper.NewProfileBuilder(time.Now().UnixNano()).
+ MemoryProfile().
+ ForStacktraceString("my", "other").
+ AddSamples(3, 3*blockSize, 1000, 1000*blockSize).
+ Profile.MarshalVT()
+
+ rb := p.NewRequestBuilder(t)
+ rb.Push(rb.PushPPROFRequestFromBytes(p1, metric), 200, "")
+ rb.Push(rb.PushPPROFRequestFromBytes(p2, metric), 200, "")
+ renderedProfile := rb.SelectMergeProfile("memory:alloc_objects:count:space:bytes", nil)
+ actual := bench.StackCollapseProto(renderedProfile.Msg, 0, 1)
+ expected := []string{
+ "other;my 242",
+ }
+ assert.Equal(t, expected, actual)
+}
+
func TestPush(t *testing.T) {
p := new(PyroscopeTest)
p.Start(t)
@@ -330,7 +364,7 @@ func TestPush(t *testing.T) {
t.Run(td.profile, func(t *testing.T) {
rb := p.NewRequestBuilder(t)
- req := rb.PushPPROFRequest(td.profile, td.metrics[0].name)
+ req := rb.PushPPROFRequestFromFile(td.profile, td.metrics[0].name)
rb.Push(req, td.expectStatusPush, td.expectedError)
if td.expectStatusPush == 200 {
diff --git a/pkg/util/httpgrpc/httpgrpc.pb.go b/pkg/util/httpgrpc/httpgrpc.pb.go
index 41d98cb476..9c4c343748 100644
--- a/pkg/util/httpgrpc/httpgrpc.pb.go
+++ b/pkg/util/httpgrpc/httpgrpc.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
+// protoc-gen-go v1.34.2
// protoc (unknown)
// source: util/httpgrpc/httpgrpc.proto
@@ -261,7 +261,7 @@ func file_util_httpgrpc_httpgrpc_proto_rawDescGZIP() []byte {
}
var file_util_httpgrpc_httpgrpc_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_util_httpgrpc_httpgrpc_proto_goTypes = []interface{}{
+var file_util_httpgrpc_httpgrpc_proto_goTypes = []any{
(*HTTPRequest)(nil), // 0: httpgrpc.HTTPRequest
(*HTTPResponse)(nil), // 1: httpgrpc.HTTPResponse
(*Header)(nil), // 2: httpgrpc.Header
@@ -284,7 +284,7 @@ func file_util_httpgrpc_httpgrpc_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_util_httpgrpc_httpgrpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_util_httpgrpc_httpgrpc_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HTTPRequest); i {
case 0:
return &v.state
@@ -296,7 +296,7 @@ func file_util_httpgrpc_httpgrpc_proto_init() {
return nil
}
}
- file_util_httpgrpc_httpgrpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_util_httpgrpc_httpgrpc_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HTTPResponse); i {
case 0:
return &v.state
@@ -308,7 +308,7 @@ func file_util_httpgrpc_httpgrpc_proto_init() {
return nil
}
}
- file_util_httpgrpc_httpgrpc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_util_httpgrpc_httpgrpc_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Header); i {
case 0:
return &v.state
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 1e023b4bc9..0f53afdf5d 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "github.com/prometheus/prometheus/model/relabel"
"gopkg.in/yaml.v3"
"github.com/grafana/pyroscope/pkg/phlaredb/block"
@@ -22,14 +21,6 @@ const (
MinCompactorPartialBlockDeletionDelay = 4 * time.Hour
)
-type RulesPosition string
-
-const (
- RulePositionFirst RulesPosition = "first"
- RulePositionDisabled RulesPosition = "disabled"
- RulePositionLast RulesPosition = "last"
-)
-
// Limits describe all the limits for tenants; can be used to describe global default
// limits via flags, or per-tenant limits via yaml config.
// NOTE: we use custom `model.Duration` instead of standard `time.Duration` because,
@@ -50,13 +41,16 @@ type Limits struct {
MaxProfileStacktraceDepth int `yaml:"max_profile_stacktrace_depth" json:"max_profile_stacktrace_depth"`
MaxProfileSymbolValueLength int `yaml:"max_profile_symbol_value_length" json:"max_profile_symbol_value_length"`
+ // Distributor per-app usage breakdown.
+ DistributorUsageGroups *UsageGroupConfig `yaml:"distributor_usage_groups" json:"distributor_usage_groups"`
+
// Distributor aggregation.
DistributorAggregationWindow model.Duration `yaml:"distributor_aggregation_window" json:"distributor_aggregation_window"`
DistributorAggregationPeriod model.Duration `yaml:"distributor_aggregation_period" json:"distributor_aggregation_period"`
// IngestionRelabelingRules allow to specify additional relabeling rules that get applied before a profile gets ingested. There are some default relabeling rules, which ensure consistency of profiling series. The position of the default rules can be contolled by IngestionRelabelingDefaultRulesPosition
- IngestionRelabelingRules []*relabel.Config `yaml:"ingestion_relabeling_rules" json:"ingestion_relabeling_rules"`
- IngestionRelabelingDefaultRulesPosition RulesPosition `yaml:"ingestion_relabeling_default_rules_position" json:"ingestion_relabeling_default_rules_position"`
+ IngestionRelabelingRules RelabelRules `yaml:"ingestion_relabeling_rules" json:"ingestion_relabeling_rules" category:"advanced"`
+ IngestionRelabelingDefaultRulesPosition RelabelRulesPosition `yaml:"ingestion_relabeling_default_rules_position" json:"ingestion_relabeling_default_rules_position" category:"advanced"`
// The tenant shard size determines the how many ingesters a particular
// tenant will be sharded to. Needs to be specified on distributors for
@@ -170,6 +164,12 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.RejectOlderThan.Set("1h")
f.Var(&l.RejectOlderThan, "validation.reject-older-than", "This limits how far into the past profiling data can be ingested. This limit is enforced in the distributor. 0 to disable, defaults to 1h.")
+
+ _ = l.IngestionRelabelingDefaultRulesPosition.Set("first")
+ f.Var(&l.IngestionRelabelingDefaultRulesPosition, "distributor.ingestion-relabeling-default-rules-position", "Position of the default ingestion relabeling rules in relation to relabel rules from overrides. Valid values are 'first', 'last' or 'disabled'.")
+ _ = l.IngestionRelabelingRules.Set("[]")
+ f.Var(&l.IngestionRelabelingRules, "distributor.ingestion-relabeling-rules", "List of ingestion relabel configurations. The relabeling rules work the same way, as those of [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config). All rules are applied in the order they are specified. Note: In most situations, it is more effective to use relabeling directly in Grafana Alloy.")
+
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -195,11 +195,10 @@ func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error {
// Validate validates that this limits config is valid.
func (l *Limits) Validate() error {
- switch l.IngestionRelabelingDefaultRulesPosition {
- case "", RulePositionFirst, RulePositionLast, RulePositionDisabled:
- break
- default:
- return fmt.Errorf("invalid ingestion_relabeling_default_rules_position: %s", l.IngestionRelabelingDefaultRulesPosition)
+ if l.IngestionRelabelingDefaultRulesPosition != "" {
+ if err := l.IngestionRelabelingDefaultRulesPosition.Set(string(l.IngestionRelabelingDefaultRulesPosition)); err != nil {
+ return err
+ }
}
return nil
diff --git a/pkg/validation/relabeling.go b/pkg/validation/relabeling.go
index d5a3703872..58890b2541 100644
--- a/pkg/validation/relabeling.go
+++ b/pkg/validation/relabeling.go
@@ -1,8 +1,12 @@
package validation
import (
+ "encoding/json"
+ "fmt"
+
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/relabel"
+ "gopkg.in/yaml.v3"
)
var (
@@ -16,6 +20,13 @@ var (
TargetLabel: "__name_replaced__",
Replacement: "$0",
},
+ {
+ SourceLabels: []model.LabelName{"__name_replaced__"},
+ Action: relabel.Replace,
+ Regex: godeltaprof,
+ TargetLabel: "__delta__",
+ Replacement: "false",
+ },
{
SourceLabels: []model.LabelName{"__name__"},
Regex: godeltaprof,
@@ -43,11 +54,78 @@ var (
}
)
+type RelabelRulesPosition string
+
+func (p *RelabelRulesPosition) Set(s string) error {
+ switch sp := RelabelRulesPosition(s); sp {
+ case RelabelRulePositionFirst, RelabelRulePositionLast, RelabelRulePositionDisabled:
+ *p = sp
+ return nil
+ }
+ return fmt.Errorf("invalid ingestion_relabeling_default_rules_position: %s", s)
+}
+
+func (p *RelabelRulesPosition) String() string {
+ return string(*p)
+}
+
+const (
+ RelabelRulePositionFirst RelabelRulesPosition = "first"
+ RelabelRulePositionDisabled RelabelRulesPosition = "disabled"
+ RelabelRulePositionLast RelabelRulesPosition = "last"
+)
+
+type RelabelRules []*relabel.Config
+
+func (p *RelabelRules) Set(s string) error {
+
+ v := []*relabel.Config{}
+ if err := yaml.Unmarshal([]byte(s), &v); err != nil {
+ return err
+ }
+
+ for idx, rule := range v {
+ if err := rule.Validate(); err != nil {
+ return fmt.Errorf("rule at pos %d is not valid: %w", idx, err)
+ }
+ }
+ *p = v
+ return nil
+}
+
+func (p RelabelRules) String() string {
+ yamlBytes, err := yaml.Marshal(p)
+ if err != nil {
+ panic(fmt.Errorf("error marshal yaml: %w", err))
+ }
+
+ temp := make([]interface{}, 0, len(p))
+ err = yaml.Unmarshal(yamlBytes, &temp)
+ if err != nil {
+ panic(fmt.Errorf("error unmarshal yaml: %w", err))
+ }
+
+ jsonBytes, err := json.Marshal(temp)
+ if err != nil {
+ panic(fmt.Errorf("error marshal json: %w", err))
+ }
+ return string(jsonBytes)
+}
+
+// ExampleDoc provides an example doc for this config, especially valuable since it's custom-unmarshaled.
+func (r RelabelRules) ExampleDoc() (comment string, yaml interface{}) {
+ return `This example consists of two rules, the first one will drop all profiles received with an label 'environment="secrets"' and the second rule will add a label 'powered_by="Grafana Labs"' to all profile series.`,
+ []map[string]interface{}{
+ {"action": "drop", "source_labels": []interface{}{"environment"}, "regex": "secret"},
+ {"action": "replace", "replacement": "grafana-labs", "target_label": "powered_by"},
+ }
+}
+
func (o *Overrides) IngestionRelabelingRules(tenantID string) []*relabel.Config {
l := o.getOverridesForTenant(tenantID)
// return only custom rules when default rules are disabled
- if l.IngestionRelabelingDefaultRulesPosition == RulePositionDisabled {
+ if l.IngestionRelabelingDefaultRulesPosition == RelabelRulePositionDisabled {
return l.IngestionRelabelingRules
}
@@ -58,7 +136,7 @@ func (o *Overrides) IngestionRelabelingRules(tenantID string) []*relabel.Config
rules := make([]*relabel.Config, 0, len(l.IngestionRelabelingRules)+len(defaultRelabelRules))
- if l.IngestionRelabelingDefaultRulesPosition == "" || l.IngestionRelabelingDefaultRulesPosition == RulePositionFirst {
+ if l.IngestionRelabelingDefaultRulesPosition == "" || l.IngestionRelabelingDefaultRulesPosition == RelabelRulePositionFirst {
rules = append(rules, defaultRelabelRules...)
return append(rules, l.IngestionRelabelingRules...)
}
diff --git a/pkg/validation/relabeling_test.go b/pkg/validation/relabeling_test.go
index 184c9ddb40..45a9d9dc59 100644
--- a/pkg/validation/relabeling_test.go
+++ b/pkg/validation/relabeling_test.go
@@ -122,6 +122,7 @@ func Test_defaultRelabelRules(t *testing.T) {
expected: labels.FromStrings(
phlaremodel.LabelNameProfileName, "memory",
"__name_replaced__", "godeltaprof_memory",
+ "__delta__", "false",
),
kept: true,
},
diff --git a/pkg/validation/usage_groups.go b/pkg/validation/usage_groups.go
new file mode 100644
index 0000000000..5ef79e3420
--- /dev/null
+++ b/pkg/validation/usage_groups.go
@@ -0,0 +1,186 @@
+// This file is a modified copy of the usage groups implementation in Mimir:
+//
+// https://github.com/grafana/mimir/blob/0e8c09f237649e95dc1bf3f7547fd279c24bdcf9/pkg/ingester/activeseries/custom_trackers_config.go#L48
+
+package validation
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql/parser"
+ "gopkg.in/yaml.v3"
+
+ phlaremodel "github.com/grafana/pyroscope/pkg/model"
+)
+
+const (
+ // Maximum number of usage groups that can be configured (per tenant).
+ maxUsageGroups = 50
+
+ // The usage group name to use when no user-defined usage groups matched.
+ noMatchName = "other"
+)
+
+var (
+ // This is a duplicate of distributor_received_decompressed_bytes, but with
+ // usage_group as a label.
+ usageGroupReceivedDecompressedBytes = promauto.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "pyroscope",
+ Name: "usage_group_received_decompressed_total",
+ Help: "The total number of decompressed bytes per profile received by usage group.",
+ },
+ []string{"type", "tenant", "usage_group"},
+ )
+
+ // This is a duplicate of discarded_bytes_total, but with usage_group as a
+ // label.
+ usageGroupDiscardedBytes = promauto.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "pyroscope",
+ Name: "usage_group_discarded_bytes_total",
+ Help: "The total number of bytes that were discarded by usage group.",
+ },
+ []string{"reason", "tenant", "usage_group"},
+ )
+)
+
+type UsageGroupConfig struct {
+ config map[string][]*labels.Matcher
+}
+
+func (c *UsageGroupConfig) GetUsageGroups(tenantID string, lbls phlaremodel.Labels) UsageGroupMatch {
+ match := UsageGroupMatch{
+ tenantID: tenantID,
+ }
+
+ for name, matchers := range c.config {
+ if matchesAll(matchers, lbls) {
+ match.names = append(match.names, name)
+ }
+ }
+
+ return match
+}
+
+func (c *UsageGroupConfig) UnmarshalYAML(value *yaml.Node) error {
+ m := make(map[string]string)
+ err := value.DecodeWithOptions(&m, yaml.DecodeOptions{
+ KnownFields: true,
+ })
+ if err != nil {
+ return fmt.Errorf("malformed usage group config: %w", err)
+ }
+
+ *c, err = NewUsageGroupConfig(m)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *UsageGroupConfig) UnmarshalJSON(bytes []byte) error {
+ m := make(map[string]string)
+ err := json.Unmarshal(bytes, &m)
+ if err != nil {
+ return fmt.Errorf("malformed usage group config: %w", err)
+ }
+
+ *c, err = NewUsageGroupConfig(m)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+type UsageGroupMatch struct {
+ tenantID string
+ names []string
+}
+
+func (m UsageGroupMatch) CountReceivedBytes(profileType string, n int64) {
+ if len(m.names) == 0 {
+ usageGroupReceivedDecompressedBytes.WithLabelValues(profileType, m.tenantID, noMatchName).Add(float64(n))
+ return
+ }
+
+ for _, name := range m.names {
+ usageGroupReceivedDecompressedBytes.WithLabelValues(profileType, m.tenantID, name).Add(float64(n))
+ }
+}
+
+func (m UsageGroupMatch) CountDiscardedBytes(reason string, n int64) {
+ if len(m.names) == 0 {
+ usageGroupDiscardedBytes.WithLabelValues(reason, m.tenantID, noMatchName).Add(float64(n))
+ return
+ }
+
+ for _, name := range m.names {
+ usageGroupDiscardedBytes.WithLabelValues(reason, m.tenantID, name).Add(float64(n))
+ }
+}
+
+func NewUsageGroupConfig(m map[string]string) (UsageGroupConfig, error) {
+ if len(m) > maxUsageGroups {
+ return UsageGroupConfig{}, fmt.Errorf("maximum number of usage groups is %d, got %d", maxUsageGroups, len(m))
+ }
+
+ config := UsageGroupConfig{
+ config: make(map[string][]*labels.Matcher),
+ }
+
+ for name, matchersText := range m {
+ if !utf8.ValidString(name) {
+ return UsageGroupConfig{}, fmt.Errorf("usage group name %q is not valid UTF-8", name)
+ }
+
+ name = strings.TrimSpace(name)
+ if name == "" {
+ return UsageGroupConfig{}, fmt.Errorf("usage group name cannot be empty")
+ }
+
+ if name == noMatchName {
+ return UsageGroupConfig{}, fmt.Errorf("usage group name %q is reserved", noMatchName)
+ }
+
+ matchers, err := parser.ParseMetricSelector(matchersText)
+ if err != nil {
+ return UsageGroupConfig{}, fmt.Errorf("failed to parse matchers for usage group %q: %w", name, err)
+ }
+
+ config.config[name] = matchers
+ }
+
+ return config, nil
+}
+
+func (o *Overrides) DistributorUsageGroups(tenantID string) *UsageGroupConfig {
+ config := o.getOverridesForTenant(tenantID).DistributorUsageGroups
+
+ // It should never be nil, but check just in case!
+ if config == nil {
+ config = &UsageGroupConfig{}
+ }
+ return config
+}
+
+func matchesAll(matchers []*labels.Matcher, lbls phlaremodel.Labels) bool {
+ if len(lbls) == 0 {
+ return false
+ }
+
+ for _, m := range matchers {
+ for _, lbl := range lbls {
+ if lbl.Name == m.Name && !m.Matches(lbl.Value) {
+ return false
+ }
+ }
+ }
+ return true
+}
diff --git a/pkg/validation/usage_groups_test.go b/pkg/validation/usage_groups_test.go
new file mode 100644
index 0000000000..2400fef0b2
--- /dev/null
+++ b/pkg/validation/usage_groups_test.go
@@ -0,0 +1,544 @@
+package validation
+
+import (
+ "encoding/json"
+ "fmt"
+ "slices"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/promql/parser"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+
+ phlaremodel "github.com/grafana/pyroscope/pkg/model"
+)
+
+func TestUsageGroupConfig_GetUsageGroups(t *testing.T) {
+ tests := []struct {
+ Name string
+ TenantID string
+ Config UsageGroupConfig
+ Labels phlaremodel.Labels
+ Want UsageGroupMatch
+ }{
+ {
+ Name: "single_usage_group_match",
+ TenantID: "tenant1",
+ Config: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ },
+ },
+ Labels: phlaremodel.Labels{
+ {Name: "service_name", Value: "foo"},
+ },
+ Want: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{"app/foo"},
+ },
+ },
+ {
+ Name: "multiple_usage_group_matches",
+ TenantID: "tenant1",
+ Config: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ "app/foo2": testMustParseMatcher(t, `{service_name="foo", namespace=~"bar.*"}`),
+ },
+ },
+ Labels: phlaremodel.Labels{
+ {Name: "service_name", Value: "foo"},
+ {Name: "namespace", Value: "barbaz"},
+ },
+ Want: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{
+ "app/foo",
+ "app/foo2",
+ },
+ },
+ },
+ {
+ Name: "no_usage_group_matches",
+ TenantID: "tenant1",
+ Config: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="notfound"}`),
+ },
+ },
+ Labels: phlaremodel.Labels{
+ {Name: "service_name", Value: "foo"},
+ },
+ Want: UsageGroupMatch{
+ tenantID: "tenant1",
+ },
+ },
+ {
+ Name: "wildcard_matcher",
+ TenantID: "tenant1",
+ Config: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{}`),
+ },
+ },
+ Labels: phlaremodel.Labels{
+ {Name: "service_name", Value: "foo"},
+ },
+ Want: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{"app/foo"},
+ },
+ },
+ {
+ Name: "no_labels",
+ TenantID: "tenant1",
+ Config: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ },
+ },
+ Labels: phlaremodel.Labels{},
+ Want: UsageGroupMatch{
+ tenantID: "tenant1",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ got := tt.Config.GetUsageGroups(tt.TenantID, tt.Labels)
+
+ slices.Sort(got.names)
+ slices.Sort(tt.Want.names)
+ require.Equal(t, tt.Want, got)
+ })
+ }
+}
+
+func TestUsageGroupMatch_CountReceivedBytes(t *testing.T) {
+ tests := []struct {
+ Name string
+ Match UsageGroupMatch
+ Count int64
+ WantCounts map[string]float64
+ }{
+ {
+ Name: "single_usage_group_match",
+ Match: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{"app/foo"},
+ },
+ Count: 100,
+ WantCounts: map[string]float64{
+ "app/foo": 100,
+ "app/foo2": 0,
+ "other": 0,
+ },
+ },
+ {
+ Name: "multiple_usage_group_matches",
+ Match: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{
+ "app/foo",
+ "app/foo2",
+ },
+ },
+ Count: 100,
+ WantCounts: map[string]float64{
+ "app/foo": 100,
+ "app/foo2": 100,
+ "other": 0,
+ },
+ },
+ {
+ Name: "no_usage_group_matches",
+ Match: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{},
+ },
+ Count: 100,
+ WantCounts: map[string]float64{
+ "app/foo": 0,
+ "app/foo2": 0,
+ "other": 100,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ const profileType = "cpu"
+ usageGroupReceivedDecompressedBytes.Reset()
+
+ tt.Match.CountReceivedBytes(profileType, tt.Count)
+
+ for name, want := range tt.WantCounts {
+ collector := usageGroupReceivedDecompressedBytes.WithLabelValues(
+ profileType,
+ tt.Match.tenantID,
+ name,
+ )
+
+ got := testutil.ToFloat64(collector)
+ require.Equal(t, got, want, "usage group %s has incorrect metric value", name)
+ }
+ })
+ }
+}
+
+func TestUsageGroupMatch_CountDiscardedBytes(t *testing.T) {
+ tests := []struct {
+ Name string
+ Match UsageGroupMatch
+ Count int64
+ WantCounts map[string]float64
+ }{
+ {
+ Name: "single_usage_group_match",
+ Match: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{"app/foo"},
+ },
+ Count: 100,
+ WantCounts: map[string]float64{
+ "app/foo": 100,
+ "app/foo2": 0,
+ "other": 0,
+ },
+ },
+ {
+ Name: "multiple_usage_group_matches",
+ Match: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{
+ "app/foo",
+ "app/foo2",
+ },
+ },
+ Count: 100,
+ WantCounts: map[string]float64{
+ "app/foo": 100,
+ "app/foo2": 100,
+ "other": 0,
+ },
+ },
+ {
+ Name: "no_usage_group_matches",
+ Match: UsageGroupMatch{
+ tenantID: "tenant1",
+ names: []string{},
+ },
+ Count: 100,
+ WantCounts: map[string]float64{
+ "app/foo": 0,
+ "app/foo2": 0,
+ "other": 100,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ const reason = "no_reason"
+ usageGroupDiscardedBytes.Reset()
+
+ tt.Match.CountDiscardedBytes(reason, tt.Count)
+
+ for name, want := range tt.WantCounts {
+ collector := usageGroupDiscardedBytes.WithLabelValues(
+ reason,
+ tt.Match.tenantID,
+ name,
+ )
+
+ got := testutil.ToFloat64(collector)
+ require.Equal(t, got, want, "usage group %q has incorrect metric value", name)
+ }
+ })
+ }
+}
+
+func TestNewUsageGroupConfig(t *testing.T) {
+ tests := []struct {
+ Name string
+ ConfigMap map[string]string
+ Want UsageGroupConfig
+ WantErr string
+ }{
+ {
+ Name: "single_usage_group",
+ ConfigMap: map[string]string{
+ "app/foo": `{service_name="foo"}`,
+ },
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ },
+ },
+ },
+ {
+ Name: "multiple_usage_groups",
+ ConfigMap: map[string]string{
+ "app/foo": `{service_name="foo"}`,
+ "app/foo2": `{service_name="foo", namespace=~"bar.*"}`,
+ },
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ "app/foo2": testMustParseMatcher(t, `{service_name="foo", namespace=~"bar.*"}`),
+ },
+ },
+ },
+ {
+ Name: "no_usage_groups",
+ ConfigMap: map[string]string{},
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{},
+ },
+ },
+ {
+ Name: "wildcard_matcher",
+ ConfigMap: map[string]string{
+ "app/foo": `{}`,
+ },
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{}`),
+ },
+ },
+ },
+ {
+ Name: "too_many_usage_groups",
+ ConfigMap: func() map[string]string {
+ m := make(map[string]string)
+ for i := 0; i < maxUsageGroups+1; i++ {
+ m[fmt.Sprintf("app/foo%d", i)] = `{service_name="foo"}`
+ }
+ return m
+ }(),
+ WantErr: fmt.Sprintf("maximum number of usage groups is %d, got %d", maxUsageGroups, maxUsageGroups+1),
+ },
+ {
+ Name: "invalid_matcher",
+ ConfigMap: map[string]string{
+ "app/foo": `????`,
+ },
+ WantErr: `failed to parse matchers for usage group "app/foo": 1:1: parse error: unexpected character: '?'`,
+ },
+ {
+ Name: "empty_matcher",
+ ConfigMap: map[string]string{
+ "app/foo": ``,
+ },
+ WantErr: `failed to parse matchers for usage group "app/foo": unknown position: parse error: unexpected end of input`,
+ },
+ {
+ Name: "empty_name",
+ ConfigMap: map[string]string{
+ "": `{service_name="foo"}`,
+ },
+ WantErr: "usage group name cannot be empty",
+ },
+ {
+ Name: "whitespace_name",
+ ConfigMap: map[string]string{
+ " app/foo ": `{service_name="foo"}`,
+ },
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ },
+ },
+ },
+ {
+ Name: "reserved_name",
+ ConfigMap: map[string]string{
+ noMatchName: `{service_name="foo"}`,
+ },
+ WantErr: fmt.Sprintf("usage group name %q is reserved", noMatchName),
+ },
+ {
+ Name: "invalid_utf8_name",
+ ConfigMap: map[string]string{
+ "app/\x80foo": `{service_name="foo"}`,
+ },
+ WantErr: `usage group name "app/\x80foo" is not valid UTF-8`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ got, err := NewUsageGroupConfig(tt.ConfigMap)
+ if tt.WantErr != "" {
+ require.EqualError(t, err, tt.WantErr)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.Want, got)
+ }
+ })
+ }
+}
+
+func TestUsageGroupConfig_UnmarshalYAML(t *testing.T) {
+ type Object struct {
+ UsageGroups UsageGroupConfig `yaml:"usage_groups"`
+ }
+
+ tests := []struct {
+ Name string
+ YAML string
+ Want UsageGroupConfig
+ WantErr string
+ }{
+ {
+ Name: "single_usage_group",
+ YAML: `
+usage_groups:
+ app/foo: '{service_name="foo"}'`,
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ },
+ },
+ },
+ {
+ Name: "multiple_usage_groups",
+ YAML: `
+usage_groups:
+ app/foo: '{service_name="foo"}'
+ app/foo2: '{service_name="foo", namespace=~"bar.*"}'`,
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ "app/foo2": testMustParseMatcher(t, `{service_name="foo", namespace=~"bar.*"}`),
+ },
+ },
+ },
+ {
+ Name: "empty_usage_groups",
+ YAML: `
+usage_groups: {}`,
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{},
+ },
+ },
+ {
+ Name: "invalid_yaml",
+ YAML: `usage_groups: ?????`,
+ WantErr: "malformed usage group config: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `?????` into map[string]string",
+ },
+ {
+ Name: "invalid_matcher",
+ YAML: `
+usage_groups:
+ app/foo: ?????`,
+ WantErr: `failed to parse matchers for usage group "app/foo": 1:1: parse error: unexpected character: '?'`,
+ },
+ {
+ Name: "missing_usage_groups_key_in_config",
+ YAML: `
+some_other_config:
+ foo: bar`,
+ Want: UsageGroupConfig{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ got := Object{}
+ err := yaml.Unmarshal([]byte(tt.YAML), &got)
+ if tt.WantErr != "" {
+ require.EqualError(t, err, tt.WantErr)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.Want, got.UsageGroups)
+ }
+ })
+ }
+}
+
+func TestUsageGroupConfig_UnmarshalJSON(t *testing.T) {
+ type Object struct {
+ UsageGroups UsageGroupConfig `json:"usage_groups"`
+ }
+
+ tests := []struct {
+ Name string
+ JSON string
+ Want UsageGroupConfig
+ WantErr string
+ }{
+ {
+ Name: "single_usage_group",
+ JSON: `{
+ "usage_groups": {
+ "app/foo": "{service_name=\"foo\"}"
+ }
+ }`,
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ },
+ },
+ },
+ {
+ Name: "multiple_usage_groups",
+ JSON: `{
+ "usage_groups": {
+ "app/foo": "{service_name=\"foo\"}",
+ "app/foo2": "{service_name=\"foo\", namespace=~\"bar.*\"}"
+ }
+ }`,
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{
+ "app/foo": testMustParseMatcher(t, `{service_name="foo"}`),
+ "app/foo2": testMustParseMatcher(t, `{service_name="foo", namespace=~"bar.*"}`),
+ },
+ },
+ },
+ {
+ Name: "empty_usage_groups",
+ JSON: `{"usage_groups": {}}`,
+ Want: UsageGroupConfig{
+ config: map[string][]*labels.Matcher{},
+ },
+ },
+ {
+ Name: "invalid_json",
+ JSON: `{"usage_groups": "?????"}`,
+ WantErr: "malformed usage group config: json: cannot unmarshal string into Go value of type map[string]string",
+ },
+ {
+ Name: "invalid_matcher",
+ JSON: `{"usage_groups": {"app/foo": "?????"}}`,
+ WantErr: `failed to parse matchers for usage group "app/foo": 1:1: parse error: unexpected character: '?'`,
+ },
+ {
+ Name: "missing_usage_groups_key_in_config",
+ JSON: `{"some_other_key": {"foo": "bar"}}`,
+ Want: UsageGroupConfig{},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.Name, func(t *testing.T) {
+ got := Object{}
+ err := json.Unmarshal([]byte(tt.JSON), &got)
+ if tt.WantErr != "" {
+ require.EqualError(t, err, tt.WantErr)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tt.Want, got.UsageGroups)
+ }
+ })
+ }
+}
+
+func testMustParseMatcher(t *testing.T, s string) []*labels.Matcher {
+ m, err := parser.ParseMetricSelector(s)
+ require.NoError(t, err)
+ return m
+}
diff --git a/pkg/validation/validate.go b/pkg/validation/validate.go
index c168bec565..ab7c0413f2 100644
--- a/pkg/validation/validate.go
+++ b/pkg/validation/validate.go
@@ -47,16 +47,17 @@ const (
DuplicateLabelNames Reason = "duplicate_label_names"
// SeriesLimit is a reason for discarding lines when we can't create a new stream
// because the limit of active streams has been reached.
- SeriesLimit Reason = "series_limit"
- QueryLimit Reason = "query_limit"
- SamplesLimit Reason = "samples_limit"
- ProfileSizeLimit Reason = "profile_size_limit"
- SampleLabelsLimit Reason = "sample_labels_limit"
- MalformedProfile Reason = "malformed_profile"
- FlameGraphLimit Reason = "flamegraph_limit"
+ SeriesLimit Reason = "series_limit"
+ QueryLimit Reason = "query_limit"
+ SamplesLimit Reason = "samples_limit"
+ ProfileSizeLimit Reason = "profile_size_limit"
+ SampleLabelsLimit Reason = "sample_labels_limit"
+ MalformedProfile Reason = "malformed_profile"
+ FlameGraphLimit Reason = "flamegraph_limit"
+ QueryMissingTimeRange Reason = "missing_time_range"
// Those profiles were dropped because of relabeling rules
- RelabelRules Reason = "dropped_by_relabel_rules"
+ DroppedByRelabelRules Reason = "dropped_by_relabel_rules"
SeriesLimitErrorMsg = "Maximum active series limit exceeded (%d/%d), reduce the number of active streams (reduce labels or reduce label values), or contact your administrator to see if the limit can be increased"
MissingLabelsErrorMsg = "error at least one label pair is required per profile"
@@ -72,6 +73,7 @@ const (
NotInIngestionWindowErrorMsg = "profile with labels '%s' is outside of ingestion window (profile timestamp: %s, %s)"
MaxFlameGraphNodesErrorMsg = "max flamegraph nodes limit %d is greater than allowed %d"
MaxFlameGraphNodesUnlimitedErrorMsg = "max flamegraph nodes limit must be set (max allowed %d)"
+ QueryMissingTimeRangeErrorMsg = "missing time range in the query"
)
var (
@@ -327,6 +329,10 @@ type ValidatedRangeRequest struct {
}
func ValidateRangeRequest(limits RangeRequestLimits, tenantIDs []string, req model.Interval, now model.Time) (ValidatedRangeRequest, error) {
+ if req.Start == 0 || req.End == 0 {
+ return ValidatedRangeRequest{}, NewErrorf(QueryMissingTimeRange, QueryMissingTimeRangeErrorMsg)
+ }
+
if maxQueryLookback := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, limits.MaxQueryLookback); maxQueryLookback > 0 {
minStartTime := now.Add(-maxQueryLookback)
diff --git a/pkg/validation/validate_test.go b/pkg/validation/validate_test.go
index 9d9de6bf01..696b8f9a0e 100644
--- a/pkg/validation/validate_test.go
+++ b/pkg/validation/validate_test.go
@@ -200,6 +200,30 @@ func Test_ValidateRangeRequest(t *testing.T) {
},
},
},
+ {
+ name: "empty start",
+ in: model.Interval{
+ Start: 0,
+ End: now,
+ },
+ expectedErr: NewErrorf(QueryMissingTimeRange, QueryMissingTimeRangeErrorMsg),
+ },
+ {
+ name: "empty end",
+ in: model.Interval{
+ Start: now,
+ End: 0,
+ },
+ expectedErr: NewErrorf(QueryMissingTimeRange, QueryMissingTimeRangeErrorMsg),
+ },
+ {
+ name: "empty start and end",
+ in: model.Interval{
+ Start: 0,
+ End: 0,
+ },
+ expectedErr: NewErrorf(QueryMissingTimeRange, QueryMissingTimeRangeErrorMsg),
+ },
} {
tt := tt
t.Run(tt.name, func(t *testing.T) {
diff --git a/tools/doc-generator/main.go b/tools/doc-generator/main.go
index c178bd3fa0..7fa1bd8290 100644
--- a/tools/doc-generator/main.go
+++ b/tools/doc-generator/main.go
@@ -97,6 +97,7 @@ func generateBlocksMarkdown(blocks []*parse.ConfigBlock) string {
return md.string()
}
+//nolint:unused
func generateBlockMarkdown(blocks []*parse.ConfigBlock, blockName, fieldName string) string {
// Look for the requested block.
for _, block := range blocks {
@@ -159,21 +160,11 @@ func main() {
// Generate documentation markdown.
data := struct {
- ConfigFile string
- BlocksStorageConfigBlock string
- StoreGatewayConfigBlock string
- CompactorConfigBlock string
- QuerierConfigBlock string
- S3SSEConfigBlock string
- GeneratedFileWarning string
+ ConfigFile string
+ GeneratedFileWarning string
}{
- ConfigFile: generateBlocksMarkdown(blocks),
- BlocksStorageConfigBlock: generateBlockMarkdown(blocks, "blocks_storage_config", "blocks_storage"),
- StoreGatewayConfigBlock: generateBlockMarkdown(blocks, "store_gateway_config", "store_gateway"),
- CompactorConfigBlock: generateBlockMarkdown(blocks, "compactor_config", "compactor"),
- QuerierConfigBlock: generateBlockMarkdown(blocks, "querier_config", "querier"),
- S3SSEConfigBlock: generateBlockMarkdown(blocks, "s3_sse_config", "sse"),
- GeneratedFileWarning: "",
+ ConfigFile: generateBlocksMarkdown(blocks),
+ GeneratedFileWarning: "",
}
// Load the template file.
diff --git a/tools/grafana-phlare b/tools/grafana-phlare
index 17a8216316..31585c5a38 100755
--- a/tools/grafana-phlare
+++ b/tools/grafana-phlare
@@ -35,7 +35,7 @@ docker run $DOCKER_ARGS --rm \
-v "${datasource_provisioning}:/etc/grafana/provisioning/datasources/phlare.yaml:ro" \
-v "$(pwd)"/grafana/phlare-datasource/dist:/var/lib/grafana/plugins/phlare-datasource \
-v "$(pwd)"/grafana/flamegraph/dist:/var/lib/grafana/plugins/flamegraph \
- -e GF_FEATURE_TOGGLES_ENABLE=flameGraph \
+ -e GF_INSTALL_PLUGINS=grafana-pyroscope-app \
-e GF_INSTALL_PLUGINS=pyroscope-datasource,pyroscope-panel \
-e GF_DEFAULT_APP_MODE=development \
-e GF_AUTH_ANONYMOUS_ENABLED=true \
diff --git a/tools/upgrade-alpine-version.sh b/tools/upgrade-alpine-version.sh
deleted file mode 100755
index 3a9c93bf79..0000000000
--- a/tools/upgrade-alpine-version.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-
-set -euo pipefail
-
-if [ $# -ne 1 ]; then
- echo "Usage: $0 "
- exit 1
-fi
-
-# search replace all dockefiles
-TARGET="*/*Dockerfile*"
-git ls-files "$TARGET" | xargs sed -i 's/alpine:[0-9\.]\+/alpine:'$1'/g'
-
-# add changes
-git add -u "$TARGET"
-git commit -m "Update alpine version to $1"
diff --git a/yarn.lock b/yarn.lock
index d85a247eee..0a3d1fc59f 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -10848,9 +10848,9 @@ fast-levenshtein@^2.0.6, fast-levenshtein@~2.0.6:
integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
fast-loops@^1.1.3:
- version "1.1.3"
- resolved "https://registry.yarnpkg.com/fast-loops/-/fast-loops-1.1.3.tgz#ce96adb86d07e7bf9b4822ab9c6fac9964981f75"
- integrity sha512-8EZzEP0eKkEEVX+drtd9mtuQ+/QrlfW/5MlwcwK5Nds6EkZ/tRzEexkzUY2mIssnAyVLT+TKHuRXmFNNXYUd6g==
+ version "1.1.4"
+ resolved "https://registry.yarnpkg.com/fast-loops/-/fast-loops-1.1.4.tgz#61bc77d518c0af5073a638c6d9d5c7683f069ce2"
+ integrity sha512-8dbd3XWoKCTms18ize6JmQF1SFnnfj5s0B7rRry22EofgMu7B6LKHVh+XfFqFGsqnbH54xgeO83PzpKI+ODhlg==
fast-shallow-equal@^1.0.0:
version "1.0.0"