diff --git a/.github/workflows/k6-tests.yaml b/.github/workflows/k6-tests.yaml
index 5c514ad1..193c4bbc 100644
--- a/.github/workflows/k6-tests.yaml
+++ b/.github/workflows/k6-tests.yaml
@@ -10,6 +10,7 @@ jobs:
image: ghcr.io/grafana/quickpizza-local:latest
ports:
- 3333:3333
+ - 3334:3334
steps:
- name: Checkout
@@ -51,11 +52,11 @@ jobs:
# act -W .github/workflows/k6-tests.yaml --container-architecture linux/amd64
- name: Run k6 foundations tests
- run: ./run-tests.sh -t **/k6/foundations/*.js -u http://localhost:3333
+ run: ./scripts/run-tests.sh -t **/k6/foundations/*.js -u http://localhost:3333
env:
ACT: ${{ env.ACT }}
- name: Run k6 browser tests
- run: ./run-tests.sh -t **/k6/browser/*.js -u http://localhost:3333
+ run: ./scripts/run-tests.sh -t **/k6/browser/*.js -u http://localhost:3333
env:
ACT: ${{ env.ACT }}
diff --git a/Makefile b/Makefile
index 46b7816a..a3a72819 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,5 @@
+GO_SOURCES=$(shell find . -type f -name '*.go' -not -path "./vendor/*")
+
.PHONY: run
run:
go generate pkg/web/web.go
@@ -8,3 +10,15 @@ run:
build:
go generate pkg/web/web.go
CGO_ENABLED=0 go build -o bin/quickpizza ./cmd
+
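+# Regenerate the protobuf and gRPC Go stubs from proto/quickpizza.proto.
+# Requires protoc plus the protoc-gen-go and protoc-gen-go-grpc plugins.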
+.PHONY: proto
+proto:
+ protoc --go_out=. --go-grpc_out=. proto/quickpizza.proto
+
+.PHONY: format
+format:
+ @goimports -w -l $(GO_SOURCES)
+
+.PHONY: format-check
+format-check:
+ @out=$$(goimports -l $(GO_SOURCES)) && echo "$$out" && test -z "$$out"
diff --git a/cmd/main.go b/cmd/main.go
index 0ee2cd97..5ef66bcc 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -9,13 +9,15 @@ import (
"strings"
"time"
+ "log/slog"
+
"github.com/grafana/pyroscope-go"
"github.com/grafana/quickpizza/pkg/database"
+ qpgrpc "github.com/grafana/quickpizza/pkg/grpc"
qphttp "github.com/grafana/quickpizza/pkg/http"
"github.com/hashicorp/go-retryablehttp"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/propagation"
- "golang.org/x/exp/slog"
)
func main() {
@@ -126,6 +128,17 @@ func main() {
server = server.WithRecommendations(catalogClient, copyClient)
}
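+ // Serve the gRPC API on a separate listener when the QUICKPIZZA_GRPC service is enabled.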
+ if envServe("QUICKPIZZA_GRPC") {
+ grpcServer := qpgrpc.NewServer(":3334")
+ go func() {
+ err := grpcServer.ListenAndServe()
+ if err != nil {
+ slog.Error("Running gRPC server", "err", err)
+ os.Exit(1)
+ }
+ }()
+ }
+
listen := ":3333"
slog.Info("Starting QuickPizza", "listenAddress", listen)
err = http.ListenAndServe(listen, server)
diff --git a/go.mod b/go.mod
index 17f1df8a..71203294 100644
--- a/go.mod
+++ b/go.mod
@@ -27,7 +27,8 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
go.opentelemetry.io/otel/sdk v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
- golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
+ google.golang.org/grpc v1.65.0
+ google.golang.org/protobuf v1.34.2
)
require (
@@ -69,8 +70,6 @@ require (
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
- google.golang.org/grpc v1.65.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/uint128 v1.3.0 // indirect
mellium.im/sasl v0.3.1 // indirect
diff --git a/go.sum b/go.sum
index 85c0860e..bfbf5c58 100644
--- a/go.sum
+++ b/go.sum
@@ -326,8 +326,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
-golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
diff --git a/k6/foundations/16.grpc.js b/k6/foundations/16.grpc.js
new file mode 100644
index 00000000..18b64b93
--- /dev/null
+++ b/k6/foundations/16.grpc.js
@@ -0,0 +1,25 @@
+import { Client, StatusOK } from 'k6/net/grpc';
+import { check, sleep } from 'k6';
+
+const BASE_URL = 'localhost:3334';
+
+const client = new Client();
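+// Load the QuickPizza protobuf definition so k6 can marshal and unmarshal the RPC messages.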
+client.load(['definitions'], '../../../proto/quickpizza.proto');
+
+export default () => {
+ client.connect(BASE_URL, {
+ plaintext: true
+ });
+
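+ // Invoke the unary EvaluatePizza RPC with a sample pizza and verify the call succeeded.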
+ const data = { ingredients: ["Pepperoni", "Mozzarella"], dough: "Stuffed" };
+ const response = client.invoke('quickpizza.GRPC/EvaluatePizza', data);
+
+ check(response, {
+ 'status is OK': (r) => r && r.status === StatusOK,
+ });
+
+ console.log(JSON.stringify(response.message));
+
+ client.close();
+ sleep(1);
+};
diff --git a/pkg/database/catalog.go b/pkg/database/catalog.go
index 0675d549..b22aba48 100644
--- a/pkg/database/catalog.go
+++ b/pkg/database/catalog.go
@@ -3,9 +3,10 @@ package database
import (
"context"
+ "log/slog"
+
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
- "golang.org/x/exp/slog"
"github.com/grafana/quickpizza/pkg/database/migrations"
"github.com/grafana/quickpizza/pkg/errorinjector"
diff --git a/pkg/database/copy.go b/pkg/database/copy.go
index 1f84d57b..05804099 100644
--- a/pkg/database/copy.go
+++ b/pkg/database/copy.go
@@ -3,9 +3,10 @@ package database
import (
"context"
+ "log/slog"
+
"github.com/grafana/quickpizza/pkg/database/migrations"
"github.com/grafana/quickpizza/pkg/model"
- "golang.org/x/exp/slog"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
diff --git a/pkg/database/database.go b/pkg/database/database.go
index c6e32de5..f66641b2 100644
--- a/pkg/database/database.go
+++ b/pkg/database/database.go
@@ -6,6 +6,8 @@ import (
"runtime"
"strings"
+ "log/slog"
+
"github.com/grafana/quickpizza/pkg/logging"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect/pgdialect"
@@ -13,7 +15,6 @@ import (
"github.com/uptrace/bun/driver/pgdriver"
"github.com/uptrace/bun/driver/sqliteshim"
"github.com/uptrace/bun/extra/bunotel"
- "golang.org/x/exp/slog"
)
func initializeDB(connString string) (*bun.DB, error) {
diff --git a/pkg/grpc/grpc.go b/pkg/grpc/grpc.go
new file mode 100644
index 00000000..851064b6
--- /dev/null
+++ b/pkg/grpc/grpc.go
@@ -0,0 +1,52 @@
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math/rand"
+ "net"
+
+ pb "github.com/grafana/quickpizza/pkg/grpc/quickpizza"
+ "google.golang.org/grpc"
+)
+
+type serverImplementation struct {
+ pb.UnimplementedGRPCServer
+}
+
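+// Server wraps a grpc.Server together with the address it should listen on.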
+type Server struct {
+ grpcServer *grpc.Server
+ listen string
+}
+
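+// Status reports that the gRPC service is ready; it always returns ready=true.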
+func (s *serverImplementation) Status(_ context.Context, in *pb.StatusRequest) (*pb.StatusResponse, error) {
+ return &pb.StatusResponse{Ready: true}, nil
+}
+
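+// EvaluatePizza returns a pseudo-random star rating from 0 to 5 when the request
+// includes at least one ingredient and a dough; otherwise the rating stays 0.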
+func (s *serverImplementation) EvaluatePizza(_ context.Context, in *pb.PizzaEvaluationRequest) (*pb.PizzaEvaluationResponse, error) {
+ var rating int32
+ if len(in.Ingredients) > 0 && in.Dough != "" {
+ rating = rand.Int31n(6)
+ }
+ return &pb.PizzaEvaluationResponse{
+ StarsRating: rating,
+ }, nil
+}
+
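+// NewServer creates a gRPC server with the QuickPizza service registered,
+// configured to listen on the given address.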
+func NewServer(listen string) *Server {
+ s := grpc.NewServer()
+ pb.RegisterGRPCServer(s, &serverImplementation{})
+
+ return &Server{grpcServer: s, listen: listen}
+}
+
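+// ListenAndServe opens the TCP listener and serves gRPC requests until the server stops.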
+func (s *Server) ListenAndServe() error {
+ lis, err := net.Listen("tcp", s.listen)
+ if err != nil {
+ return fmt.Errorf("failed to listen on port: %w", err)
+ }
+
+ slog.Info("Starting QuickPizza gRPC", "listenAddress", s.listen)
+ return s.grpcServer.Serve(lis)
+}
diff --git a/pkg/grpc/quickpizza/quickpizza.pb.go b/pkg/grpc/quickpizza/quickpizza.pb.go
new file mode 100644
index 00000000..95e370bc
--- /dev/null
+++ b/pkg/grpc/quickpizza/quickpizza.pb.go
@@ -0,0 +1,290 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v5.28.3
+// source: proto/quickpizza.proto
+
+package quickpizza
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type StatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *StatusRequest) Reset() {
+ *x = StatusRequest{}
+ mi := &file_proto_quickpizza_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusRequest) ProtoMessage() {}
+
+func (x *StatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_quickpizza_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead.
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+ return file_proto_quickpizza_proto_rawDescGZIP(), []int{0}
+}
+
+type StatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ready bool `protobuf:"varint,1,opt,name=ready,proto3" json:"ready,omitempty"`
+}
+
+func (x *StatusResponse) Reset() {
+ *x = StatusResponse{}
+ mi := &file_proto_quickpizza_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusResponse) ProtoMessage() {}
+
+func (x *StatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_quickpizza_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead.
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+ return file_proto_quickpizza_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *StatusResponse) GetReady() bool {
+ if x != nil {
+ return x.Ready
+ }
+ return false
+}
+
+type PizzaEvaluationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ingredients []string `protobuf:"bytes,1,rep,name=ingredients,proto3" json:"ingredients,omitempty"`
+ Dough string `protobuf:"bytes,2,opt,name=dough,proto3" json:"dough,omitempty"`
+}
+
+func (x *PizzaEvaluationRequest) Reset() {
+ *x = PizzaEvaluationRequest{}
+ mi := &file_proto_quickpizza_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PizzaEvaluationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PizzaEvaluationRequest) ProtoMessage() {}
+
+func (x *PizzaEvaluationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_quickpizza_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PizzaEvaluationRequest.ProtoReflect.Descriptor instead.
+func (*PizzaEvaluationRequest) Descriptor() ([]byte, []int) {
+ return file_proto_quickpizza_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *PizzaEvaluationRequest) GetIngredients() []string {
+ if x != nil {
+ return x.Ingredients
+ }
+ return nil
+}
+
+func (x *PizzaEvaluationRequest) GetDough() string {
+ if x != nil {
+ return x.Dough
+ }
+ return ""
+}
+
+type PizzaEvaluationResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ StarsRating int32 `protobuf:"varint,1,opt,name=stars_rating,json=starsRating,proto3" json:"stars_rating,omitempty"`
+}
+
+func (x *PizzaEvaluationResponse) Reset() {
+ *x = PizzaEvaluationResponse{}
+ mi := &file_proto_quickpizza_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PizzaEvaluationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PizzaEvaluationResponse) ProtoMessage() {}
+
+func (x *PizzaEvaluationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_quickpizza_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PizzaEvaluationResponse.ProtoReflect.Descriptor instead.
+func (*PizzaEvaluationResponse) Descriptor() ([]byte, []int) {
+ return file_proto_quickpizza_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *PizzaEvaluationResponse) GetStarsRating() int32 {
+ if x != nil {
+ return x.StarsRating
+ }
+ return 0
+}
+
+var File_proto_quickpizza_proto protoreflect.FileDescriptor
+
+var file_proto_quickpizza_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x69, 0x63, 0x6b, 0x70, 0x69, 0x7a,
+ 0x7a, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x71, 0x75, 0x69, 0x63, 0x6b, 0x70,
+ 0x69, 0x7a, 0x7a, 0x61, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x26, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x22, 0x50, 0x0a,
+ 0x16, 0x50, 0x69, 0x7a, 0x7a, 0x61, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x69, 0x6e, 0x67, 0x72, 0x65,
+ 0x64, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e,
+ 0x67, 0x72, 0x65, 0x64, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x6f, 0x75,
+ 0x67, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x64, 0x6f, 0x75, 0x67, 0x68, 0x22,
+ 0x3c, 0x0a, 0x17, 0x50, 0x69, 0x7a, 0x7a, 0x61, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74,
+ 0x61, 0x72, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x32, 0xa5, 0x01,
+ 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x41, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x19, 0x2e, 0x71, 0x75, 0x69, 0x63, 0x6b, 0x70, 0x69, 0x7a, 0x7a, 0x61, 0x2e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x71, 0x75,
+ 0x69, 0x63, 0x6b, 0x70, 0x69, 0x7a, 0x7a, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0d, 0x45, 0x76, 0x61,
+ 0x6c, 0x75, 0x61, 0x74, 0x65, 0x50, 0x69, 0x7a, 0x7a, 0x61, 0x12, 0x22, 0x2e, 0x71, 0x75, 0x69,
+ 0x63, 0x6b, 0x70, 0x69, 0x7a, 0x7a, 0x61, 0x2e, 0x50, 0x69, 0x7a, 0x7a, 0x61, 0x45, 0x76, 0x61,
+ 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23,
+ 0x2e, 0x71, 0x75, 0x69, 0x63, 0x6b, 0x70, 0x69, 0x7a, 0x7a, 0x61, 0x2e, 0x50, 0x69, 0x7a, 0x7a,
+ 0x61, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2f, 0x71, 0x75, 0x69, 0x63, 0x6b, 0x70, 0x69, 0x7a, 0x7a, 0x61, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_proto_quickpizza_proto_rawDescOnce sync.Once
+ file_proto_quickpizza_proto_rawDescData = file_proto_quickpizza_proto_rawDesc
+)
+
+func file_proto_quickpizza_proto_rawDescGZIP() []byte {
+ file_proto_quickpizza_proto_rawDescOnce.Do(func() {
+ file_proto_quickpizza_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_quickpizza_proto_rawDescData)
+ })
+ return file_proto_quickpizza_proto_rawDescData
+}
+
+var file_proto_quickpizza_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_proto_quickpizza_proto_goTypes = []any{
+ (*StatusRequest)(nil), // 0: quickpizza.StatusRequest
+ (*StatusResponse)(nil), // 1: quickpizza.StatusResponse
+ (*PizzaEvaluationRequest)(nil), // 2: quickpizza.PizzaEvaluationRequest
+ (*PizzaEvaluationResponse)(nil), // 3: quickpizza.PizzaEvaluationResponse
+}
+var file_proto_quickpizza_proto_depIdxs = []int32{
+ 0, // 0: quickpizza.GRPC.Status:input_type -> quickpizza.StatusRequest
+ 2, // 1: quickpizza.GRPC.EvaluatePizza:input_type -> quickpizza.PizzaEvaluationRequest
+ 1, // 2: quickpizza.GRPC.Status:output_type -> quickpizza.StatusResponse
+ 3, // 3: quickpizza.GRPC.EvaluatePizza:output_type -> quickpizza.PizzaEvaluationResponse
+ 2, // [2:4] is the sub-list for method output_type
+ 0, // [0:2] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_proto_quickpizza_proto_init() }
+func file_proto_quickpizza_proto_init() {
+ if File_proto_quickpizza_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_proto_quickpizza_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_proto_quickpizza_proto_goTypes,
+ DependencyIndexes: file_proto_quickpizza_proto_depIdxs,
+ MessageInfos: file_proto_quickpizza_proto_msgTypes,
+ }.Build()
+ File_proto_quickpizza_proto = out.File
+ file_proto_quickpizza_proto_rawDesc = nil
+ file_proto_quickpizza_proto_goTypes = nil
+ file_proto_quickpizza_proto_depIdxs = nil
+}
diff --git a/pkg/grpc/quickpizza/quickpizza_grpc.pb.go b/pkg/grpc/quickpizza/quickpizza_grpc.pb.go
new file mode 100644
index 00000000..b79d06c1
--- /dev/null
+++ b/pkg/grpc/quickpizza/quickpizza_grpc.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.28.3
+// source: proto/quickpizza.proto
+
+package quickpizza
+
+import (
+ context "context"
+
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ GRPC_Status_FullMethodName = "/quickpizza.GRPC/Status"
+ GRPC_EvaluatePizza_FullMethodName = "/quickpizza.GRPC/EvaluatePizza"
+)
+
+// GRPCClient is the client API for GRPC service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type GRPCClient interface {
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+ EvaluatePizza(ctx context.Context, in *PizzaEvaluationRequest, opts ...grpc.CallOption) (*PizzaEvaluationResponse, error)
+}
+
+type gRPCClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewGRPCClient(cc grpc.ClientConnInterface) GRPCClient {
+ return &gRPCClient{cc}
+}
+
+func (c *gRPCClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(StatusResponse)
+ err := c.cc.Invoke(ctx, GRPC_Status_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *gRPCClient) EvaluatePizza(ctx context.Context, in *PizzaEvaluationRequest, opts ...grpc.CallOption) (*PizzaEvaluationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(PizzaEvaluationResponse)
+ err := c.cc.Invoke(ctx, GRPC_EvaluatePizza_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// GRPCServer is the server API for GRPC service.
+// All implementations must embed UnimplementedGRPCServer
+// for forward compatibility.
+type GRPCServer interface {
+ Status(context.Context, *StatusRequest) (*StatusResponse, error)
+ EvaluatePizza(context.Context, *PizzaEvaluationRequest) (*PizzaEvaluationResponse, error)
+ mustEmbedUnimplementedGRPCServer()
+}
+
+// UnimplementedGRPCServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedGRPCServer struct{}
+
+func (UnimplementedGRPCServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (UnimplementedGRPCServer) EvaluatePizza(context.Context, *PizzaEvaluationRequest) (*PizzaEvaluationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method EvaluatePizza not implemented")
+}
+func (UnimplementedGRPCServer) mustEmbedUnimplementedGRPCServer() {}
+func (UnimplementedGRPCServer) testEmbeddedByValue() {}
+
+// UnsafeGRPCServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to GRPCServer will
+// result in compilation errors.
+type UnsafeGRPCServer interface {
+ mustEmbedUnimplementedGRPCServer()
+}
+
+func RegisterGRPCServer(s grpc.ServiceRegistrar, srv GRPCServer) {
+ // If the following call panics, it indicates UnimplementedGRPCServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&GRPC_ServiceDesc, srv)
+}
+
+func _GRPC_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GRPCServer).Status(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: GRPC_Status_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GRPCServer).Status(ctx, req.(*StatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GRPC_EvaluatePizza_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PizzaEvaluationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GRPCServer).EvaluatePizza(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: GRPC_EvaluatePizza_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GRPCServer).EvaluatePizza(ctx, req.(*PizzaEvaluationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// GRPC_ServiceDesc is the grpc.ServiceDesc for GRPC service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var GRPC_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "quickpizza.GRPC",
+ HandlerType: (*GRPCServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Status",
+ Handler: _GRPC_Status_Handler,
+ },
+ {
+ MethodName: "EvaluatePizza",
+ Handler: _GRPC_EvaluatePizza_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/quickpizza.proto",
+}
diff --git a/pkg/http/http.go b/pkg/http/http.go
index 75579ef8..b339f770 100644
--- a/pkg/http/http.go
+++ b/pkg/http/http.go
@@ -16,6 +16,8 @@ import (
"strings"
"time"
+ "log/slog"
+
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/cors"
@@ -27,7 +29,6 @@ import (
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
- "golang.org/x/exp/slog"
k6 "github.com/grafana/pyroscope-go/x/k6"
"github.com/grafana/quickpizza/pkg/database"
diff --git a/pkg/logging/bun.go b/pkg/logging/bun.go
index b0ae98c6..34a2bee7 100644
--- a/pkg/logging/bun.go
+++ b/pkg/logging/bun.go
@@ -4,8 +4,9 @@ import (
"context"
"time"
+ "log/slog"
+
"github.com/uptrace/bun"
- "golang.org/x/exp/slog"
)
// validate interface compliance
diff --git a/pkg/logging/context_logger.go b/pkg/logging/context_logger.go
index bffb8161..3f6024c4 100644
--- a/pkg/logging/context_logger.go
+++ b/pkg/logging/context_logger.go
@@ -3,8 +3,9 @@ package logging
import (
"context"
+ "log/slog"
+
"go.opentelemetry.io/otel/trace"
- "golang.org/x/exp/slog"
)
type ContextLogger struct {
diff --git a/proto/quickpizza.proto b/proto/quickpizza.proto
new file mode 100644
index 00000000..14c93260
--- /dev/null
+++ b/proto/quickpizza.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+option go_package = "pkg/grpc/quickpizza";
+package quickpizza;
+
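+// GRPC exposes a readiness check and a toy pizza-rating RPC used by the k6 gRPC example.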
+service GRPC {
+ rpc Status(StatusRequest) returns (StatusResponse) {}
+ rpc EvaluatePizza(PizzaEvaluationRequest) returns (PizzaEvaluationResponse) {}
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+ bool ready = 1;
+}
+
+message PizzaEvaluationRequest {
+ repeated string ingredients = 1;
+ string dough = 2;
+}
+
+message PizzaEvaluationResponse {
+ int32 stars_rating = 1;
+}
diff --git a/run-tests.sh b/scripts/run-tests.sh
similarity index 100%
rename from run-tests.sh
rename to scripts/run-tests.sh
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/golang.org/x/exp/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/golang.org/x/exp/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
deleted file mode 100644
index 2c033dff..00000000
--- a/vendor/golang.org/x/exp/constraints/constraints.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package constraints defines a set of useful constraints to be used
-// with type parameters.
-package constraints
-
-// Signed is a constraint that permits any signed integer type.
-// If future releases of Go add new predeclared signed integer types,
-// this constraint will be modified to include them.
-type Signed interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64
-}
-
-// Unsigned is a constraint that permits any unsigned integer type.
-// If future releases of Go add new predeclared unsigned integer types,
-// this constraint will be modified to include them.
-type Unsigned interface {
- ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
-}
-
-// Integer is a constraint that permits any integer type.
-// If future releases of Go add new predeclared integer types,
-// this constraint will be modified to include them.
-type Integer interface {
- Signed | Unsigned
-}
-
-// Float is a constraint that permits any floating-point type.
-// If future releases of Go add new predeclared floating-point types,
-// this constraint will be modified to include them.
-type Float interface {
- ~float32 | ~float64
-}
-
-// Complex is a constraint that permits any complex numeric type.
-// If future releases of Go add new predeclared complex numeric types,
-// this constraint will be modified to include them.
-type Complex interface {
- ~complex64 | ~complex128
-}
-
-// Ordered is a constraint that permits any ordered type: any type
-// that supports the operators < <= >= >.
-// If future releases of Go add new ordered types,
-// this constraint will be modified to include them.
-type Ordered interface {
- Integer | Float | ~string
-}
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
deleted file mode 100644
index fbf1934a..00000000
--- a/vendor/golang.org/x/exp/slices/cmp.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// min is a version of the predeclared function from the Go 1.21 release.
-func min[T constraints.Ordered](a, b T) T {
- if a < b || isNaN(a) {
- return a
- }
- return b
-}
-
-// max is a version of the predeclared function from the Go 1.21 release.
-func max[T constraints.Ordered](a, b T) T {
- if a > b || isNaN(a) {
- return a
- }
- return b
-}
-
-// cmpLess is a copy of cmp.Less from the Go 1.21 release.
-func cmpLess[T constraints.Ordered](x, y T) bool {
- return (isNaN(x) && !isNaN(y)) || x < y
-}
-
-// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
-func cmpCompare[T constraints.Ordered](x, y T) int {
- xNaN := isNaN(x)
- yNaN := isNaN(y)
- if xNaN && yNaN {
- return 0
- }
- if xNaN || x < y {
- return -1
- }
- if yNaN || x > y {
- return +1
- }
- return 0
-}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
deleted file mode 100644
index 5e8158bb..00000000
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ /dev/null
@@ -1,499 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package slices defines various functions useful with slices of any type.
-package slices
-
-import (
- "unsafe"
-
- "golang.org/x/exp/constraints"
-)
-
-// Equal reports whether two slices are equal: the same length and all
-// elements equal. If the lengths are different, Equal returns false.
-// Otherwise, the elements are compared in increasing index order, and the
-// comparison stops at the first unequal pair.
-// Floating point NaNs are not considered equal.
-func Equal[S ~[]E, E comparable](s1, s2 S) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i := range s1 {
- if s1[i] != s2[i] {
- return false
- }
- }
- return true
-}
-
-// EqualFunc reports whether two slices are equal using an equality
-// function on each pair of elements. If the lengths are different,
-// EqualFunc returns false. Otherwise, the elements are compared in
-// increasing index order, and the comparison stops at the first index
-// for which eq returns false.
-func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, v1 := range s1 {
- v2 := s2[i]
- if !eq(v1, v2) {
- return false
- }
- }
- return true
-}
-
-// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
-// of elements. The elements are compared sequentially, starting at index 0,
-// until one element is not equal to the other.
-// The result of comparing the first non-matching elements is returned.
-// If both slices are equal until one of them ends, the shorter slice is
-// considered less than the longer one.
-// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmpCompare(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
-}
-
-// CompareFunc is like [Compare] but uses a custom comparison function on each
-// pair of elements.
-// The result is the first non-zero result of cmp; if cmp always
-// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
-// and +1 if len(s1) > len(s2).
-func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmp(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
-}
-
-// Index returns the index of the first occurrence of v in s,
-// or -1 if not present.
-func Index[S ~[]E, E comparable](s S, v E) int {
- for i := range s {
- if v == s[i] {
- return i
- }
- }
- return -1
-}
-
-// IndexFunc returns the first index i satisfying f(s[i]),
-// or -1 if none do.
-func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
- for i := range s {
- if f(s[i]) {
- return i
- }
- }
- return -1
-}
-
-// Contains reports whether v is present in s.
-func Contains[S ~[]E, E comparable](s S, v E) bool {
- return Index(s, v) >= 0
-}
-
-// ContainsFunc reports whether at least one
-// element e of s satisfies f(e).
-func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
- return IndexFunc(s, f) >= 0
-}
-
-// Insert inserts the values v... into s at index i,
-// returning the modified slice.
-// The elements at s[i:] are shifted up to make room.
-// In the returned slice r, r[i] == v[0],
-// and r[i+len(v)] == value originally at r[i].
-// Insert panics if i is out of range.
-// This function is O(len(s) + len(v)).
-func Insert[S ~[]E, E any](s S, i int, v ...E) S {
- m := len(v)
- if m == 0 {
- return s
- }
- n := len(s)
- if i == n {
- return append(s, v...)
- }
- if n+m > cap(s) {
- // Use append rather than make so that we bump the size of
- // the slice up to the next storage class.
- // This is what Grow does but we don't call Grow because
- // that might copy the values twice.
- s2 := append(s[:i], make(S, n+m-i)...)
- copy(s2[i:], v)
- copy(s2[i+m:], s[i:])
- return s2
- }
- s = s[:n+m]
-
- // before:
- // s: aaaaaaaabbbbccccccccdddd
- // ^ ^ ^ ^
- // i i+m n n+m
- // after:
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- //
- // a are the values that don't move in s.
- // v are the values copied in from v.
- // b and c are the values from s that are shifted up in index.
- // d are the values that get overwritten, never to be seen again.
-
- if !overlaps(v, s[i+m:]) {
- // Easy case - v does not overlap either the c or d regions.
- // (It might be in some of a or b, or elsewhere entirely.)
- // The data we copy up doesn't write to v at all, so just do it.
-
- copy(s[i+m:], s[i:])
-
- // Now we have
- // s: aaaaaaaabbbbbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // Note the b values are duplicated.
-
- copy(s[i:], v)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
- }
-
- // The hard case - v overlaps c or d. We can't just shift up
- // the data because we'd move or clobber the values we're trying
- // to insert.
- // So instead, write v on top of d, then rotate.
- copy(s[n:], v)
-
- // Now we have
- // s: aaaaaaaabbbbccccccccvvvv
- // ^ ^ ^ ^
- // i i+m n n+m
-
- rotateRight(s[i:], m)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
-}
-
-// Delete removes the elements s[i:j] from s, returning the modified slice.
-// Delete panics if s[i:j] is not a valid slice of s.
-// Delete is O(len(s)-j), so if many items must be deleted, it is better to
-// make a single call deleting them all together than to delete one at a time.
-// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
-// elements contain pointers you might consider zeroing those elements so that
-// objects they reference can be garbage collected.
-func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j] // bounds check
-
- return append(s[:i], s[j:]...)
-}
-
-// DeleteFunc removes any elements from s for which del returns true,
-// returning the modified slice.
-// When DeleteFunc removes m elements, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage
-// collected.
-func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
- i := IndexFunc(s, del)
- if i == -1 {
- return s
- }
- // Don't start copying elements until we find one to delete.
- for j := i + 1; j < len(s); j++ {
- if v := s[j]; !del(v) {
- s[i] = v
- i++
- }
- }
- return s[:i]
-}
-
-// Replace replaces the elements s[i:j] by the given v, and returns the
-// modified slice. Replace panics if s[i:j] is not a valid slice of s.
-func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
- _ = s[i:j] // verify that i:j is a valid subslice
-
- if i == j {
- return Insert(s, i, v...)
- }
- if j == len(s) {
- return append(s[:i], v...)
- }
-
- tot := len(s[:i]) + len(v) + len(s[j:])
- if tot > cap(s) {
- // Too big to fit, allocate and copy over.
- s2 := append(s[:i], make(S, tot-i)...) // See Insert
- copy(s2[i:], v)
- copy(s2[i+len(v):], s[j:])
- return s2
- }
-
- r := s[:tot]
-
- if i+len(v) <= j {
- // Easy, as v fits in the deleted portion.
- copy(r[i:], v)
- if i+len(v) != j {
- copy(r[i+len(v):], s[j:])
- }
- return r
- }
-
- // We are expanding (v is bigger than j-i).
- // The situation is something like this:
- // (example has i=4,j=8,len(s)=16,len(v)=6)
- // s: aaaaxxxxbbbbbbbbyy
- // ^ ^ ^ ^
- // i j len(s) tot
- // a: prefix of s
- // x: deleted range
- // b: more of s
- // y: area to expand into
-
- if !overlaps(r[i+len(v):], v) {
- // Easy, as v is not clobbered by the first copy.
- copy(r[i+len(v):], s[j:])
- copy(r[i:], v)
- return r
- }
-
- // This is a situation where we don't have a single place to which
- // we can copy v. Parts of it need to go to two different places.
- // We want to copy the prefix of v into y and the suffix into x, then
- // rotate |y| spots to the right.
- //
- // v[2:] v[:2]
- // | |
- // s: aaaavvvvbbbbbbbbvv
- // ^ ^ ^ ^
- // i j len(s) tot
- //
- // If either of those two destinations don't alias v, then we're good.
- y := len(v) - (j - i) // length of y portion
-
- if !overlaps(r[i:j], v) {
- copy(r[i:j], v[y:])
- copy(r[len(s):], v[:y])
- rotateRight(r[i:], y)
- return r
- }
- if !overlaps(r[len(s):], v) {
- copy(r[len(s):], v[:y])
- copy(r[i:j], v[y:])
- rotateRight(r[i:], y)
- return r
- }
-
- // Now we know that v overlaps both x and y.
- // That means that the entirety of b is *inside* v.
- // So we don't need to preserve b at all; instead we
- // can copy v first, then copy the b part of v out of
- // v to the right destination.
- k := startIdx(v, s[j:])
- copy(r[i:], v)
- copy(r[i+len(v):], r[i+k:])
- return r
-}
-
-// Clone returns a copy of the slice.
-// The elements are copied using assignment, so this is a shallow clone.
-func Clone[S ~[]E, E any](s S) S {
- // Preserve nil in case it matters.
- if s == nil {
- return nil
- }
- return append(S([]E{}), s...)
-}
-
-// Compact replaces consecutive runs of equal elements with a single copy.
-// This is like the uniq command found on Unix.
-// Compact modifies the contents of the slice s and returns the modified slice,
-// which may have a smaller length.
-// When Compact discards m elements in total, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage collected.
-func Compact[S ~[]E, E comparable](s S) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if s[k] != s[k-1] {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- return s[:i]
-}
-
-// CompactFunc is like [Compact] but uses an equality function to compare elements.
-// For runs of elements that compare equal, CompactFunc keeps the first one.
-func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if !eq(s[k], s[k-1]) {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- return s[:i]
-}
-
-// Grow increases the slice's capacity, if necessary, to guarantee space for
-// another n elements. After Grow(n), at least n elements can be appended
-// to the slice without another allocation. If n is negative or too large to
-// allocate the memory, Grow panics.
-func Grow[S ~[]E, E any](s S, n int) S {
- if n < 0 {
- panic("cannot be negative")
- }
- if n -= cap(s) - len(s); n > 0 {
- // TODO(https://go.dev/issue/53888): Make using []E instead of S
- // to workaround a compiler bug where the runtime.growslice optimization
- // does not take effect. Revert when the compiler is fixed.
- s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
- }
- return s
-}
-
-// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
-func Clip[S ~[]E, E any](s S) S {
- return s[:len(s):len(s)]
-}
-
-// Rotation algorithm explanation:
-//
-// rotate left by 2
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join first parts
-// 89234567 01
-// recursively rotate first left part by 2
-// 23456789 01
-// join at the end
-// 2345678901
-//
-// rotate left by 8
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join last parts
-// 89 23456701
-// recursively rotate second part left by 6
-// 89 01234567
-// join at the end
-// 8901234567
-
-// TODO: There are other rotate algorithms.
-// This algorithm has the desirable property that it moves each element exactly twice.
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
-// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
-
-// rotateLeft rotates b left by n spaces.
-// s_final[i] = s_orig[i+r], wrapping around.
-func rotateLeft[E any](s []E, r int) {
- for r != 0 && r != len(s) {
- if r*2 <= len(s) {
- swap(s[:r], s[len(s)-r:])
- s = s[:len(s)-r]
- } else {
- swap(s[:len(s)-r], s[r:])
- s, r = s[len(s)-r:], r*2-len(s)
- }
- }
-}
-func rotateRight[E any](s []E, r int) {
- rotateLeft(s, len(s)-r)
-}
-
-// swap swaps the contents of x and y. x and y must be equal length and disjoint.
-func swap[E any](x, y []E) {
- for i := 0; i < len(x); i++ {
- x[i], y[i] = y[i], x[i]
- }
-}
-
-// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
-func overlaps[E any](a, b []E) bool {
- if len(a) == 0 || len(b) == 0 {
- return false
- }
- elemSize := unsafe.Sizeof(a[0])
- if elemSize == 0 {
- return false
- }
- // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
- // Also see crypto/internal/alias/alias.go:AnyOverlap
- return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
- uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
-}
-
-// startIdx returns the index in haystack where the needle starts.
-// prerequisite: the needle must be aliased entirely inside the haystack.
-func startIdx[E any](haystack, needle []E) int {
- p := &needle[0]
- for i := range haystack {
- if p == &haystack[i] {
- return i
- }
- }
- // TODO: what if the overlap is by a non-integral number of Es?
- panic("needle not found")
-}
-
-// Reverse reverses the elements of the slice in place.
-func Reverse[S ~[]E, E any](s S) {
- for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
- s[i], s[j] = s[j], s[i]
- }
-}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
deleted file mode 100644
index b67897f7..00000000
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
-
-package slices
-
-import (
- "math/bits"
-
- "golang.org/x/exp/constraints"
-)
-
-// Sort sorts a slice of any ordered type in ascending order.
-// When sorting floating-point numbers, NaNs are ordered before other values.
-func Sort[S ~[]E, E constraints.Ordered](x S) {
- n := len(x)
- pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
-}
-
-// SortFunc sorts the slice x in ascending order as determined by the cmp
-// function. This sort is not guaranteed to be stable.
-// cmp(a, b) should return a negative number when a < b, a positive number when
-// a > b and zero when a == b.
-//
-// SortFunc requires that cmp is a strict weak ordering.
-// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
-func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- n := len(x)
- pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
-}
-
-// SortStableFunc sorts the slice x while keeping the original order of equal
-// elements, using cmp to compare elements in the same way as [SortFunc].
-func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- stableCmpFunc(x, len(x), cmp)
-}
-
-// IsSorted reports whether x is sorted in ascending order.
-func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmpLess(x[i], x[i-1]) {
- return false
- }
- }
- return true
-}
-
-// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
-// comparison function as defined by [SortFunc].
-func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmp(x[i], x[i-1]) < 0 {
- return false
- }
- }
- return true
-}
-
-// Min returns the minimal value in x. It panics if x is empty.
-// For floating-point numbers, Min propagates NaNs (any NaN value in x
-// forces the output to be NaN).
-func Min[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Min: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = min(m, x[i])
- }
- return m
-}
-
-// MinFunc returns the minimal value in x, using cmp to compare elements.
-// It panics if x is empty. If there is more than one minimal element
-// according to the cmp function, MinFunc returns the first one.
-func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MinFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) < 0 {
- m = x[i]
- }
- }
- return m
-}
-
-// Max returns the maximal value in x. It panics if x is empty.
-// For floating-point E, Max propagates NaNs (any NaN value in x
-// forces the output to be NaN).
-func Max[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Max: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = max(m, x[i])
- }
- return m
-}
-
-// MaxFunc returns the maximal value in x, using cmp to compare elements.
-// It panics if x is empty. If there is more than one maximal element
-// according to the cmp function, MaxFunc returns the first one.
-func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MaxFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) > 0 {
- m = x[i]
- }
- }
- return m
-}
-
-// BinarySearch searches for target in a sorted slice and returns the position
-// where target is found, or the position where target would appear in the
-// sort order; it also returns a bool saying whether the target is really found
-// in the slice. The slice must be sorted in increasing order.
-func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
- // Inlining is faster than calling BinarySearchFunc with a lambda.
- n := len(x)
- // Define x[-1] < target and x[n] >= target.
- // Invariant: x[i-1] < target, x[j] >= target.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmpLess(x[h], target) {
- i = h + 1 // preserves x[i-1] < target
- } else {
- j = h // preserves x[j] >= target
- }
- }
- // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
- return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
-}
-
-// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
-// function. The slice must be sorted in increasing order, where "increasing"
-// is defined by cmp. cmp should return 0 if the slice element matches
-// the target, a negative number if the slice element precedes the target,
-// or a positive number if the slice element follows the target.
-// cmp must implement the same ordering as the slice, such that if
-// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
-func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
- n := len(x)
- // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
- // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmp(x[h], target) < 0 {
- i = h + 1 // preserves cmp(x[i - 1], target) < 0
- } else {
- j = h // preserves cmp(x[j], target) >= 0
- }
- }
- // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
- return i, i < n && cmp(x[i], target) == 0
-}
-
-type sortedHint int // hint for pdqsort when choosing the pivot
-
-const (
- unknownHint sortedHint = iota
- increasingHint
- decreasingHint
-)
-
-// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-type xorshift uint64
-
-func (r *xorshift) Next() uint64 {
- *r ^= *r << 13
- *r ^= *r >> 17
- *r ^= *r << 5
- return uint64(*r)
-}
-
-func nextPowerOfTwo(length int) uint {
- return 1 << bits.Len(uint(length))
-}
-
-// isNaN reports whether x is a NaN without requiring the math package.
-// This will always return false if T is not floating-point.
-func isNaN[T constraints.Ordered](x T) bool {
- return x != x
-}
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
deleted file mode 100644
index 06f2c7a2..00000000
--- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-// insertionSortCmpFunc sorts data[a:b] using insertion sort.
-func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownCmpFunc implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
- child++
- }
- if !(cmp(data[first+root], data[first+child]) < 0) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownCmpFunc(data, i, hi, first, cmp)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownCmpFunc(data, lo, i, first, cmp)
- }
-}
-
-// pdqsortCmpFunc sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsCmpFunc(data, a, b, cmp)
- limit--
- }
-
- pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
- if hint == decreasingHint {
- reverseRangeCmpFunc(data, a, b, cmp)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortCmpFunc(data, a, b, cmp) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
- mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortCmpFunc(data, a, mid, limit, cmp)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortCmpFunc(data, mid+1, b, limit, cmp)
- b = mid
- }
- }
-}
-
-// partitionCmpFunc does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !(cmp(data[a], data[i]) < 0) {
- i++
- }
- for i <= j && (cmp(data[a], data[j]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !(cmp(data[i], data[i-1]) < 0) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotCmpFunc chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
- j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
- k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianCmpFunc(data, i, j, k, &swaps, cmp)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
- if cmp(data[b], data[a]) < 0 {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- b, c = order2CmpFunc(data, b, c, swaps, cmp)
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- return b
-}
-
-// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
- return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
-}
-
-func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortCmpFunc(data, a, b, cmp)
- a = b
- b += blockSize
- }
- insertionSortCmpFunc(data, a, n, cmp)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeCmpFunc(data, a, a+blockSize, b, cmp)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeCmpFunc(data, a, m, n, cmp)
- }
- blockSize *= 2
- }
-}
-
-// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmp(data[h], data[a]) < 0 {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !(cmp(data[m], data[h]) < 0) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !(cmp(data[p-c], data[c]) < 0) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateCmpFunc(data, start, m, end, cmp)
- }
- if a < start && start < mid {
- symMergeCmpFunc(data, a, start, mid, cmp)
- }
- if mid < end && end < b {
- symMergeCmpFunc(data, mid, end, b, cmp)
- }
-}
-
-// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeCmpFunc(data, m-i, m, j, cmp)
- i -= j
- } else {
- swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
- j -= i
- }
- }
- // i == j
- swapRangeCmpFunc(data, m-i, m, i, cmp)
-}
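
Note: zsortanyfunc.go above is the generated comparison-function variant of pdqsort that the vendored golang.org/x/exp/slices.SortFunc relied on. With the vendored copy removed, the equivalent entry point is the standard library slices package (Go 1.21+). A minimal sketch of the replacement call; the package and values below are illustrative, not part of this change:

    package main

    import (
        "fmt"
        "slices"
        "strings"
    )

    func main() {
        names := []string{"Margherita", "diavola", "Quattro Formaggi", "capricciosa"}

        // slices.SortFunc drives essentially the same pdqsort code that the
        // deleted zsortanyfunc.go contained, using a three-way cmp function.
        slices.SortFunc(names, func(a, b string) int {
            return strings.Compare(strings.ToLower(a), strings.ToLower(b))
        })

        fmt.Println(names)
    }
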
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
deleted file mode 100644
index 99b47c39..00000000
--- a/vendor/golang.org/x/exp/slices/zsortordered.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// insertionSortOrdered sorts data[a:b] using insertion sort.
-func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownOrdered implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
- child++
- }
- if !cmpLess(data[first+root], data[first+child]) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownOrdered(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownOrdered(data, lo, i, first)
- }
-}
-
-// pdqsortOrdered sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortOrdered(data, a, b)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortOrdered(data, a, b)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsOrdered(data, a, b)
- limit--
- }
-
- pivot, hint := choosePivotOrdered(data, a, b)
- if hint == decreasingHint {
- reverseRangeOrdered(data, a, b)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortOrdered(data, a, b) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !cmpLess(data[a-1], data[pivot]) {
- mid := partitionEqualOrdered(data, a, b, pivot)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortOrdered(data, a, mid, limit)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortOrdered(data, mid+1, b, limit)
- b = mid
- }
- }
-}
-
-// partitionOrdered does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !cmpLess(data[a], data[i]) {
- i++
- }
- for i <= j && cmpLess(data[a], data[j]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !cmpLess(data[i], data[i-1]) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotOrdered chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentOrdered(data, i, &swaps)
- j = medianAdjacentOrdered(data, j, &swaps)
- k = medianAdjacentOrdered(data, k, &swaps)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianOrdered(data, i, j, k, &swaps)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
- if cmpLess(data[b], data[a]) {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
- a, b = order2Ordered(data, a, b, swaps)
- b, c = order2Ordered(data, b, c, swaps)
- a, b = order2Ordered(data, a, b, swaps)
- return b
-}
-
-// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
- return medianOrdered(data, a-1, a, a+1, swaps)
-}
-
-func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableOrdered[E constraints.Ordered](data []E, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortOrdered(data, a, b)
- a = b
- b += blockSize
- }
- insertionSortOrdered(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeOrdered(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeOrdered(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmpLess(data[h], data[a]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !cmpLess(data[m], data[h]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !cmpLess(data[p-c], data[c]) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateOrdered(data, start, m, end)
- }
- if a < start && start < mid {
- symMergeOrdered(data, a, start, mid)
- }
- if mid < end && end < b {
- symMergeOrdered(data, mid, end, b)
- }
-}
-
-// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeOrdered(data, m-i, m, j)
- i -= j
- } else {
- swapRangeOrdered(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRangeOrdered(data, m-i, m, i)
-}
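
Note: zsortordered.go is the constraints.Ordered twin of the file above; the standard library covers it with slices.Sort and the cmp.Ordered constraint. A small sketch, assuming Go 1.21 or newer:

    package main

    import (
        "cmp"
        "fmt"
        "slices"
    )

    // lowest uses cmp.Ordered, the standard-library replacement for the
    // golang.org/x/exp/constraints.Ordered constraint used by the deleted file.
    func lowest[E cmp.Ordered](values []E) E {
        return slices.Min(values)
    }

    func main() {
        ratings := []float64{4.7, 3.9, 5.0, 4.2}
        slices.Sort(ratings) // ordered-type pdqsort, as in zsortordered.go
        fmt.Println(ratings, "lowest:", lowest(ratings))
    }
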
diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go
deleted file mode 100644
index a180d0e1..00000000
--- a/vendor/golang.org/x/exp/slog/attr.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "fmt"
- "time"
-)
-
-// An Attr is a key-value pair.
-type Attr struct {
- Key string
- Value Value
-}
-
-// String returns an Attr for a string value.
-func String(key, value string) Attr {
- return Attr{key, StringValue(value)}
-}
-
-// Int64 returns an Attr for an int64.
-func Int64(key string, value int64) Attr {
- return Attr{key, Int64Value(value)}
-}
-
-// Int converts an int to an int64 and returns
-// an Attr with that value.
-func Int(key string, value int) Attr {
- return Int64(key, int64(value))
-}
-
-// Uint64 returns an Attr for a uint64.
-func Uint64(key string, v uint64) Attr {
- return Attr{key, Uint64Value(v)}
-}
-
-// Float64 returns an Attr for a floating-point number.
-func Float64(key string, v float64) Attr {
- return Attr{key, Float64Value(v)}
-}
-
-// Bool returns an Attr for a bool.
-func Bool(key string, v bool) Attr {
- return Attr{key, BoolValue(v)}
-}
-
-// Time returns an Attr for a time.Time.
-// It discards the monotonic portion.
-func Time(key string, v time.Time) Attr {
- return Attr{key, TimeValue(v)}
-}
-
-// Duration returns an Attr for a time.Duration.
-func Duration(key string, v time.Duration) Attr {
- return Attr{key, DurationValue(v)}
-}
-
-// Group returns an Attr for a Group Value.
-// The first argument is the key; the remaining arguments
-// are converted to Attrs as in [Logger.Log].
-//
-// Use Group to collect several key-value pairs under a single
-// key on a log line, or as the result of LogValue
-// in order to log a single value as multiple Attrs.
-func Group(key string, args ...any) Attr {
- return Attr{key, GroupValue(argsToAttrSlice(args)...)}
-}
-
-func argsToAttrSlice(args []any) []Attr {
- var (
- attr Attr
- attrs []Attr
- )
- for len(args) > 0 {
- attr, args = argsToAttr(args)
- attrs = append(attrs, attr)
- }
- return attrs
-}
-
-// Any returns an Attr for the supplied value.
-// See [Value.AnyValue] for how values are treated.
-func Any(key string, value any) Attr {
- return Attr{key, AnyValue(value)}
-}
-
-// Equal reports whether a and b have equal keys and values.
-func (a Attr) Equal(b Attr) bool {
- return a.Key == b.Key && a.Value.Equal(b.Value)
-}
-
-func (a Attr) String() string {
- return fmt.Sprintf("%s=%s", a.Key, a.Value)
-}
-
-// isEmpty reports whether a has an empty key and a nil value.
-// That can be written as Attr{} or Any("", nil).
-func (a Attr) isEmpty() bool {
- return a.Key == "" && a.Value.num == 0 && a.Value.any == nil
-}
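
Note: the deleted attr.go defined the Attr constructors (String, Int, Group, ...). The standard library log/slog ships the same constructors with the same semantics, so call sites only need the import swapped. A hedged sketch; keys and values are illustrative:

    package main

    import (
        "log/slog"
        "os"
        "time"
    )

    func main() {
        logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

        // String, Int, Duration and Group below are the stdlib counterparts
        // of the constructors defined in the deleted attr.go.
        logger.Info("order received",
            slog.String("pizza", "Margherita"),
            slog.Int("ingredients", 4),
            slog.Duration("took", 42*time.Millisecond),
            slog.Group("request",
                slog.String("method", "GET"),
                slog.String("path", "/api/pizza"),
            ),
        )
    }
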
diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go
deleted file mode 100644
index 4beaf867..00000000
--- a/vendor/golang.org/x/exp/slog/doc.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package slog provides structured logging,
-in which log records include a message,
-a severity level, and various other attributes
-expressed as key-value pairs.
-
-It defines a type, [Logger],
-which provides several methods (such as [Logger.Info] and [Logger.Error])
-for reporting events of interest.
-
-Each Logger is associated with a [Handler].
-A Logger output method creates a [Record] from the method arguments
-and passes it to the Handler, which decides how to handle it.
-There is a default Logger accessible through top-level functions
-(such as [Info] and [Error]) that call the corresponding Logger methods.
-
-A log record consists of a time, a level, a message, and a set of key-value
-pairs, where the keys are strings and the values may be of any type.
-As an example,
-
- slog.Info("hello", "count", 3)
-
-creates a record containing the time of the call,
-a level of Info, the message "hello", and a single
-pair with key "count" and value 3.
-
-The [Info] top-level function calls the [Logger.Info] method on the default Logger.
-In addition to [Logger.Info], there are methods for Debug, Warn and Error levels.
-Besides these convenience methods for common levels,
-there is also a [Logger.Log] method which takes the level as an argument.
-Each of these methods has a corresponding top-level function that uses the
-default logger.
-
-The default handler formats the log record's message, time, level, and attributes
-as a string and passes it to the [log] package.
-
- 2022/11/08 15:28:26 INFO hello count=3
-
-For more control over the output format, create a logger with a different handler.
-This statement uses [New] to create a new logger with a TextHandler
-that writes structured records in text form to standard error:
-
- logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
-
-[TextHandler] output is a sequence of key=value pairs, easily and unambiguously
-parsed by machine. This statement:
-
- logger.Info("hello", "count", 3)
-
-produces this output:
-
- time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3
-
-The package also provides [JSONHandler], whose output is line-delimited JSON:
-
- logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
- logger.Info("hello", "count", 3)
-
-produces this output:
-
- {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3}
-
-Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions].
-There are options for setting the minimum level (see Levels, below),
-displaying the source file and line of the log call, and
-modifying attributes before they are logged.
-
-Setting a logger as the default with
-
- slog.SetDefault(logger)
-
-will cause the top-level functions like [Info] to use it.
-[SetDefault] also updates the default logger used by the [log] package,
-so that existing applications that use [log.Printf] and related functions
-will send log records to the logger's handler without needing to be rewritten.
-
-Some attributes are common to many log calls.
-For example, you may wish to include the URL or trace identifier of a server request
-with all log events arising from the request.
-Rather than repeat the attribute with every log call, you can use [Logger.With]
-to construct a new Logger containing the attributes:
-
- logger2 := logger.With("url", r.URL)
-
-The arguments to With are the same key-value pairs used in [Logger.Info].
-The result is a new Logger with the same handler as the original, but additional
-attributes that will appear in the output of every call.
-
-# Levels
-
-A [Level] is an integer representing the importance or severity of a log event.
-The higher the level, the more severe the event.
-This package defines constants for the most common levels,
-but any int can be used as a level.
-
-In an application, you may wish to log messages only at a certain level or greater.
-One common configuration is to log messages at Info or higher levels,
-suppressing debug logging until it is needed.
-The built-in handlers can be configured with the minimum level to output by
-setting [HandlerOptions.Level].
-The program's `main` function typically does this.
-The default value is LevelInfo.
-
-Setting the [HandlerOptions.Level] field to a [Level] value
-fixes the handler's minimum level throughout its lifetime.
-Setting it to a [LevelVar] allows the level to be varied dynamically.
-A LevelVar holds a Level and is safe to read or write from multiple
-goroutines.
-To vary the level dynamically for an entire program, first initialize
-a global LevelVar:
-
- var programLevel = new(slog.LevelVar) // Info by default
-
-Then use the LevelVar to construct a handler, and make it the default:
-
- h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel})
- slog.SetDefault(slog.New(h))
-
-Now the program can change its logging level with a single statement:
-
- programLevel.Set(slog.LevelDebug)
-
-# Groups
-
-Attributes can be collected into groups.
-A group has a name that is used to qualify the names of its attributes.
-How this qualification is displayed depends on the handler.
-[TextHandler] separates the group and attribute names with a dot.
-[JSONHandler] treats each group as a separate JSON object, with the group name as the key.
-
-Use [Group] to create a Group attribute from a name and a list of key-value pairs:
-
- slog.Group("request",
- "method", r.Method,
- "url", r.URL)
-
-TextHandler would display this group as
-
- request.method=GET request.url=http://example.com
-
-JSONHandler would display it as
-
- "request":{"method":"GET","url":"http://example.com"}
-
-Use [Logger.WithGroup] to qualify all of a Logger's output
-with a group name. Calling WithGroup on a Logger results in a
-new Logger with the same Handler as the original, but with all
-its attributes qualified by the group name.
-
-This can help prevent duplicate attribute keys in large systems,
-where subsystems might use the same keys.
-Pass each subsystem a different Logger with its own group name so that
-potential duplicates are qualified:
-
- logger := slog.Default().With("id", systemID)
- parserLogger := logger.WithGroup("parser")
- parseInput(input, parserLogger)
-
-When parseInput logs with parserLogger, its keys will be qualified with "parser",
-so even if it uses the common key "id", the log line will have distinct keys.
-
-# Contexts
-
-Some handlers may wish to include information from the [context.Context] that is
-available at the call site. One example of such information
-is the identifier for the current span when tracing is enabled.
-
-The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first
-argument, as do their corresponding top-level functions.
-
-Although the convenience methods on Logger (Info and so on) and the
-corresponding top-level functions do not take a context, the alternatives ending
-in "Context" do. For example,
-
- slog.InfoContext(ctx, "message")
-
-It is recommended to pass a context to an output method if one is available.
-
-# Attrs and Values
-
-An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as
-alternating keys and values. The statement
-
- slog.Info("hello", slog.Int("count", 3))
-
-behaves the same as
-
- slog.Info("hello", "count", 3)
-
-There are convenience constructors for [Attr] such as [Int], [String], and [Bool]
-for common types, as well as the function [Any] for constructing Attrs of any
-type.
-
-The value part of an Attr is a type called [Value].
-Like an [any], a Value can hold any Go value,
-but it can represent typical values, including all numbers and strings,
-without an allocation.
-
-For the most efficient log output, use [Logger.LogAttrs].
-It is similar to [Logger.Log] but accepts only Attrs, not alternating
-keys and values; this allows it, too, to avoid allocation.
-
-The call
-
- logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3))
-
-is the most efficient way to achieve the same output as
-
- slog.Info("hello", "count", 3)
-
-# Customizing a type's logging behavior
-
-If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
-method is used for logging. You can use this to control how values of the type
-appear in logs. For example, you can redact secret information like passwords,
-or gather a struct's fields in a Group. See the examples under [LogValuer] for
-details.
-
-A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
-method handles these cases carefully, avoiding infinite loops and unbounded recursion.
-Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
-
-# Wrapping output methods
-
-The logger functions use reflection over the call stack to find the file name
-and line number of the logging call within the application. This can produce
-incorrect source information for functions that wrap slog. For instance, if you
-define this function in file mylog.go:
-
- func Infof(format string, args ...any) {
- slog.Default().Info(fmt.Sprintf(format, args...))
- }
-
-and you call it like this in main.go:
-
- Infof(slog.Default(), "hello, %s", "world")
-
-then slog will report the source file as mylog.go, not main.go.
-
-A correct implementation of Infof will obtain the source location
-(pc) and pass it to NewRecord.
-The Infof function in the package-level example called "wrapping"
-demonstrates how to do this.
-
-# Working with Records
-
-Sometimes a Handler will need to modify a Record
-before passing it on to another Handler or backend.
-A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
-and hidden fields that refer to state (such as attributes) indirectly. This
-means that modifying a simple copy of a Record (e.g. by calling
-[Record.Add] or [Record.AddAttrs] to add attributes)
-may have unexpected effects on the original.
-Before modifying a Record, use [Clone] to
-create a copy that shares no state with the original,
-or create a new Record with [NewRecord]
-and build up its Attrs by traversing the old ones with [Record.Attrs].
-
-# Performance considerations
-
-If profiling your application demonstrates that logging is taking significant time,
-the following suggestions may help.
-
-If many log lines have a common attribute, use [Logger.With] to create a Logger with
-that attribute. The built-in handlers will format that attribute only once, at the
-call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
-and a well-written Handler should take advantage of it.
-
-The arguments to a log call are always evaluated, even if the log event is discarded.
-If possible, defer computation so that it happens only if the value is actually logged.
-For example, consider the call
-
- slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
-
-The URL.String method will be called even if the logger discards Info-level events.
-Instead, pass the URL directly:
-
- slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
-
-The built-in [TextHandler] will call its String method, but only
-if the log event is enabled.
-Avoiding the call to String also preserves the structure of the underlying value.
-For example [JSONHandler] emits the components of the parsed URL as a JSON object.
-If you want to avoid eagerly paying the cost of the String call
-without causing the handler to potentially inspect the structure of the value,
-wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
-
-You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
-calls. Say you need to log some expensive value:
-
- slog.Debug("frobbing", "value", computeExpensiveValue(arg))
-
-Even if this line is disabled, computeExpensiveValue will be called.
-To avoid that, define a type implementing LogValuer:
-
- type expensive struct { arg int }
-
- func (e expensive) LogValue() slog.Value {
- return slog.AnyValue(computeExpensiveValue(e.arg))
- }
-
-Then use a value of that type in log calls:
-
- slog.Debug("frobbing", "value", expensive{arg})
-
-Now computeExpensiveValue will only be called when the line is enabled.
-
-The built-in handlers acquire a lock before calling [io.Writer.Write]
-to ensure that each record is written in one piece. User-defined
-handlers are responsible for their own locking.
-*/
-package slog
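
Note: the deleted doc.go describes levels, LevelVar, and handler selection; that guidance carries over unchanged to the standard library package. A short sketch of the dynamic-level setup it documents, using log/slog:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        // A zero LevelVar starts at LevelInfo and can be changed at runtime,
        // exactly as the deleted package documentation describes.
        var programLevel slog.LevelVar

        h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: &programLevel})
        slog.SetDefault(slog.New(h))

        slog.Debug("not emitted")         // below the current minimum level
        programLevel.Set(slog.LevelDebug) // e.g. toggled by a debug endpoint
        slog.Debug("now emitted", "count", 3)
    }
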
diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go
deleted file mode 100644
index 74f88738..00000000
--- a/vendor/golang.org/x/exp/slog/handler.go
+++ /dev/null
@@ -1,559 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "context"
- "fmt"
- "io"
- "strconv"
- "sync"
- "time"
-
- "golang.org/x/exp/slices"
- "golang.org/x/exp/slog/internal/buffer"
-)
-
-// A Handler handles log records produced by a Logger..
-//
-// A typical handler may print log records to standard error,
-// or write them to a file or database, or perhaps augment them
-// with additional attributes and pass them on to another handler.
-//
-// Any of the Handler's methods may be called concurrently with itself
-// or with other methods. It is the responsibility of the Handler to
-// manage this concurrency.
-//
-// Users of the slog package should not invoke Handler methods directly.
-// They should use the methods of [Logger] instead.
-type Handler interface {
- // Enabled reports whether the handler handles records at the given level.
- // The handler ignores records whose level is lower.
- // It is called early, before any arguments are processed,
- // to save effort if the log event should be discarded.
- // If called from a Logger method, the first argument is the context
- // passed to that method, or context.Background() if nil was passed
- // or the method does not take a context.
- // The context is passed so Enabled can use its values
- // to make a decision.
- Enabled(context.Context, Level) bool
-
- // Handle handles the Record.
- // It will only be called when Enabled returns true.
- // The Context argument is as for Enabled.
- // It is present solely to provide Handlers access to the context's values.
- // Canceling the context should not affect record processing.
- // (Among other things, log messages may be necessary to debug a
- // cancellation-related problem.)
- //
- // Handle methods that produce output should observe the following rules:
- // - If r.Time is the zero time, ignore the time.
- // - If r.PC is zero, ignore it.
- // - Attr's values should be resolved.
- // - If an Attr's key and value are both the zero value, ignore the Attr.
- // This can be tested with attr.Equal(Attr{}).
- // - If a group's key is empty, inline the group's Attrs.
- // - If a group has no Attrs (even if it has a non-empty key),
- // ignore it.
- Handle(context.Context, Record) error
-
- // WithAttrs returns a new Handler whose attributes consist of
- // both the receiver's attributes and the arguments.
- // The Handler owns the slice: it may retain, modify or discard it.
- WithAttrs(attrs []Attr) Handler
-
- // WithGroup returns a new Handler with the given group appended to
- // the receiver's existing groups.
- // The keys of all subsequent attributes, whether added by With or in a
- // Record, should be qualified by the sequence of group names.
- //
- // How this qualification happens is up to the Handler, so long as
- // this Handler's attribute keys differ from those of another Handler
- // with a different sequence of group names.
- //
- // A Handler should treat WithGroup as starting a Group of Attrs that ends
- // at the end of the log event. That is,
- //
- // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2))
- //
- // should behave like
- //
- // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2)))
- //
- // If the name is empty, WithGroup returns the receiver.
- WithGroup(name string) Handler
-}
-
-type defaultHandler struct {
- ch *commonHandler
- // log.Output, except for testing
- output func(calldepth int, message string) error
-}
-
-func newDefaultHandler(output func(int, string) error) *defaultHandler {
- return &defaultHandler{
- ch: &commonHandler{json: false},
- output: output,
- }
-}
-
-func (*defaultHandler) Enabled(_ context.Context, l Level) bool {
- return l >= LevelInfo
-}
-
-// Collect the level, attributes and message in a string and
-// write it with the default log.Logger.
-// Let the log.Logger handle time and file/line.
-func (h *defaultHandler) Handle(ctx context.Context, r Record) error {
- buf := buffer.New()
- buf.WriteString(r.Level.String())
- buf.WriteByte(' ')
- buf.WriteString(r.Message)
- state := h.ch.newHandleState(buf, true, " ", nil)
- defer state.free()
- state.appendNonBuiltIns(r)
-
- // skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output]
- return h.output(4, buf.String())
-}
-
-func (h *defaultHandler) WithAttrs(as []Attr) Handler {
- return &defaultHandler{h.ch.withAttrs(as), h.output}
-}
-
-func (h *defaultHandler) WithGroup(name string) Handler {
- return &defaultHandler{h.ch.withGroup(name), h.output}
-}
-
-// HandlerOptions are options for a TextHandler or JSONHandler.
-// A zero HandlerOptions consists entirely of default values.
-type HandlerOptions struct {
- // AddSource causes the handler to compute the source code position
- // of the log statement and add a SourceKey attribute to the output.
- AddSource bool
-
- // Level reports the minimum record level that will be logged.
- // The handler discards records with lower levels.
- // If Level is nil, the handler assumes LevelInfo.
- // The handler calls Level.Level for each record processed;
- // to adjust the minimum level dynamically, use a LevelVar.
- Level Leveler
-
- // ReplaceAttr is called to rewrite each non-group attribute before it is logged.
- // The attribute's value has been resolved (see [Value.Resolve]).
- // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded.
- //
- // The built-in attributes with keys "time", "level", "source", and "msg"
- // are passed to this function, except that time is omitted
- // if zero, and source is omitted if AddSource is false.
- //
- // The first argument is a list of currently open groups that contain the
- // Attr. It must not be retained or modified. ReplaceAttr is never called
- // for Group attributes, only their contents. For example, the attribute
- // list
- //
- // Int("a", 1), Group("g", Int("b", 2)), Int("c", 3)
- //
- // results in consecutive calls to ReplaceAttr with the following arguments:
- //
- // nil, Int("a", 1)
- // []string{"g"}, Int("b", 2)
- // nil, Int("c", 3)
- //
- // ReplaceAttr can be used to change the default keys of the built-in
- // attributes, convert types (for example, to replace a `time.Time` with the
- // integer seconds since the Unix epoch), sanitize personal information, or
- // remove attributes from the output.
- ReplaceAttr func(groups []string, a Attr) Attr
-}
-
-// Keys for "built-in" attributes.
-const (
- // TimeKey is the key used by the built-in handlers for the time
- // when the log method is called. The associated Value is a [time.Time].
- TimeKey = "time"
- // LevelKey is the key used by the built-in handlers for the level
- // of the log call. The associated value is a [Level].
- LevelKey = "level"
- // MessageKey is the key used by the built-in handlers for the
- // message of the log call. The associated value is a string.
- MessageKey = "msg"
- // SourceKey is the key used by the built-in handlers for the source file
- // and line of the log call. The associated value is a string.
- SourceKey = "source"
-)
-
-type commonHandler struct {
- json bool // true => output JSON; false => output text
- opts HandlerOptions
- preformattedAttrs []byte
- groupPrefix string // for text: prefix of groups opened in preformatting
- groups []string // all groups started from WithGroup
- nOpenGroups int // the number of groups opened in preformattedAttrs
- mu sync.Mutex
- w io.Writer
-}
-
-func (h *commonHandler) clone() *commonHandler {
- // We can't use assignment because we can't copy the mutex.
- return &commonHandler{
- json: h.json,
- opts: h.opts,
- preformattedAttrs: slices.Clip(h.preformattedAttrs),
- groupPrefix: h.groupPrefix,
- groups: slices.Clip(h.groups),
- nOpenGroups: h.nOpenGroups,
- w: h.w,
- }
-}
-
-// enabled reports whether l is greater than or equal to the
-// minimum level.
-func (h *commonHandler) enabled(l Level) bool {
- minLevel := LevelInfo
- if h.opts.Level != nil {
- minLevel = h.opts.Level.Level()
- }
- return l >= minLevel
-}
-
-func (h *commonHandler) withAttrs(as []Attr) *commonHandler {
- h2 := h.clone()
- // Pre-format the attributes as an optimization.
- prefix := buffer.New()
- defer prefix.Free()
- prefix.WriteString(h.groupPrefix)
- state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix)
- defer state.free()
- if len(h2.preformattedAttrs) > 0 {
- state.sep = h.attrSep()
- }
- state.openGroups()
- for _, a := range as {
- state.appendAttr(a)
- }
- // Remember the new prefix for later keys.
- h2.groupPrefix = state.prefix.String()
- // Remember how many opened groups are in preformattedAttrs,
- // so we don't open them again when we handle a Record.
- h2.nOpenGroups = len(h2.groups)
- return h2
-}
-
-func (h *commonHandler) withGroup(name string) *commonHandler {
- if name == "" {
- return h
- }
- h2 := h.clone()
- h2.groups = append(h2.groups, name)
- return h2
-}
-
-func (h *commonHandler) handle(r Record) error {
- state := h.newHandleState(buffer.New(), true, "", nil)
- defer state.free()
- if h.json {
- state.buf.WriteByte('{')
- }
- // Built-in attributes. They are not in a group.
- stateGroups := state.groups
- state.groups = nil // So ReplaceAttrs sees no groups instead of the pre groups.
- rep := h.opts.ReplaceAttr
- // time
- if !r.Time.IsZero() {
- key := TimeKey
- val := r.Time.Round(0) // strip monotonic to match Attr behavior
- if rep == nil {
- state.appendKey(key)
- state.appendTime(val)
- } else {
- state.appendAttr(Time(key, val))
- }
- }
- // level
- key := LevelKey
- val := r.Level
- if rep == nil {
- state.appendKey(key)
- state.appendString(val.String())
- } else {
- state.appendAttr(Any(key, val))
- }
- // source
- if h.opts.AddSource {
- state.appendAttr(Any(SourceKey, r.source()))
- }
- key = MessageKey
- msg := r.Message
- if rep == nil {
- state.appendKey(key)
- state.appendString(msg)
- } else {
- state.appendAttr(String(key, msg))
- }
- state.groups = stateGroups // Restore groups passed to ReplaceAttrs.
- state.appendNonBuiltIns(r)
- state.buf.WriteByte('\n')
-
- h.mu.Lock()
- defer h.mu.Unlock()
- _, err := h.w.Write(*state.buf)
- return err
-}
-
-func (s *handleState) appendNonBuiltIns(r Record) {
- // preformatted Attrs
- if len(s.h.preformattedAttrs) > 0 {
- s.buf.WriteString(s.sep)
- s.buf.Write(s.h.preformattedAttrs)
- s.sep = s.h.attrSep()
- }
- // Attrs in Record -- unlike the built-in ones, they are in groups started
- // from WithGroup.
- s.prefix = buffer.New()
- defer s.prefix.Free()
- s.prefix.WriteString(s.h.groupPrefix)
- s.openGroups()
- r.Attrs(func(a Attr) bool {
- s.appendAttr(a)
- return true
- })
- if s.h.json {
- // Close all open groups.
- for range s.h.groups {
- s.buf.WriteByte('}')
- }
- // Close the top-level object.
- s.buf.WriteByte('}')
- }
-}
-
-// attrSep returns the separator between attributes.
-func (h *commonHandler) attrSep() string {
- if h.json {
- return ","
- }
- return " "
-}
-
-// handleState holds state for a single call to commonHandler.handle.
-// The initial value of sep determines whether to emit a separator
-// before the next key, after which it stays true.
-type handleState struct {
- h *commonHandler
- buf *buffer.Buffer
- freeBuf bool // should buf be freed?
- sep string // separator to write before next key
- prefix *buffer.Buffer // for text: key prefix
- groups *[]string // pool-allocated slice of active groups, for ReplaceAttr
-}
-
-var groupPool = sync.Pool{New: func() any {
- s := make([]string, 0, 10)
- return &s
-}}
-
-func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState {
- s := handleState{
- h: h,
- buf: buf,
- freeBuf: freeBuf,
- sep: sep,
- prefix: prefix,
- }
- if h.opts.ReplaceAttr != nil {
- s.groups = groupPool.Get().(*[]string)
- *s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...)
- }
- return s
-}
-
-func (s *handleState) free() {
- if s.freeBuf {
- s.buf.Free()
- }
- if gs := s.groups; gs != nil {
- *gs = (*gs)[:0]
- groupPool.Put(gs)
- }
-}
-
-func (s *handleState) openGroups() {
- for _, n := range s.h.groups[s.h.nOpenGroups:] {
- s.openGroup(n)
- }
-}
-
-// Separator for group names and keys.
-const keyComponentSep = '.'
-
-// openGroup starts a new group of attributes
-// with the given name.
-func (s *handleState) openGroup(name string) {
- if s.h.json {
- s.appendKey(name)
- s.buf.WriteByte('{')
- s.sep = ""
- } else {
- s.prefix.WriteString(name)
- s.prefix.WriteByte(keyComponentSep)
- }
- // Collect group names for ReplaceAttr.
- if s.groups != nil {
- *s.groups = append(*s.groups, name)
- }
-}
-
-// closeGroup ends the group with the given name.
-func (s *handleState) closeGroup(name string) {
- if s.h.json {
- s.buf.WriteByte('}')
- } else {
- (*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */]
- }
- s.sep = s.h.attrSep()
- if s.groups != nil {
- *s.groups = (*s.groups)[:len(*s.groups)-1]
- }
-}
-
-// appendAttr appends the Attr's key and value using app.
-// It handles replacement and checking for an empty key.
-// after replacement).
-func (s *handleState) appendAttr(a Attr) {
- if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
- var gs []string
- if s.groups != nil {
- gs = *s.groups
- }
- // Resolve before calling ReplaceAttr, so the user doesn't have to.
- a.Value = a.Value.Resolve()
- a = rep(gs, a)
- }
- a.Value = a.Value.Resolve()
- // Elide empty Attrs.
- if a.isEmpty() {
- return
- }
- // Special case: Source.
- if v := a.Value; v.Kind() == KindAny {
- if src, ok := v.Any().(*Source); ok {
- if s.h.json {
- a.Value = src.group()
- } else {
- a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line))
- }
- }
- }
- if a.Value.Kind() == KindGroup {
- attrs := a.Value.Group()
- // Output only non-empty groups.
- if len(attrs) > 0 {
- // Inline a group with an empty key.
- if a.Key != "" {
- s.openGroup(a.Key)
- }
- for _, aa := range attrs {
- s.appendAttr(aa)
- }
- if a.Key != "" {
- s.closeGroup(a.Key)
- }
- }
- } else {
- s.appendKey(a.Key)
- s.appendValue(a.Value)
- }
-}
-
-func (s *handleState) appendError(err error) {
- s.appendString(fmt.Sprintf("!ERROR:%v", err))
-}
-
-func (s *handleState) appendKey(key string) {
- s.buf.WriteString(s.sep)
- if s.prefix != nil {
- // TODO: optimize by avoiding allocation.
- s.appendString(string(*s.prefix) + key)
- } else {
- s.appendString(key)
- }
- if s.h.json {
- s.buf.WriteByte(':')
- } else {
- s.buf.WriteByte('=')
- }
- s.sep = s.h.attrSep()
-}
-
-func (s *handleState) appendString(str string) {
- if s.h.json {
- s.buf.WriteByte('"')
- *s.buf = appendEscapedJSONString(*s.buf, str)
- s.buf.WriteByte('"')
- } else {
- // text
- if needsQuoting(str) {
- *s.buf = strconv.AppendQuote(*s.buf, str)
- } else {
- s.buf.WriteString(str)
- }
- }
-}
-
-func (s *handleState) appendValue(v Value) {
- var err error
- if s.h.json {
- err = appendJSONValue(s, v)
- } else {
- err = appendTextValue(s, v)
- }
- if err != nil {
- s.appendError(err)
- }
-}
-
-func (s *handleState) appendTime(t time.Time) {
- if s.h.json {
- appendJSONTime(s, t)
- } else {
- writeTimeRFC3339Millis(s.buf, t)
- }
-}
-
-// This takes half the time of Time.AppendFormat.
-func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) {
- year, month, day := t.Date()
- buf.WritePosIntWidth(year, 4)
- buf.WriteByte('-')
- buf.WritePosIntWidth(int(month), 2)
- buf.WriteByte('-')
- buf.WritePosIntWidth(day, 2)
- buf.WriteByte('T')
- hour, min, sec := t.Clock()
- buf.WritePosIntWidth(hour, 2)
- buf.WriteByte(':')
- buf.WritePosIntWidth(min, 2)
- buf.WriteByte(':')
- buf.WritePosIntWidth(sec, 2)
- ns := t.Nanosecond()
- buf.WriteByte('.')
- buf.WritePosIntWidth(ns/1e6, 3)
- _, offsetSeconds := t.Zone()
- if offsetSeconds == 0 {
- buf.WriteByte('Z')
- } else {
- offsetMinutes := offsetSeconds / 60
- if offsetMinutes < 0 {
- buf.WriteByte('-')
- offsetMinutes = -offsetMinutes
- } else {
- buf.WriteByte('+')
- }
- buf.WritePosIntWidth(offsetMinutes/60, 2)
- buf.WriteByte(':')
- buf.WritePosIntWidth(offsetMinutes%60, 2)
- }
-}
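
Note: handler.go documents HandlerOptions.ReplaceAttr, which remains available from log/slog for rewriting or dropping attributes before they are written. A hedged sketch that redacts a hypothetical "password" attribute:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        redact := func(groups []string, a slog.Attr) slog.Attr {
            // The "password" key is purely illustrative; any attribute can
            // be rewritten here, or dropped by returning an Attr with an empty key.
            if a.Key == "password" {
                a.Value = slog.StringValue("[REDACTED]")
            }
            return a
        }

        logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
            ReplaceAttr: redact,
        }))

        logger.Info("login attempt", "user", "admin", "password", "hunter2")
    }
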
diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
deleted file mode 100644
index 7786c166..00000000
--- a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package buffer provides a pool-allocated byte buffer.
-package buffer
-
-import (
- "sync"
-)
-
-// Buffer adapted from go/src/fmt/print.go
-type Buffer []byte
-
-// Having an initial size gives a dramatic speedup.
-var bufPool = sync.Pool{
- New: func() any {
- b := make([]byte, 0, 1024)
- return (*Buffer)(&b)
- },
-}
-
-func New() *Buffer {
- return bufPool.Get().(*Buffer)
-}
-
-func (b *Buffer) Free() {
- // To reduce peak allocation, return only smaller buffers to the pool.
- const maxBufferSize = 16 << 10
- if cap(*b) <= maxBufferSize {
- *b = (*b)[:0]
- bufPool.Put(b)
- }
-}
-
-func (b *Buffer) Reset() {
- *b = (*b)[:0]
-}
-
-func (b *Buffer) Write(p []byte) (int, error) {
- *b = append(*b, p...)
- return len(p), nil
-}
-
-func (b *Buffer) WriteString(s string) {
- *b = append(*b, s...)
-}
-
-func (b *Buffer) WriteByte(c byte) {
- *b = append(*b, c)
-}
-
-func (b *Buffer) WritePosInt(i int) {
- b.WritePosIntWidth(i, 0)
-}
-
-// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left
-// by zeroes to the given width. Use a width of 0 to omit padding.
-func (b *Buffer) WritePosIntWidth(i, width int) {
- // Cheap integer to fixed-width decimal ASCII.
- // Copied from log/log.go.
-
- if i < 0 {
- panic("negative int")
- }
-
- // Assemble decimal in reverse order.
- var bb [20]byte
- bp := len(bb) - 1
- for i >= 10 || width > 1 {
- width--
- q := i / 10
- bb[bp] = byte('0' + i - q*10)
- bp--
- i = q
- }
- // i < 10
- bb[bp] = byte('0' + i)
- b.Write(bb[bp:])
-}
-
-func (b *Buffer) String() string {
- return string(*b)
-}
diff --git a/vendor/golang.org/x/exp/slog/internal/ignorepc.go b/vendor/golang.org/x/exp/slog/internal/ignorepc.go
deleted file mode 100644
index d1256426..00000000
--- a/vendor/golang.org/x/exp/slog/internal/ignorepc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-// If IgnorePC is true, do not invoke runtime.Callers to get the pc.
-// This is solely for benchmarking the slowdown from runtime.Callers.
-var IgnorePC = false
diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go
deleted file mode 100644
index 157ada86..00000000
--- a/vendor/golang.org/x/exp/slog/json_handler.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "strconv"
- "time"
- "unicode/utf8"
-
- "golang.org/x/exp/slog/internal/buffer"
-)
-
-// JSONHandler is a Handler that writes Records to an io.Writer as
-// line-delimited JSON objects.
-type JSONHandler struct {
- *commonHandler
-}
-
-// NewJSONHandler creates a JSONHandler that writes to w,
-// using the given options.
-// If opts is nil, the default options are used.
-func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler {
- if opts == nil {
- opts = &HandlerOptions{}
- }
- return &JSONHandler{
- &commonHandler{
- json: true,
- w: w,
- opts: *opts,
- },
- }
-}
-
-// Enabled reports whether the handler handles records at the given level.
-// The handler ignores records whose level is lower.
-func (h *JSONHandler) Enabled(_ context.Context, level Level) bool {
- return h.commonHandler.enabled(level)
-}
-
-// WithAttrs returns a new JSONHandler whose attributes consists
-// of h's attributes followed by attrs.
-func (h *JSONHandler) WithAttrs(attrs []Attr) Handler {
- return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
-}
-
-func (h *JSONHandler) WithGroup(name string) Handler {
- return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)}
-}
-
-// Handle formats its argument Record as a JSON object on a single line.
-//
-// If the Record's time is zero, the time is omitted.
-// Otherwise, the key is "time"
-// and the value is output as with json.Marshal.
-//
-// If the Record's level is zero, the level is omitted.
-// Otherwise, the key is "level"
-// and the value of [Level.String] is output.
-//
-// If the AddSource option is set and source information is available,
-// the key is "source"
-// and the value is output as "FILE:LINE".
-//
-// The message's key is "msg".
-//
-// To modify these or other attributes, or remove them from the output, use
-// [HandlerOptions.ReplaceAttr].
-//
-// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false),
-// with two exceptions.
-//
-// First, an Attr whose Value is of type error is formatted as a string, by
-// calling its Error method. Only errors in Attrs receive this special treatment,
-// not errors embedded in structs, slices, maps or other data structures that
-// are processed by the encoding/json package.
-//
-// Second, an encoding failure does not cause Handle to return an error.
-// Instead, the error message is formatted as a string.
-//
-// Each call to Handle results in a single serialized call to io.Writer.Write.
-func (h *JSONHandler) Handle(_ context.Context, r Record) error {
- return h.commonHandler.handle(r)
-}
-
-// Adapted from time.Time.MarshalJSON to avoid allocation.
-func appendJSONTime(s *handleState, t time.Time) {
- if y := t.Year(); y < 0 || y >= 10000 {
- // RFC 3339 is clear that years are 4 digits exactly.
- // See golang.org/issue/4556#c15 for more discussion.
- s.appendError(errors.New("time.Time year outside of range [0,9999]"))
- }
- s.buf.WriteByte('"')
- *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano)
- s.buf.WriteByte('"')
-}
-
-func appendJSONValue(s *handleState, v Value) error {
- switch v.Kind() {
- case KindString:
- s.appendString(v.str())
- case KindInt64:
- *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10)
- case KindUint64:
- *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10)
- case KindFloat64:
- // json.Marshal is funny about floats; it doesn't
- // always match strconv.AppendFloat. So just call it.
- // That's expensive, but floats are rare.
- if err := appendJSONMarshal(s.buf, v.Float64()); err != nil {
- return err
- }
- case KindBool:
- *s.buf = strconv.AppendBool(*s.buf, v.Bool())
- case KindDuration:
- // Do what json.Marshal does.
- *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10)
- case KindTime:
- s.appendTime(v.Time())
- case KindAny:
- a := v.Any()
- _, jm := a.(json.Marshaler)
- if err, ok := a.(error); ok && !jm {
- s.appendString(err.Error())
- } else {
- return appendJSONMarshal(s.buf, a)
- }
- default:
- panic(fmt.Sprintf("bad kind: %s", v.Kind()))
- }
- return nil
-}
-
-func appendJSONMarshal(buf *buffer.Buffer, v any) error {
- // Use a json.Encoder to avoid escaping HTML.
- var bb bytes.Buffer
- enc := json.NewEncoder(&bb)
- enc.SetEscapeHTML(false)
- if err := enc.Encode(v); err != nil {
- return err
- }
- bs := bb.Bytes()
- buf.Write(bs[:len(bs)-1]) // remove final newline
- return nil
-}
-
-// appendEscapedJSONString escapes s for JSON and appends it to buf.
-// It does not surround the string in quotation marks.
-//
-// Modified from encoding/json/encode.go:encodeState.string,
-// with escapeHTML set to false.
-func appendEscapedJSONString(buf []byte, s string) []byte {
- char := func(b byte) { buf = append(buf, b) }
- str := func(s string) { buf = append(buf, s...) }
-
- start := 0
- for i := 0; i < len(s); {
- if b := s[i]; b < utf8.RuneSelf {
- if safeSet[b] {
- i++
- continue
- }
- if start < i {
- str(s[start:i])
- }
- char('\\')
- switch b {
- case '\\', '"':
- char(b)
- case '\n':
- char('n')
- case '\r':
- char('r')
- case '\t':
- char('t')
- default:
- // This encodes bytes < 0x20 except for \t, \n and \r.
- str(`u00`)
- char(hex[b>>4])
- char(hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRuneInString(s[i:])
- if c == utf8.RuneError && size == 1 {
- if start < i {
- str(s[start:i])
- }
- str(`\ufffd`)
- i += size
- start = i
- continue
- }
- // U+2028 is LINE SEPARATOR.
- // U+2029 is PARAGRAPH SEPARATOR.
- // They are both technically valid characters in JSON strings,
- // but don't work in JSONP, which has to be evaluated as JavaScript,
- // and can lead to security holes there. It is valid JSON to
- // escape them, so we do so unconditionally.
- // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
- if c == '\u2028' || c == '\u2029' {
- if start < i {
- str(s[start:i])
- }
- str(`\u202`)
- char(hex[c&0xF])
- i += size
- start = i
- continue
- }
- i += size
- }
- if start < len(s) {
- str(s[start:])
- }
- return buf
-}
-
-var hex = "0123456789abcdef"
-
-// Copied from encoding/json/tables.go.
-//
-// safeSet holds the value true if the ASCII character with the given array
-// position can be represented inside a JSON string without any further
-// escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), and the backslash character ("\").
-var safeSet = [utf8.RuneSelf]bool{
- ' ': true,
- '!': true,
- '"': false,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '(': true,
- ')': true,
- '*': true,
- '+': true,
- ',': true,
- '-': true,
- '.': true,
- '/': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- ':': true,
- ';': true,
- '<': true,
- '=': true,
- '>': true,
- '?': true,
- '@': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'V': true,
- 'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '[': true,
- '\\': false,
- ']': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '{': true,
- '|': true,
- '}': true,
- '~': true,
- '\u007f': true,
-}
diff --git a/vendor/golang.org/x/exp/slog/level.go b/vendor/golang.org/x/exp/slog/level.go
deleted file mode 100644
index b2365f0a..00000000
--- a/vendor/golang.org/x/exp/slog/level.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
- "sync/atomic"
-)
-
-// A Level is the importance or severity of a log event.
-// The higher the level, the more important or severe the event.
-type Level int
-
-// Level numbers are inherently arbitrary,
-// but we picked them to satisfy three constraints.
-// Any system can map them to another numbering scheme if it wishes.
-//
-// First, we wanted the default level to be Info, Since Levels are ints, Info is
-// the default value for int, zero.
-//
-
-// Second, we wanted to make it easy to use levels to specify logger verbosity.
-// Since a larger level means a more severe event, a logger that accepts events
-// with smaller (or more negative) level means a more verbose logger. Logger
-// verbosity is thus the negation of event severity, and the default verbosity
-// of 0 accepts all events at least as severe as INFO.
-//
-// Third, we wanted some room between levels to accommodate schemes with named
-// levels between ours. For example, Google Cloud Logging defines a Notice level
-// between Info and Warn. Since there are only a few of these intermediate
-// levels, the gap between the numbers need not be large. Our gap of 4 matches
-// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
-// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
-// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
-// does not. But those OpenTelemetry levels can still be represented as slog
-// Levels by using the appropriate integers.
-//
-// Names for common levels.
-const (
- LevelDebug Level = -4
- LevelInfo Level = 0
- LevelWarn Level = 4
- LevelError Level = 8
-)
-
-// String returns a name for the level.
-// If the level has a name, then that name
-// in uppercase is returned.
-// If the level is between named values, then
-// an integer is appended to the uppercased name.
-// Examples:
-//
-// LevelWarn.String() => "WARN"
-// (LevelInfo+2).String() => "INFO+2"
-func (l Level) String() string {
- str := func(base string, val Level) string {
- if val == 0 {
- return base
- }
- return fmt.Sprintf("%s%+d", base, val)
- }
-
- switch {
- case l < LevelInfo:
- return str("DEBUG", l-LevelDebug)
- case l < LevelWarn:
- return str("INFO", l-LevelInfo)
- case l < LevelError:
- return str("WARN", l-LevelWarn)
- default:
- return str("ERROR", l-LevelError)
- }
-}
-
-// MarshalJSON implements [encoding/json.Marshaler]
-// by quoting the output of [Level.String].
-func (l Level) MarshalJSON() ([]byte, error) {
- // AppendQuote is sufficient for JSON-encoding all Level strings.
- // They don't contain any runes that would produce invalid JSON
- // when escaped.
- return strconv.AppendQuote(nil, l.String()), nil
-}
-
-// UnmarshalJSON implements [encoding/json.Unmarshaler]
-// It accepts any string produced by [Level.MarshalJSON],
-// ignoring case.
-// It also accepts numeric offsets that would result in a different string on
-// output. For example, "Error-8" would marshal as "INFO".
-func (l *Level) UnmarshalJSON(data []byte) error {
- s, err := strconv.Unquote(string(data))
- if err != nil {
- return err
- }
- return l.parse(s)
-}
-
-// MarshalText implements [encoding.TextMarshaler]
-// by calling [Level.String].
-func (l Level) MarshalText() ([]byte, error) {
- return []byte(l.String()), nil
-}
-
-// UnmarshalText implements [encoding.TextUnmarshaler].
-// It accepts any string produced by [Level.MarshalText],
-// ignoring case.
-// It also accepts numeric offsets that would result in a different string on
-// output. For example, "Error-8" would marshal as "INFO".
-func (l *Level) UnmarshalText(data []byte) error {
- return l.parse(string(data))
-}
-
-func (l *Level) parse(s string) (err error) {
- defer func() {
- if err != nil {
- err = fmt.Errorf("slog: level string %q: %w", s, err)
- }
- }()
-
- name := s
- offset := 0
- if i := strings.IndexAny(s, "+-"); i >= 0 {
- name = s[:i]
- offset, err = strconv.Atoi(s[i:])
- if err != nil {
- return err
- }
- }
- switch strings.ToUpper(name) {
- case "DEBUG":
- *l = LevelDebug
- case "INFO":
- *l = LevelInfo
- case "WARN":
- *l = LevelWarn
- case "ERROR":
- *l = LevelError
- default:
- return errors.New("unknown name")
- }
- *l += Level(offset)
- return nil
-}
-
-// Level returns the receiver.
-// It implements Leveler.
-func (l Level) Level() Level { return l }
-
-// A LevelVar is a Level variable, to allow a Handler level to change
-// dynamically.
-// It implements Leveler as well as a Set method,
-// and it is safe for use by multiple goroutines.
-// The zero LevelVar corresponds to LevelInfo.
-type LevelVar struct {
- val atomic.Int64
-}
-
-// Level returns v's level.
-func (v *LevelVar) Level() Level {
- return Level(int(v.val.Load()))
-}
-
-// Set sets v's level to l.
-func (v *LevelVar) Set(l Level) {
- v.val.Store(int64(l))
-}
-
-func (v *LevelVar) String() string {
- return fmt.Sprintf("LevelVar(%s)", v.Level())
-}
-
-// MarshalText implements [encoding.TextMarshaler]
-// by calling [Level.MarshalText].
-func (v *LevelVar) MarshalText() ([]byte, error) {
- return v.Level().MarshalText()
-}
-
-// UnmarshalText implements [encoding.TextUnmarshaler]
-// by calling [Level.UnmarshalText].
-func (v *LevelVar) UnmarshalText(data []byte) error {
- var l Level
- if err := l.UnmarshalText(data); err != nil {
- return err
- }
- v.Set(l)
- return nil
-}
-
-// A Leveler provides a Level value.
-//
-// As Level itself implements Leveler, clients typically supply
-// a Level value wherever a Leveler is needed, such as in HandlerOptions.
-// Clients who need to vary the level dynamically can provide a more complex
-// Leveler implementation such as *LevelVar.
-type Leveler interface {
- Level() Level
-}
diff --git a/vendor/golang.org/x/exp/slog/logger.go b/vendor/golang.org/x/exp/slog/logger.go
deleted file mode 100644
index e87ec993..00000000
--- a/vendor/golang.org/x/exp/slog/logger.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "context"
- "log"
- "runtime"
- "sync/atomic"
- "time"
-
- "golang.org/x/exp/slog/internal"
-)
-
-var defaultLogger atomic.Value
-
-func init() {
- defaultLogger.Store(New(newDefaultHandler(log.Output)))
-}
-
-// Default returns the default Logger.
-func Default() *Logger { return defaultLogger.Load().(*Logger) }
-
-// SetDefault makes l the default Logger.
-// After this call, output from the log package's default Logger
-// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler.
-func SetDefault(l *Logger) {
- defaultLogger.Store(l)
- // If the default's handler is a defaultHandler, then don't use a handleWriter,
- // or we'll deadlock as they both try to acquire the log default mutex.
- // The defaultHandler will use whatever the log default writer is currently
- // set to, which is correct.
- // This can occur with SetDefault(Default()).
- // See TestSetDefault.
- if _, ok := l.Handler().(*defaultHandler); !ok {
- capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0
- log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC})
- log.SetFlags(0) // we want just the log message, no time or location
- }
-}
-
-// handlerWriter is an io.Writer that calls a Handler.
-// It is used to link the default log.Logger to the default slog.Logger.
-type handlerWriter struct {
- h Handler
- level Level
- capturePC bool
-}
-
-func (w *handlerWriter) Write(buf []byte) (int, error) {
- if !w.h.Enabled(context.Background(), w.level) {
- return 0, nil
- }
- var pc uintptr
- if !internal.IgnorePC && w.capturePC {
- // skip [runtime.Callers, w.Write, Logger.Output, log.Print]
- var pcs [1]uintptr
- runtime.Callers(4, pcs[:])
- pc = pcs[0]
- }
-
- // Remove final newline.
- origLen := len(buf) // Report that the entire buf was written.
- if len(buf) > 0 && buf[len(buf)-1] == '\n' {
- buf = buf[:len(buf)-1]
- }
- r := NewRecord(time.Now(), w.level, string(buf), pc)
- return origLen, w.h.Handle(context.Background(), r)
-}
-
-// A Logger records structured information about each call to its
-// Log, Debug, Info, Warn, and Error methods.
-// For each call, it creates a Record and passes it to a Handler.
-//
-// To create a new Logger, call [New] or a Logger method
-// that begins "With".
-type Logger struct {
- handler Handler // for structured logging
-}
-
-func (l *Logger) clone() *Logger {
- c := *l
- return &c
-}
-
-// Handler returns l's Handler.
-func (l *Logger) Handler() Handler { return l.handler }
-
-// With returns a new Logger that includes the given arguments, converted to
-// Attrs as in [Logger.Log].
-// The Attrs will be added to each output from the Logger.
-// The new Logger shares the old Logger's context.
-// The new Logger's handler is the result of calling WithAttrs on the receiver's
-// handler.
-func (l *Logger) With(args ...any) *Logger {
- c := l.clone()
- c.handler = l.handler.WithAttrs(argsToAttrSlice(args))
- return c
-}
-
-// WithGroup returns a new Logger that starts a group. The keys of all
-// attributes added to the Logger will be qualified by the given name.
-// (How that qualification happens depends on the [Handler.WithGroup]
-// method of the Logger's Handler.)
-// The new Logger shares the old Logger's context.
-//
-// The new Logger's handler is the result of calling WithGroup on the receiver's
-// handler.
-func (l *Logger) WithGroup(name string) *Logger {
- c := l.clone()
- c.handler = l.handler.WithGroup(name)
- return c
-
-}
-
-// New creates a new Logger with the given non-nil Handler and a nil context.
-func New(h Handler) *Logger {
- if h == nil {
- panic("nil Handler")
- }
- return &Logger{handler: h}
-}
-
-// With calls Logger.With on the default logger.
-func With(args ...any) *Logger {
- return Default().With(args...)
-}
-
-// Enabled reports whether l emits log records at the given context and level.
-func (l *Logger) Enabled(ctx context.Context, level Level) bool {
- if ctx == nil {
- ctx = context.Background()
- }
- return l.Handler().Enabled(ctx, level)
-}
-
-// NewLogLogger returns a new log.Logger such that each call to its Output method
-// dispatches a Record to the specified handler. The logger acts as a bridge from
-// the older log API to newer structured logging handlers.
-func NewLogLogger(h Handler, level Level) *log.Logger {
- return log.New(&handlerWriter{h, level, true}, "", 0)
-}
-
-// Log emits a log record with the current time and the given level and message.
-// The Record's Attrs consist of the Logger's attributes followed by
-// the Attrs specified by args.
-//
-// The attribute arguments are processed as follows:
-// - If an argument is an Attr, it is used as is.
-// - If an argument is a string and this is not the last argument,
-// the following argument is treated as the value and the two are combined
-// into an Attr.
-// - Otherwise, the argument is treated as a value with key "!BADKEY".
-func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) {
- l.log(ctx, level, msg, args...)
-}
-
-// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs.
-func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
- l.logAttrs(ctx, level, msg, attrs...)
-}
-
-// Debug logs at LevelDebug.
-func (l *Logger) Debug(msg string, args ...any) {
- l.log(nil, LevelDebug, msg, args...)
-}
-
-// DebugContext logs at LevelDebug with the given context.
-func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelDebug, msg, args...)
-}
-
-// DebugCtx logs at LevelDebug with the given context.
-// Deprecated: Use Logger.DebugContext.
-func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelDebug, msg, args...)
-}
-
-// Info logs at LevelInfo.
-func (l *Logger) Info(msg string, args ...any) {
- l.log(nil, LevelInfo, msg, args...)
-}
-
-// InfoContext logs at LevelInfo with the given context.
-func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelInfo, msg, args...)
-}
-
-// InfoCtx logs at LevelInfo with the given context.
-// Deprecated: Use Logger.InfoContext.
-func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelInfo, msg, args...)
-}
-
-// Warn logs at LevelWarn.
-func (l *Logger) Warn(msg string, args ...any) {
- l.log(nil, LevelWarn, msg, args...)
-}
-
-// WarnContext logs at LevelWarn with the given context.
-func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelWarn, msg, args...)
-}
-
-// WarnCtx logs at LevelWarn with the given context.
-// Deprecated: Use Logger.WarnContext.
-func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelWarn, msg, args...)
-}
-
-// Error logs at LevelError.
-func (l *Logger) Error(msg string, args ...any) {
- l.log(nil, LevelError, msg, args...)
-}
-
-// ErrorContext logs at LevelError with the given context.
-func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelError, msg, args...)
-}
-
-// ErrorCtx logs at LevelError with the given context.
-// Deprecated: Use Logger.ErrorContext.
-func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) {
- l.log(ctx, LevelError, msg, args...)
-}
-
-// log is the low-level logging method for methods that take ...any.
-// It must always be called directly by an exported logging method
-// or function, because it uses a fixed call depth to obtain the pc.
-func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) {
- if !l.Enabled(ctx, level) {
- return
- }
- var pc uintptr
- if !internal.IgnorePC {
- var pcs [1]uintptr
- // skip [runtime.Callers, this function, this function's caller]
- runtime.Callers(3, pcs[:])
- pc = pcs[0]
- }
- r := NewRecord(time.Now(), level, msg, pc)
- r.Add(args...)
- if ctx == nil {
- ctx = context.Background()
- }
- _ = l.Handler().Handle(ctx, r)
-}
-
-// logAttrs is like [Logger.log], but for methods that take ...Attr.
-func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
- if !l.Enabled(ctx, level) {
- return
- }
- var pc uintptr
- if !internal.IgnorePC {
- var pcs [1]uintptr
- // skip [runtime.Callers, this function, this function's caller]
- runtime.Callers(3, pcs[:])
- pc = pcs[0]
- }
- r := NewRecord(time.Now(), level, msg, pc)
- r.AddAttrs(attrs...)
- if ctx == nil {
- ctx = context.Background()
- }
- _ = l.Handler().Handle(ctx, r)
-}
-
-// Debug calls Logger.Debug on the default logger.
-func Debug(msg string, args ...any) {
- Default().log(nil, LevelDebug, msg, args...)
-}
-
-// DebugContext calls Logger.DebugContext on the default logger.
-func DebugContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelDebug, msg, args...)
-}
-
-// Info calls Logger.Info on the default logger.
-func Info(msg string, args ...any) {
- Default().log(nil, LevelInfo, msg, args...)
-}
-
-// InfoContext calls Logger.InfoContext on the default logger.
-func InfoContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelInfo, msg, args...)
-}
-
-// Warn calls Logger.Warn on the default logger.
-func Warn(msg string, args ...any) {
- Default().log(nil, LevelWarn, msg, args...)
-}
-
-// WarnContext calls Logger.WarnContext on the default logger.
-func WarnContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelWarn, msg, args...)
-}
-
-// Error calls Logger.Error on the default logger.
-func Error(msg string, args ...any) {
- Default().log(nil, LevelError, msg, args...)
-}
-
-// ErrorContext calls Logger.ErrorContext on the default logger.
-func ErrorContext(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelError, msg, args...)
-}
-
-// DebugCtx calls Logger.DebugContext on the default logger.
-// Deprecated: call DebugContext.
-func DebugCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelDebug, msg, args...)
-}
-
-// InfoCtx calls Logger.InfoContext on the default logger.
-// Deprecated: call InfoContext.
-func InfoCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelInfo, msg, args...)
-}
-
-// WarnCtx calls Logger.WarnContext on the default logger.
-// Deprecated: call WarnContext.
-func WarnCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelWarn, msg, args...)
-}
-
-// ErrorCtx calls Logger.ErrorContext on the default logger.
-// Deprecated: call ErrorContext.
-func ErrorCtx(ctx context.Context, msg string, args ...any) {
- Default().log(ctx, LevelError, msg, args...)
-}
-
-// Log calls Logger.Log on the default logger.
-func Log(ctx context.Context, level Level, msg string, args ...any) {
- Default().log(ctx, level, msg, args...)
-}
-
-// LogAttrs calls Logger.LogAttrs on the default logger.
-func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
- Default().logAttrs(ctx, level, msg, attrs...)
-}
diff --git a/vendor/golang.org/x/exp/slog/noplog.bench b/vendor/golang.org/x/exp/slog/noplog.bench
deleted file mode 100644
index ed9296ff..00000000
--- a/vendor/golang.org/x/exp/slog/noplog.bench
+++ /dev/null
@@ -1,36 +0,0 @@
-goos: linux
-goarch: amd64
-pkg: golang.org/x/exp/slog
-cpu: Intel(R) Xeon(R) CPU @ 2.20GHz
-BenchmarkNopLog/attrs-8 1000000 1090 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1097 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1078 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1095 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-8 1000000 1096 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 4007268 308.2 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 4016138 299.7 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 4020529 305.9 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 3977829 303.4 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/attrs-parallel-8 3225438 318.5 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1179256 994.2 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1000000 1002 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1216710 993.2 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1000000 1013 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/keys-values-8 1000000 1016 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 989066 1163 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 994116 1163 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 1000000 1152 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 991675 1165 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-8 965268 1166 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3955503 303.3 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3861188 307.8 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3967752 303.9 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3955203 302.7 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8 3948278 301.1 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 940622 1247 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 936381 1257 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 959730 1266 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 943473 1290 ns/op 0 B/op 0 allocs/op
-BenchmarkNopLog/Ctx-8 919414 1259 ns/op 0 B/op 0 allocs/op
-PASS
-ok golang.org/x/exp/slog 40.566s
diff --git a/vendor/golang.org/x/exp/slog/record.go b/vendor/golang.org/x/exp/slog/record.go
deleted file mode 100644
index 38b3440f..00000000
--- a/vendor/golang.org/x/exp/slog/record.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "runtime"
- "time"
-
- "golang.org/x/exp/slices"
-)
-
-const nAttrsInline = 5
-
-// A Record holds information about a log event.
-// Copies of a Record share state.
-// Do not modify a Record after handing out a copy to it.
-// Use [Record.Clone] to create a copy with no shared state.
-type Record struct {
- // The time at which the output method (Log, Info, etc.) was called.
- Time time.Time
-
- // The log message.
- Message string
-
- // The level of the event.
- Level Level
-
- // The program counter at the time the record was constructed, as determined
- // by runtime.Callers. If zero, no program counter is available.
- //
- // The only valid use for this value is as an argument to
- // [runtime.CallersFrames]. In particular, it must not be passed to
- // [runtime.FuncForPC].
- PC uintptr
-
- // Allocation optimization: an inline array sized to hold
- // the majority of log calls (based on examination of open-source
- // code). It holds the start of the list of Attrs.
- front [nAttrsInline]Attr
-
- // The number of Attrs in front.
- nFront int
-
- // The list of Attrs except for those in front.
- // Invariants:
- // - len(back) > 0 iff nFront == len(front)
- // - Unused array elements are zero. Used to detect mistakes.
- back []Attr
-}
-
-// NewRecord creates a Record from the given arguments.
-// Use [Record.AddAttrs] to add attributes to the Record.
-//
-// NewRecord is intended for logging APIs that want to support a [Handler] as
-// a backend.
-func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
- return Record{
- Time: t,
- Message: msg,
- Level: level,
- PC: pc,
- }
-}
-
-// Clone returns a copy of the record with no shared state.
-// The original record and the clone can both be modified
-// without interfering with each other.
-func (r Record) Clone() Record {
- r.back = slices.Clip(r.back) // prevent append from mutating shared array
- return r
-}
-
-// NumAttrs returns the number of attributes in the Record.
-func (r Record) NumAttrs() int {
- return r.nFront + len(r.back)
-}
-
-// Attrs calls f on each Attr in the Record.
-// Iteration stops if f returns false.
-func (r Record) Attrs(f func(Attr) bool) {
- for i := 0; i < r.nFront; i++ {
- if !f(r.front[i]) {
- return
- }
- }
- for _, a := range r.back {
- if !f(a) {
- return
- }
- }
-}
-
-// AddAttrs appends the given Attrs to the Record's list of Attrs.
-func (r *Record) AddAttrs(attrs ...Attr) {
- n := copy(r.front[r.nFront:], attrs)
- r.nFront += n
- // Check if a copy was modified by slicing past the end
- // and seeing if the Attr there is non-zero.
- if cap(r.back) > len(r.back) {
- end := r.back[:len(r.back)+1][len(r.back)]
- if !end.isEmpty() {
- panic("copies of a slog.Record were both modified")
- }
- }
- r.back = append(r.back, attrs[n:]...)
-}
-
-// Add converts the args to Attrs as described in [Logger.Log],
-// then appends the Attrs to the Record's list of Attrs.
-func (r *Record) Add(args ...any) {
- var a Attr
- for len(args) > 0 {
- a, args = argsToAttr(args)
- if r.nFront < len(r.front) {
- r.front[r.nFront] = a
- r.nFront++
- } else {
- if r.back == nil {
- r.back = make([]Attr, 0, countAttrs(args))
- }
- r.back = append(r.back, a)
- }
- }
-
-}
-
-// countAttrs returns the number of Attrs that would be created from args.
-func countAttrs(args []any) int {
- n := 0
- for i := 0; i < len(args); i++ {
- n++
- if _, ok := args[i].(string); ok {
- i++
- }
- }
- return n
-}
-
-const badKey = "!BADKEY"
-
-// argsToAttr turns a prefix of the nonempty args slice into an Attr
-// and returns the unconsumed portion of the slice.
-// If args[0] is an Attr, it returns it.
-// If args[0] is a string, it treats the first two elements as
-// a key-value pair.
-// Otherwise, it treats args[0] as a value with a missing key.
-func argsToAttr(args []any) (Attr, []any) {
- switch x := args[0].(type) {
- case string:
- if len(args) == 1 {
- return String(badKey, x), nil
- }
- return Any(x, args[1]), args[2:]
-
- case Attr:
- return x, args[1:]
-
- default:
- return Any(badKey, x), args[1:]
- }
-}
-
-// Source describes the location of a line of source code.
-type Source struct {
- // Function is the package path-qualified function name containing the
- // source line. If non-empty, this string uniquely identifies a single
- // function in the program. This may be the empty string if not known.
- Function string `json:"function"`
- // File and Line are the file name and line number (1-based) of the source
- // line. These may be the empty string and zero, respectively, if not known.
- File string `json:"file"`
- Line int `json:"line"`
-}
-
-// attrs returns the non-zero fields of s as a slice of attrs.
-// It is similar to a LogValue method, but we don't want Source
-// to implement LogValuer because it would be resolved before
-// the ReplaceAttr function was called.
-func (s *Source) group() Value {
- var as []Attr
- if s.Function != "" {
- as = append(as, String("function", s.Function))
- }
- if s.File != "" {
- as = append(as, String("file", s.File))
- }
- if s.Line != 0 {
- as = append(as, Int("line", s.Line))
- }
- return GroupValue(as...)
-}
-
-// source returns a Source for the log event.
-// If the Record was created without the necessary information,
-// or if the location is unavailable, it returns a non-nil *Source
-// with zero fields.
-func (r Record) source() *Source {
- fs := runtime.CallersFrames([]uintptr{r.PC})
- f, _ := fs.Next()
- return &Source{
- Function: f.Function,
- File: f.File,
- Line: f.Line,
- }
-}
diff --git a/vendor/golang.org/x/exp/slog/text_handler.go b/vendor/golang.org/x/exp/slog/text_handler.go
deleted file mode 100644
index 75b66b71..00000000
--- a/vendor/golang.org/x/exp/slog/text_handler.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "context"
- "encoding"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "unicode"
- "unicode/utf8"
-)
-
-// TextHandler is a Handler that writes Records to an io.Writer as a
-// sequence of key=value pairs separated by spaces and followed by a newline.
-type TextHandler struct {
- *commonHandler
-}
-
-// NewTextHandler creates a TextHandler that writes to w,
-// using the given options.
-// If opts is nil, the default options are used.
-func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
- if opts == nil {
- opts = &HandlerOptions{}
- }
- return &TextHandler{
- &commonHandler{
- json: false,
- w: w,
- opts: *opts,
- },
- }
-}
-
-// Enabled reports whether the handler handles records at the given level.
-// The handler ignores records whose level is lower.
-func (h *TextHandler) Enabled(_ context.Context, level Level) bool {
- return h.commonHandler.enabled(level)
-}
-
-// WithAttrs returns a new TextHandler whose attributes consists
-// of h's attributes followed by attrs.
-func (h *TextHandler) WithAttrs(attrs []Attr) Handler {
- return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
-}
-
-func (h *TextHandler) WithGroup(name string) Handler {
- return &TextHandler{commonHandler: h.commonHandler.withGroup(name)}
-}
-
-// Handle formats its argument Record as a single line of space-separated
-// key=value items.
-//
-// If the Record's time is zero, the time is omitted.
-// Otherwise, the key is "time"
-// and the value is output in RFC3339 format with millisecond precision.
-//
-// If the Record's level is zero, the level is omitted.
-// Otherwise, the key is "level"
-// and the value of [Level.String] is output.
-//
-// If the AddSource option is set and source information is available,
-// the key is "source" and the value is output as FILE:LINE.
-//
-// The message's key is "msg".
-//
-// To modify these or other attributes, or remove them from the output, use
-// [HandlerOptions.ReplaceAttr].
-//
-// If a value implements [encoding.TextMarshaler], the result of MarshalText is
-// written. Otherwise, the result of fmt.Sprint is written.
-//
-// Keys and values are quoted with [strconv.Quote] if they contain Unicode space
-// characters, non-printing characters, '"' or '='.
-//
-// Keys inside groups consist of components (keys or group names) separated by
-// dots. No further escaping is performed.
-// Thus there is no way to determine from the key "a.b.c" whether there
-// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c",
-// or single group "a" and a key "b.c".
-// If it is necessary to reconstruct the group structure of a key
-// even in the presence of dots inside components, use
-// [HandlerOptions.ReplaceAttr] to encode that information in the key.
-//
-// Each call to Handle results in a single serialized call to
-// io.Writer.Write.
-func (h *TextHandler) Handle(_ context.Context, r Record) error {
- return h.commonHandler.handle(r)
-}
-
-func appendTextValue(s *handleState, v Value) error {
- switch v.Kind() {
- case KindString:
- s.appendString(v.str())
- case KindTime:
- s.appendTime(v.time())
- case KindAny:
- if tm, ok := v.any.(encoding.TextMarshaler); ok {
- data, err := tm.MarshalText()
- if err != nil {
- return err
- }
- // TODO: avoid the conversion to string.
- s.appendString(string(data))
- return nil
- }
- if bs, ok := byteSlice(v.any); ok {
- // As of Go 1.19, this only allocates for strings longer than 32 bytes.
- s.buf.WriteString(strconv.Quote(string(bs)))
- return nil
- }
- s.appendString(fmt.Sprintf("%+v", v.Any()))
- default:
- *s.buf = v.append(*s.buf)
- }
- return nil
-}
-
-// byteSlice returns its argument as a []byte if the argument's
-// underlying type is []byte, along with a second return value of true.
-// Otherwise it returns nil, false.
-func byteSlice(a any) ([]byte, bool) {
- if bs, ok := a.([]byte); ok {
- return bs, true
- }
- // Like Printf's %s, we allow both the slice type and the byte element type to be named.
- t := reflect.TypeOf(a)
- if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
- return reflect.ValueOf(a).Bytes(), true
- }
- return nil, false
-}
-
-func needsQuoting(s string) bool {
- if len(s) == 0 {
- return true
- }
- for i := 0; i < len(s); {
- b := s[i]
- if b < utf8.RuneSelf {
- // Quote anything except a backslash that would need quoting in a
- // JSON string, as well as space and '='
- if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) {
- return true
- }
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) {
- return true
- }
- i += size
- }
- return false
-}
diff --git a/vendor/golang.org/x/exp/slog/value.go b/vendor/golang.org/x/exp/slog/value.go
deleted file mode 100644
index 3550c46f..00000000
--- a/vendor/golang.org/x/exp/slog/value.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
- "fmt"
- "math"
- "runtime"
- "strconv"
- "strings"
- "time"
- "unsafe"
-
- "golang.org/x/exp/slices"
-)
-
-// A Value can represent any Go value, but unlike type any,
-// it can represent most small values without an allocation.
-// The zero Value corresponds to nil.
-type Value struct {
- _ [0]func() // disallow ==
- // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration,
- // the string length for KindString, and nanoseconds since the epoch for KindTime.
- num uint64
- // If any is of type Kind, then the value is in num as described above.
- // If any is of type *time.Location, then the Kind is Time and time.Time value
- // can be constructed from the Unix nanos in num and the location (monotonic time
- // is not preserved).
- // If any is of type stringptr, then the Kind is String and the string value
- // consists of the length in num and the pointer in any.
- // Otherwise, the Kind is Any and any is the value.
- // (This implies that Attrs cannot store values of type Kind, *time.Location
- // or stringptr.)
- any any
-}
-
-// Kind is the kind of a Value.
-type Kind int
-
-// The following list is sorted alphabetically, but it's also important that
-// KindAny is 0 so that a zero Value represents nil.
-
-const (
- KindAny Kind = iota
- KindBool
- KindDuration
- KindFloat64
- KindInt64
- KindString
- KindTime
- KindUint64
- KindGroup
- KindLogValuer
-)
-
-var kindStrings = []string{
- "Any",
- "Bool",
- "Duration",
- "Float64",
- "Int64",
- "String",
- "Time",
- "Uint64",
- "Group",
- "LogValuer",
-}
-
-func (k Kind) String() string {
- if k >= 0 && int(k) < len(kindStrings) {
- return kindStrings[k]
- }
- return ""
-}
-
-// Unexported version of Kind, just so we can store Kinds in Values.
-// (No user-provided value has this type.)
-type kind Kind
-
-// Kind returns v's Kind.
-func (v Value) Kind() Kind {
- switch x := v.any.(type) {
- case Kind:
- return x
- case stringptr:
- return KindString
- case timeLocation:
- return KindTime
- case groupptr:
- return KindGroup
- case LogValuer:
- return KindLogValuer
- case kind: // a kind is just a wrapper for a Kind
- return KindAny
- default:
- return KindAny
- }
-}
-
-//////////////// Constructors
-
-// IntValue returns a Value for an int.
-func IntValue(v int) Value {
- return Int64Value(int64(v))
-}
-
-// Int64Value returns a Value for an int64.
-func Int64Value(v int64) Value {
- return Value{num: uint64(v), any: KindInt64}
-}
-
-// Uint64Value returns a Value for a uint64.
-func Uint64Value(v uint64) Value {
- return Value{num: v, any: KindUint64}
-}
-
-// Float64Value returns a Value for a floating-point number.
-func Float64Value(v float64) Value {
- return Value{num: math.Float64bits(v), any: KindFloat64}
-}
-
-// BoolValue returns a Value for a bool.
-func BoolValue(v bool) Value {
- u := uint64(0)
- if v {
- u = 1
- }
- return Value{num: u, any: KindBool}
-}
-
-// Unexported version of *time.Location, just so we can store *time.Locations in
-// Values. (No user-provided value has this type.)
-type timeLocation *time.Location
-
-// TimeValue returns a Value for a time.Time.
-// It discards the monotonic portion.
-func TimeValue(v time.Time) Value {
- if v.IsZero() {
- // UnixNano on the zero time is undefined, so represent the zero time
- // with a nil *time.Location instead. time.Time.Location method never
- // returns nil, so a Value with any == timeLocation(nil) cannot be
- // mistaken for any other Value, time.Time or otherwise.
- return Value{any: timeLocation(nil)}
- }
- return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())}
-}
-
-// DurationValue returns a Value for a time.Duration.
-func DurationValue(v time.Duration) Value {
- return Value{num: uint64(v.Nanoseconds()), any: KindDuration}
-}
-
-// AnyValue returns a Value for the supplied value.
-//
-// If the supplied value is of type Value, it is returned
-// unmodified.
-//
-// Given a value of one of Go's predeclared string, bool, or
-// (non-complex) numeric types, AnyValue returns a Value of kind
-// String, Bool, Uint64, Int64, or Float64. The width of the
-// original numeric type is not preserved.
-//
-// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
-// KindTime or KindDuration. The monotonic time is not preserved.
-//
-// For nil, or values of all other types, including named types whose
-// underlying type is numeric, AnyValue returns a value of kind KindAny.
-func AnyValue(v any) Value {
- switch v := v.(type) {
- case string:
- return StringValue(v)
- case int:
- return Int64Value(int64(v))
- case uint:
- return Uint64Value(uint64(v))
- case int64:
- return Int64Value(v)
- case uint64:
- return Uint64Value(v)
- case bool:
- return BoolValue(v)
- case time.Duration:
- return DurationValue(v)
- case time.Time:
- return TimeValue(v)
- case uint8:
- return Uint64Value(uint64(v))
- case uint16:
- return Uint64Value(uint64(v))
- case uint32:
- return Uint64Value(uint64(v))
- case uintptr:
- return Uint64Value(uint64(v))
- case int8:
- return Int64Value(int64(v))
- case int16:
- return Int64Value(int64(v))
- case int32:
- return Int64Value(int64(v))
- case float64:
- return Float64Value(v)
- case float32:
- return Float64Value(float64(v))
- case []Attr:
- return GroupValue(v...)
- case Kind:
- return Value{any: kind(v)}
- case Value:
- return v
- default:
- return Value{any: v}
- }
-}
-
-//////////////// Accessors
-
-// Any returns v's value as an any.
-func (v Value) Any() any {
- switch v.Kind() {
- case KindAny:
- if k, ok := v.any.(kind); ok {
- return Kind(k)
- }
- return v.any
- case KindLogValuer:
- return v.any
- case KindGroup:
- return v.group()
- case KindInt64:
- return int64(v.num)
- case KindUint64:
- return v.num
- case KindFloat64:
- return v.float()
- case KindString:
- return v.str()
- case KindBool:
- return v.bool()
- case KindDuration:
- return v.duration()
- case KindTime:
- return v.time()
- default:
- panic(fmt.Sprintf("bad kind: %s", v.Kind()))
- }
-}
-
-// Int64 returns v's value as an int64. It panics
-// if v is not a signed integer.
-func (v Value) Int64() int64 {
- if g, w := v.Kind(), KindInt64; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return int64(v.num)
-}
-
-// Uint64 returns v's value as a uint64. It panics
-// if v is not an unsigned integer.
-func (v Value) Uint64() uint64 {
- if g, w := v.Kind(), KindUint64; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return v.num
-}
-
-// Bool returns v's value as a bool. It panics
-// if v is not a bool.
-func (v Value) Bool() bool {
- if g, w := v.Kind(), KindBool; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return v.bool()
-}
-
-func (v Value) bool() bool {
- return v.num == 1
-}
-
-// Duration returns v's value as a time.Duration. It panics
-// if v is not a time.Duration.
-func (v Value) Duration() time.Duration {
- if g, w := v.Kind(), KindDuration; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
-
- return v.duration()
-}
-
-func (v Value) duration() time.Duration {
- return time.Duration(int64(v.num))
-}
-
-// Float64 returns v's value as a float64. It panics
-// if v is not a float64.
-func (v Value) Float64() float64 {
- if g, w := v.Kind(), KindFloat64; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
-
- return v.float()
-}
-
-func (v Value) float() float64 {
- return math.Float64frombits(v.num)
-}
-
-// Time returns v's value as a time.Time. It panics
-// if v is not a time.Time.
-func (v Value) Time() time.Time {
- if g, w := v.Kind(), KindTime; g != w {
- panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
- }
- return v.time()
-}
-
-func (v Value) time() time.Time {
- loc := v.any.(timeLocation)
- if loc == nil {
- return time.Time{}
- }
- return time.Unix(0, int64(v.num)).In(loc)
-}
-
-// LogValuer returns v's value as a LogValuer. It panics
-// if v is not a LogValuer.
-func (v Value) LogValuer() LogValuer {
- return v.any.(LogValuer)
-}
-
-// Group returns v's value as a []Attr.
-// It panics if v's Kind is not KindGroup.
-func (v Value) Group() []Attr {
- if sp, ok := v.any.(groupptr); ok {
- return unsafe.Slice((*Attr)(sp), v.num)
- }
- panic("Group: bad kind")
-}
-
-func (v Value) group() []Attr {
- return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num)
-}
-
-//////////////// Other
-
-// Equal reports whether v and w represent the same Go value.
-func (v Value) Equal(w Value) bool {
- k1 := v.Kind()
- k2 := w.Kind()
- if k1 != k2 {
- return false
- }
- switch k1 {
- case KindInt64, KindUint64, KindBool, KindDuration:
- return v.num == w.num
- case KindString:
- return v.str() == w.str()
- case KindFloat64:
- return v.float() == w.float()
- case KindTime:
- return v.time().Equal(w.time())
- case KindAny, KindLogValuer:
- return v.any == w.any // may panic if non-comparable
- case KindGroup:
- return slices.EqualFunc(v.group(), w.group(), Attr.Equal)
- default:
- panic(fmt.Sprintf("bad kind: %s", k1))
- }
-}
-
-// append appends a text representation of v to dst.
-// v is formatted as with fmt.Sprint.
-func (v Value) append(dst []byte) []byte {
- switch v.Kind() {
- case KindString:
- return append(dst, v.str()...)
- case KindInt64:
- return strconv.AppendInt(dst, int64(v.num), 10)
- case KindUint64:
- return strconv.AppendUint(dst, v.num, 10)
- case KindFloat64:
- return strconv.AppendFloat(dst, v.float(), 'g', -1, 64)
- case KindBool:
- return strconv.AppendBool(dst, v.bool())
- case KindDuration:
- return append(dst, v.duration().String()...)
- case KindTime:
- return append(dst, v.time().String()...)
- case KindGroup:
- return fmt.Append(dst, v.group())
- case KindAny, KindLogValuer:
- return fmt.Append(dst, v.any)
- default:
- panic(fmt.Sprintf("bad kind: %s", v.Kind()))
- }
-}
-
-// A LogValuer is any Go value that can convert itself into a Value for logging.
-//
-// This mechanism may be used to defer expensive operations until they are
-// needed, or to expand a single value into a sequence of components.
-type LogValuer interface {
- LogValue() Value
-}
-
-const maxLogValues = 100
-
-// Resolve repeatedly calls LogValue on v while it implements LogValuer,
-// and returns the result.
-// If v resolves to a group, the group's attributes' values are not recursively
-// resolved.
-// If the number of LogValue calls exceeds a threshold, a Value containing an
-// error is returned.
-// Resolve's return value is guaranteed not to be of Kind KindLogValuer.
-func (v Value) Resolve() (rv Value) {
- orig := v
- defer func() {
- if r := recover(); r != nil {
- rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5)))
- }
- }()
-
- for i := 0; i < maxLogValues; i++ {
- if v.Kind() != KindLogValuer {
- return v
- }
- v = v.LogValuer().LogValue()
- }
- err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any())
- return AnyValue(err)
-}
-
-func stack(skip, nFrames int) string {
- pcs := make([]uintptr, nFrames+1)
- n := runtime.Callers(skip+1, pcs)
- if n == 0 {
- return "(no stack)"
- }
- frames := runtime.CallersFrames(pcs[:n])
- var b strings.Builder
- i := 0
- for {
- frame, more := frames.Next()
- fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line)
- if !more {
- break
- }
- i++
- if i >= nFrames {
- fmt.Fprintf(&b, "(rest of stack elided)\n")
- break
- }
- }
- return b.String()
-}
diff --git a/vendor/golang.org/x/exp/slog/value_119.go b/vendor/golang.org/x/exp/slog/value_119.go
deleted file mode 100644
index 29b0d732..00000000
--- a/vendor/golang.org/x/exp/slog/value_119.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19 && !go1.20
-
-package slog
-
-import (
- "reflect"
- "unsafe"
-)
-
-type (
- stringptr unsafe.Pointer // used in Value.any when the Value is a string
- groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr
-)
-
-// StringValue returns a new Value for a string.
-func StringValue(value string) Value {
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&value))
- return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)}
-}
-
-func (v Value) str() string {
- var s string
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
- hdr.Data = uintptr(v.any.(stringptr))
- hdr.Len = int(v.num)
- return s
-}
-
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
-// the methods Int64, Float64, and so on, which panic if v is of the
-// wrong kind, String never panics.
-func (v Value) String() string {
- if sp, ok := v.any.(stringptr); ok {
- // Inlining this code makes a huge difference.
- var s string
- hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
- hdr.Data = uintptr(sp)
- hdr.Len = int(v.num)
- return s
- }
- return string(v.append(nil))
-}
-
-// GroupValue returns a new Value for a list of Attrs.
-// The caller must not subsequently mutate the argument slice.
-func GroupValue(as ...Attr) Value {
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as))
- return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)}
-}
diff --git a/vendor/golang.org/x/exp/slog/value_120.go b/vendor/golang.org/x/exp/slog/value_120.go
deleted file mode 100644
index f7d4c093..00000000
--- a/vendor/golang.org/x/exp/slog/value_120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package slog
-
-import "unsafe"
-
-type (
- stringptr *byte // used in Value.any when the Value is a string
- groupptr *Attr // used in Value.any when the Value is a []Attr
-)
-
-// StringValue returns a new Value for a string.
-func StringValue(value string) Value {
- return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))}
-}
-
-// GroupValue returns a new Value for a list of Attrs.
-// The caller must not subsequently mutate the argument slice.
-func GroupValue(as ...Attr) Value {
- return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))}
-}
-
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
-// the methods Int64, Float64, and so on, which panic if v is of the
-// wrong kind, String never panics.
-func (v Value) String() string {
- if sp, ok := v.any.(stringptr); ok {
- return unsafe.String(sp, v.num)
- }
- return string(v.append(nil))
-}
-
-func (v Value) str() string {
- return unsafe.String(v.any.(stringptr), v.num)
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bbd40f4b..55ba1b63 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -221,13 +221,6 @@ go.opentelemetry.io/proto/otlp/trace/v1
# golang.org/x/crypto v0.24.0
## explicit; go 1.18
golang.org/x/crypto/pbkdf2
-# golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
-## explicit; go 1.20
-golang.org/x/exp/constraints
-golang.org/x/exp/slices
-golang.org/x/exp/slog
-golang.org/x/exp/slog/internal
-golang.org/x/exp/slog/internal/buffer
# golang.org/x/mod v0.17.0
## explicit; go 1.18
golang.org/x/mod/semver