From ca19e2f3ec4ae58254e3c662f0fbc9302bcb3458 Mon Sep 17 00:00:00 2001
From: Alejandro Do Nascimento Mora
Date: Mon, 19 Dec 2022 21:24:38 +0100
Subject: [PATCH] Upgrade to pgx v5

Version 5 of pgx introduced several breaking changes that affected our
code base. You can read more about them here:
https://github.com/jackc/pgx/blob/29ad306e47c491a0ecc52d502241539aedd636bd/CHANGELOG.md#codec-and-value-split

In summary:

- NULL Representation

  > Previously, types had a Status field that could be Undefined, Null, or
  > Present. This has been changed to a Valid bool field to harmonize with
  > how database/sql represents NULL and to make the zero value useable.

- Codec and Value Split

  > Previously, the type system combined decoding and encoding values with
  > the value types...
  > These concepts have been separated. A Codec only has responsibility for
  > encoding and decoding values. Value types are generally defined by
  > implementing an interface that a particular Codec understands (e.g.
  > PointScanner and PointValuer for the PostgreSQL point type).

- Array Types

  > All array types are now handled by ArrayCodec instead of using code
  > generation for each new array type...

- Other Changes

  > JSON and JSONB types removed. Use []byte or string directly.

There are some changes that don't seem to appear in the CHANGELOG:

- An error is returned when scanning NULL into a variable that can't
  handle nil values, for example, scanning NULL into an int64.
- Custom types must be registered with the connection's
  `typeMap *pgtype.Map`; otherwise, an error is returned when no
  encoding/decoding plan can be found for the custom type's OID.
- pgx v5 requires Go 1.18, since it uses generics for some features,
  such as arrays.

To deal with the custom types, we added the registration logic to the
pool's `AfterConnect` hook. Every time a connection is created, a new
pgtype.Map is generated containing only the default PostgreSQL types,
so we need to register our custom types on each new connection.
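For illustration only (not part of this patch), a minimal sketch of what
these v5 changes look like from calling code. The connection string and
the "prom_api.label_array" type name are placeholders; note also that
v4's pgxpool.Connect/ConnectConfig became pgxpool.New/NewWithConfig in v5.

package main

import (
    "context"
    "fmt"

    "github.com/jackc/pgx/v5"
    "github.com/jackc/pgx/v5/pgtype"
    "github.com/jackc/pgx/v5/pgxpool"
)

func run(ctx context.Context, connString string) error {
    cfg, err := pgxpool.ParseConfig(connString)
    if err != nil {
        return err
    }

    // v5 builds a fresh pgtype.Map with only the default PostgreSQL types
    // for every new connection, so custom types are registered in AfterConnect.
    cfg.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
        t, err := conn.LoadType(ctx, "prom_api.label_array") // hypothetical custom type
        if err != nil {
            return err
        }
        conn.TypeMap().RegisterType(t)
        return nil
    }

    // pgxpool.ConnectConfig (v4) is now pgxpool.NewWithConfig (v5).
    pool, err := pgxpool.NewWithConfig(ctx, cfg)
    if err != nil {
        return err
    }
    defer pool.Close()

    // NULL handling: pgtype values expose a Valid bool instead of a Status field.
    var name pgtype.Text
    if err := pool.QueryRow(ctx, "SELECT NULL::text").Scan(&name); err != nil {
        return err
    }
    fmt.Println("name is NULL:", !name.Valid)

    // Arrays: the generic pgtype.FlatArray[T] replaces per-type array structs.
    var tags pgtype.FlatArray[pgtype.Text]
    if err := pool.QueryRow(ctx, "SELECT ARRAY['a','b']::text[]").Scan(&tags); err != nil {
        return err
    }
    fmt.Println("got", len(tags), "tags")

    // JSONB: scan directly into []byte (pgtype.JSONB no longer exists).
    var doc []byte
    return pool.QueryRow(ctx, `SELECT '{"a":1}'::jsonb`).Scan(&doc)
}

func main() {
    if err := run(context.Background(), "postgres://localhost:5432/postgres"); err != nil {
        fmt.Println("error:", err)
    }
}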
--- go.mod | 23 +- go.sum | 103 +------ pkg/dataset/config.go | 17 +- pkg/ha/client/client.go | 4 +- pkg/internal/testhelpers/containers_test.go | 4 +- .../testhelpers/postgres_container.go | 36 ++- pkg/jaeger/store/find_trace_ids.go | 2 +- pkg/jaeger/store/get_operations.go | 29 +- pkg/jaeger/store/get_services.go | 14 +- pkg/jaeger/store/trace_query.go | 7 +- pkg/jaeger/store/trace_scan.go | 73 +++-- pkg/pgclient/client.go | 50 +++- pkg/pgclient/config.go | 2 +- pkg/pgclient/metrics.go | 2 +- pkg/pgmodel/common/extension/extension.go | 2 +- pkg/pgmodel/ingestor/copier.go | 9 +- pkg/pgmodel/ingestor/dispatcher.go | 8 +- pkg/pgmodel/ingestor/handler_test.go | 44 ++- pkg/pgmodel/ingestor/ingestor_sql_test.go | 75 +++-- pkg/pgmodel/ingestor/series_writer.go | 83 +++--- pkg/pgmodel/ingestor/trace/batch.go | 16 +- .../trace/instrumentation_lib_batch.go | 12 +- .../trace/instrumentation_lib_batch_test.go | 76 ++--- pkg/pgmodel/ingestor/trace/operation_batch.go | 4 +- .../ingestor/trace/operation_batch_test.go | 14 +- .../ingestor/trace/schema_url_batch.go | 6 +- .../ingestor/trace/schema_url_batch_test.go | 16 +- pkg/pgmodel/ingestor/trace/tag_batch.go | 26 +- pkg/pgmodel/ingestor/trace/tag_batch_test.go | 20 +- pkg/pgmodel/ingestor/trace/writer.go | 38 +-- pkg/pgmodel/lreader/labels_reader.go | 27 +- pkg/pgmodel/metrics/database/database.go | 2 +- pkg/pgmodel/migrate.go | 4 +- pkg/pgmodel/model/custom_types.go | 277 +++++++++++++----- pkg/pgmodel/model/interface.go | 2 +- pkg/pgmodel/model/label_list.go | 56 ++-- pkg/pgmodel/model/pgutf8str/text_types.go | 114 ++----- pkg/pgmodel/model/sql_test_utils.go | 136 ++++++--- pkg/pgmodel/new_migrate.go | 2 +- pkg/pgmodel/querier/querier_sql_test.go | 97 ++++-- pkg/pgmodel/querier/query_builder.go | 16 +- pkg/pgmodel/querier/query_builder_exemplar.go | 2 +- pkg/pgmodel/querier/query_builder_samples.go | 2 +- pkg/pgmodel/querier/query_exemplar.go | 2 +- pkg/pgmodel/querier/query_sample.go | 2 +- pkg/pgmodel/querier/query_tools.go | 2 +- pkg/pgmodel/querier/row.go | 143 ++------- pkg/pgmodel/querier/series_exemplar.go | 25 +- pkg/pgmodel/querier/series_exemplar_test.go | 3 +- pkg/pgmodel/querier/series_set.go | 24 +- pkg/pgmodel/querier/series_set_test.go | 147 ++++------ pkg/pgmodel/querier/timestamp_series.go | 10 +- pkg/pgxconn/implement.go | 11 +- pkg/pgxconn/pgx_conn.go | 8 +- pkg/runner/client.go | 2 +- pkg/runner/runner.go | 2 +- pkg/telemetry/telemetry.go | 17 +- pkg/tests/end_to_end_tests/alerts_test.go | 2 +- .../end_to_end_tests/concurrent_sql_test.go | 4 +- .../end_to_end_tests/config_dataset_test.go | 6 +- .../end_to_end_tests/continuous_agg_test.go | 6 +- pkg/tests/end_to_end_tests/create_test.go | 8 +- .../end_to_end_tests/database_metrics_test.go | 2 +- .../end_to_end_tests/db_connections_test.go | 6 +- pkg/tests/end_to_end_tests/delete_test.go | 4 +- pkg/tests/end_to_end_tests/drop_test.go | 8 +- .../exemplar_query_endpoint_test.go | 2 +- pkg/tests/end_to_end_tests/exemplar_test.go | 2 +- pkg/tests/end_to_end_tests/functions_test.go | 4 +- .../end_to_end_tests/golden_files_test.go | 4 +- .../ha_check_insert_sql_test.go | 2 +- .../ha_multiple_promscales_test.go | 2 +- .../ha_single_promscale_test.go | 2 +- .../ha_try_change_leader_sql_test.go | 2 +- .../end_to_end_tests/ingest_trace_test.go | 2 +- .../insert_compressed_chunks_test.go | 2 +- .../jaeger_store_integration_test.go | 2 +- .../end_to_end_tests/jaeger_store_test.go | 2 +- pkg/tests/end_to_end_tests/main_test.go | 16 +- pkg/tests/end_to_end_tests/metadata_test.go | 2 +- 
.../metric_ingest_bench_test.go | 2 +- .../metrics_duplicate_insert_test.go | 2 +- pkg/tests/end_to_end_tests/migrate_test.go | 2 +- .../end_to_end_tests/multi_tenancy_test.go | 2 +- pkg/tests/end_to_end_tests/nan_test.go | 4 +- .../end_to_end_tests/new_migrate_test.go | 2 +- .../end_to_end_tests/no_timescaledb_test.go | 2 +- pkg/tests/end_to_end_tests/null_chars_test.go | 2 +- .../promql_endpoint_integration_test.go | 2 +- .../promql_label_endpoint_test.go | 2 +- .../promql_query_endpoint_test.go | 2 +- .../promql_series_endpoint_test.go | 2 +- .../promql_write_endpoint_test.go | 2 +- .../query_integration_test.go | 4 +- pkg/tests/end_to_end_tests/router_test.go | 2 +- pkg/tests/end_to_end_tests/rules_test.go | 2 +- pkg/tests/end_to_end_tests/sql_bench_test.go | 12 +- .../end_to_end_tests/sync_commit_test.go | 28 +- pkg/tests/end_to_end_tests/tag_op_test.go | 2 +- pkg/tests/end_to_end_tests/telemetry_test.go | 2 +- .../trace_ingest_bench_test.go | 2 +- .../trace_operation_calls_test.go | 2 +- pkg/tests/end_to_end_tests/trace_put_test.go | 2 +- .../trace_query_integration_test.go | 2 +- .../end_to_end_tests/trace_retention_test.go | 4 +- pkg/tests/end_to_end_tests/trace_tree_test.go | 2 +- pkg/tests/end_to_end_tests/vacuum_test.go | 2 +- pkg/tests/end_to_end_tests/view_test.go | 4 +- pkg/tests/end_to_end_tests/zlast_test.go | 2 +- pkg/tests/testsupport/mock_pgx_conn.go | 17 +- pkg/tests/upgrade_tests/shapshot.go | 4 +- pkg/tests/upgrade_tests/upgrade_test.go | 19 +- pkg/util/lock.go | 2 +- pkg/util/util.go | 6 + pkg/vacuum/vacuum.go | 2 +- 115 files changed, 1082 insertions(+), 1112 deletions(-) diff --git a/go.mod b/go.mod index 00789639a4..2791c5cfda 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/timescale/promscale -go 1.18 +go 1.19 require ( github.com/NYTimes/gziphandler v1.1.1 @@ -19,12 +19,10 @@ require ( github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/go-hclog v1.3.1 - github.com/jackc/pgconn v1.13.0 github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa - github.com/jackc/pgproto3/v2 v2.3.1 - github.com/jackc/pgtype v1.12.0 - github.com/jackc/pgx/v4 v4.17.0 + github.com/jackc/pgx/v5 v5.2.0 github.com/jaegertracing/jaeger v1.38.2-0.20221006002917-5bf8a28fe06d + github.com/mitchellh/mapstructure v1.5.0 github.com/oklog/run v1.1.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.61.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 @@ -37,6 +35,7 @@ require ( github.com/prometheus/prometheus v0.39.2-0.20221021121301-51a44e6657c3 github.com/sergi/go-diff v1.2.0 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 + github.com/spf13/viper v1.13.0 github.com/spyzhov/ajson v0.7.1 github.com/stretchr/testify v1.8.0 github.com/testcontainers/testcontainers-go v0.13.0 @@ -53,7 +52,7 @@ require ( go.uber.org/atomic v1.10.0 go.uber.org/automaxprocs v1.5.1 go.uber.org/goleak v1.2.0 - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e + golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 google.golang.org/grpc v1.49.0 @@ -117,11 +116,9 @@ require ( github.com/hashicorp/memberlist v0.3.1 // indirect github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgio v1.0.0 // indirect 
github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/puddle v1.2.1 // indirect + github.com/jackc/puddle/v2 v2.1.2 // indirect github.com/jessevdk/go-flags v1.5.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -137,7 +134,6 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/miekg/dns v1.1.50 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mountinfo v0.5.0 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect @@ -170,7 +166,6 @@ require ( github.com/spf13/cobra v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.13.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect @@ -182,13 +177,13 @@ require ( go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/net v0.0.0-20220926192436-02166a98028e // indirect golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect - golang.org/x/sync v0.0.0-20220907140024-f12130a52804 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.3.8 // indirect golang.org/x/tools v0.1.12 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect diff --git a/go.sum b/go.sum index 46d19f8fc3..9eb549c0e3 100644 --- a/go.sum +++ b/go.sum @@ -66,8 +66,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -176,8 +174,6 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc 
h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= @@ -276,7 +272,6 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= @@ -479,7 +474,6 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= @@ -669,56 +663,16 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod 
h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= -github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= -github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= -github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.17.0 
h1:Hsx+baY8/zU2WtPLQyZi8WbecgcsWEeyoK1jvg/WgIo= -github.com/jackc/pgx/v4 v4.17.0/go.mod h1:Gd6RmOhtFLTu8cp/Fhq4kP195KrshxYJH3oW8AWJ1pw= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.1 h1:gI8os0wpRXFd4FiAY2dWiqRK037tjj3t7rKFeO4X5iw= -github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= +github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= +github.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg= +github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= github.com/jaegertracing/jaeger v1.38.2-0.20221006002917-5bf8a28fe06d h1:urUtcvGCAopdLu67U9pQNoUxMot7JQS2I5gSXwYaSTQ= github.com/jaegertracing/jaeger v1.38.2-0.20221006002917-5bf8a28fe06d/go.mod h1:T5RFOZgRQBXR9rpQq8HsiIg39gu0DAYGQbDzpKw9gU8= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -778,17 +732,11 @@ github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -804,16 +752,12 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -1020,9 +964,6 @@ github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEA github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1034,9 +975,6 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -1163,7 +1101,6 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1213,8 +1150,6 @@ 
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJP go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= @@ -1224,43 +1159,34 @@ go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66 go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1384,8 +1310,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= -golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1393,7 +1319,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1412,7 +1337,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1500,8 +1424,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1522,7 +1447,6 @@ golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1532,12 +1456,9 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1545,7 +1466,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1577,8 +1497,6 @@ golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyj golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1708,7 +1626,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index f61c7f8b06..5fe68e475f 100644 --- 
a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -9,7 +9,7 @@ import ( "fmt" "time" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/log" "gopkg.in/yaml.v2" ) @@ -72,12 +72,15 @@ func (c *Config) Apply(conn *pgx.Conn) error { log.Info("msg", fmt.Sprintf("Setting trace dataset default retention period to %s", c.Traces.RetentionPeriod)) queries := map[string]interface{}{ - setDefaultMetricChunkIntervalSQL: time.Duration(c.Metrics.ChunkInterval), - setDefaultMetricCompressionSQL: c.Metrics.Compression, - setDefaultMetricHAReleaseRefreshSQL: time.Duration(c.Metrics.HALeaseRefresh), - setDefaultMetricHAReleaseTimeoutSQL: time.Duration(c.Metrics.HALeaseTimeout), - setDefaultMetricRetentionPeriodSQL: time.Duration(c.Metrics.RetentionPeriod), - setDefaultTraceRetentionPeriodSQL: time.Duration(c.Traces.RetentionPeriod), + setDefaultMetricChunkIntervalSQL: time.Duration(c.Metrics.ChunkInterval), + setDefaultMetricCompressionSQL: c.Metrics.Compression, + setDefaultMetricRetentionPeriodSQL: time.Duration(c.Metrics.RetentionPeriod), + setDefaultTraceRetentionPeriodSQL: time.Duration(c.Traces.RetentionPeriod), + // These need to be sent as string because the SQL does `$1::text` making + // PGX require a string, []byte or a TextValuer. + // https://github.com/jackc/pgx/blob/74f9b9f0a483f95513c621364f2c3912181ee360/pgtype/text.go#L92-L106 + setDefaultMetricHAReleaseRefreshSQL: time.Duration(c.Metrics.HALeaseRefresh).String(), + setDefaultMetricHAReleaseTimeoutSQL: time.Duration(c.Metrics.HALeaseTimeout).String(), } for sql, param := range queries { diff --git a/pkg/ha/client/client.go b/pkg/ha/client/client.go index dcb5038045..e4dd5fed02 100644 --- a/pkg/ha/client/client.go +++ b/pkg/ha/client/client.go @@ -9,8 +9,8 @@ import ( "fmt" "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" "github.com/timescale/promscale/pkg/pgxconn" ) diff --git a/pkg/internal/testhelpers/containers_test.go b/pkg/internal/testhelpers/containers_test.go index 0a678139c3..a67a12cf07 100644 --- a/pkg/internal/testhelpers/containers_test.go +++ b/pkg/internal/testhelpers/containers_test.go @@ -15,8 +15,8 @@ import ( constants "github.com/timescale/promscale/pkg/tests" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" ) var ( diff --git a/pkg/internal/testhelpers/postgres_container.go b/pkg/internal/testhelpers/postgres_container.go index 3aaf13f891..45f7b4d528 100644 --- a/pkg/internal/testhelpers/postgres_container.go +++ b/pkg/internal/testhelpers/postgres_container.go @@ -15,13 +15,15 @@ import ( "time" "github.com/blang/semver/v4" + "github.com/timescale/promscale/pkg/pgmodel/model" "github.com/docker/go-connections/nat" - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" ) @@ -174,14 +176,24 @@ func MakePromUserPromAdmin(t testing.TB, dbName string) { func PgxPoolWithRole(t testing.TB, dbName string, role string) *pgxpool.Pool { user := getRoleUser(role) setupRole(t, dbName, role) - pool, err := pgxpool.Connect(context.Background(), 
PgConnectURLUser(dbName, user)) - assert.NoError(t, err) + pool, err := PgxPoolWithRegisteredTypes(PgConnectURLUser(dbName, user)) + require.NoError(t, err) return pool } +func PgxPoolWithRegisteredTypes(connectURL string) (*pgxpool.Pool, error) { + config, err := pgxpool.ParseConfig(connectURL) + if err != nil { + return nil, err + } + config.AfterConnect = model.RegisterCustomPgTypes + return pgxpool.NewWithConfig(context.Background(), config) +} + // WithDB establishes a database for testing and calls the callback func WithDB(t testing.TB, DBName string, superuser SuperuserStatus, deferNode2Setup bool, extensionState TestOptions, f func(db *pgxpool.Pool, t testing.TB, connectString string)) { db, err := DbSetup(DBName, superuser, deferNode2Setup, extensionState) + defer model.UnRegisterCustomPgTypes(db.Config().ConnConfig.Config) if err != nil { t.Fatal(err) return @@ -199,11 +211,14 @@ func GetReadOnlyConnection(t testing.TB, DBName string) *pgxpool.Pool { assert.NoError(t, err) pgConfig.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error { - _, err := conn.Exec(context.Background(), "SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY") - return err + _, err := conn.Exec(ctx, "SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY") + if err != nil { + return err + } + return model.RegisterCustomPgTypes(ctx, conn) } - dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig) + dbPool, err := pgxpool.NewWithConfig(context.Background(), pgConfig) if err != nil { t.Fatal(err) } @@ -252,6 +267,7 @@ func DbSetup(DBName string, superuser SuperuserStatus, deferNode2Setup bool, ext if err != nil { return nil, err } + model.UnRegisterCustomPgTypes(ourDb.Config().Config) if extensionState.UsesMultinode() { // Multinode requires the administrator to set up data nodes, so in @@ -287,7 +303,7 @@ func DbSetup(DBName string, superuser SuperuserStatus, deferNode2Setup bool, ext return nil, err } - dbPool, err := pgxpool.Connect(context.Background(), PgConnectURL(DBName, superuser)) + dbPool, err := pgxpool.New(context.Background(), PgConnectURL(DBName, superuser)) if err != nil { return nil, err } diff --git a/pkg/jaeger/store/find_trace_ids.go b/pkg/jaeger/store/find_trace_ids.go index 24d88b085a..a549bbbcd5 100644 --- a/pkg/jaeger/store/find_trace_ids.go +++ b/pkg/jaeger/store/find_trace_ids.go @@ -8,7 +8,7 @@ import ( "context" "fmt" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/storage/spanstore" "github.com/timescale/promscale/pkg/pgxconn" diff --git a/pkg/jaeger/store/get_operations.go b/pkg/jaeger/store/get_operations.go index 5bb8af8536..ea3716a362 100644 --- a/pkg/jaeger/store/get_operations.go +++ b/pkg/jaeger/store/get_operations.go @@ -6,9 +6,10 @@ package store import ( "context" + "errors" "fmt" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/jaegertracing/jaeger/storage/spanstore" "github.com/timescale/promscale/pkg/pgxconn" ) @@ -34,8 +35,8 @@ WHERE func getOperations(ctx context.Context, conn pgxconn.PgxConn, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { var ( - pgOperationNames, pgSpanKinds pgtype.TextArray - operationsResp []spanstore.Operation + operationNames, spanKinds []string + operationsResp []spanstore.Operation ) args := []interface{}{query.ServiceName} @@ -51,19 +52,10 @@ func getOperations(ctx context.Context, conn pgxconn.PgxConn, query spanstore.Op sqlQuery := fmt.Sprintf(getOperationsSQLFormat, kindQual) 
- if err := conn.QueryRow(ctx, sqlQuery, args...).Scan(&pgOperationNames, &pgSpanKinds); err != nil { + if err := conn.QueryRow(ctx, sqlQuery, args...).Scan(&operationNames, &spanKinds); err != nil { return operationsResp, fmt.Errorf("fetching operations: %w", err) } - operationNames, err := textArraytoStringArr(pgOperationNames) - if err != nil { - return operationsResp, fmt.Errorf("operation names: text-array-to-string-array: %w", err) - } - spanKinds, err := textArraytoStringArr(pgSpanKinds) - if err != nil { - return operationsResp, fmt.Errorf("span kinds: text-array-to-string-array: %w", err) - } - if len(operationNames) != len(spanKinds) { return operationsResp, fmt.Errorf("entries not same in operation-name and span-kind") } @@ -76,10 +68,13 @@ func getOperations(ctx context.Context, conn pgxconn.PgxConn, query spanstore.Op return operationsResp, nil } -func textArraytoStringArr(s pgtype.TextArray) ([]string, error) { - var d []string - if err := s.AssignTo(&d); err != nil { - return []string{}, fmt.Errorf("assign to: %w", err) +func textArraytoStringArr(s pgtype.FlatArray[pgtype.Text]) ([]string, error) { + d := make([]string, len(s)) + for i, v := range s { + if !v.Valid { + return nil, errors.New("can't assign NULL to string") + } + d[i] = v.String } return d, nil } diff --git a/pkg/jaeger/store/get_services.go b/pkg/jaeger/store/get_services.go index 93adff5b10..662c49f408 100644 --- a/pkg/jaeger/store/get_services.go +++ b/pkg/jaeger/store/get_services.go @@ -8,31 +8,27 @@ import ( "context" "fmt" - "github.com/jackc/pgtype" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/pkg/errors" "github.com/timescale/promscale/pkg/pgxconn" ) const getServicesSQL = ` SELECT - array_agg(value#>>'{}' ORDER BY value) + array_agg(value#>>'{}' ORDER BY value) FROM _ps_trace.tag WHERE key='service.name' and value IS NOT NULL` func getServices(ctx context.Context, conn pgxconn.PgxConn) ([]string, error) { - var pgServices pgtype.TextArray + var pgServices pgtype.FlatArray[pgtype.Text] if err := conn.QueryRow(ctx, getServicesSQL).Scan(&pgServices); err != nil { if errors.Is(err, pgx.ErrNoRows) { return []string{}, nil } return nil, fmt.Errorf("fetching services: %w", err) } - s, err := textArraytoStringArr(pgServices) - if err != nil { - return nil, fmt.Errorf("services: converting text-array-to-string-arr: %w", err) - } - return s, nil + return textArraytoStringArr(pgServices) } diff --git a/pkg/jaeger/store/trace_query.go b/pkg/jaeger/store/trace_query.go index 05ad2e3653..12bd2218b3 100644 --- a/pkg/jaeger/store/trace_query.go +++ b/pkg/jaeger/store/trace_query.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/storage/spanstore" ) @@ -192,10 +192,7 @@ func getUUIDFromTraceID(traceID model.TraceID) (pgtype.UUID, error) { return uuid, fmt.Errorf("marshaling TraceID: %w", err) } - if err := uuid.Set(buf); err != nil { - return uuid, fmt.Errorf("setting TraceID: %w", err) - } - return uuid, nil + return pgtype.UUID{Bytes: buf, Valid: true}, nil } func (b *Builder) getTraceQuery(traceID model.TraceID) (string, []interface{}, error) { diff --git a/pkg/jaeger/store/trace_scan.go b/pkg/jaeger/store/trace_scan.go index 799e535f70..efad7b872e 100644 --- a/pkg/jaeger/store/trace_scan.go +++ b/pkg/jaeger/store/trace_scan.go @@ -5,13 +5,14 @@ package store import ( + "encoding/json" "fmt" "time" 
"go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/pgmodel/ingestor/trace" "github.com/timescale/promscale/pkg/pgxconn" @@ -24,23 +25,23 @@ type spanDBResult struct { startTime time.Time endTime time.Time kind pgtype.Text - droppedTagsCounts int - droppedEventsCounts int - droppedLinkCounts int + droppedTagsCounts int32 + droppedEventsCounts int32 + droppedLinkCounts int32 statusCode string statusMessage pgtype.Text traceState pgtype.Text schemaUrl pgtype.Text spanName string - resourceTags pgtype.JSONB - spanTags pgtype.JSONB + resourceTags []byte + spanTags []byte // From events table. // for events, the entire slice can be nil but not any element within the slice eventNames *[]string eventTimes *[]time.Time - eventDroppedTagsCount *[]int - eventTags pgtype.JSONBArray + eventDroppedTagsCount *[]int32 + eventTags pgtype.FlatArray[[]byte] // From instrumentation lib table. instLibName *string @@ -48,11 +49,11 @@ type spanDBResult struct { instLibSchemaUrl *string // From link table. - linksLinkedTraceIds pgtype.UUIDArray + linksLinkedTraceIds pgtype.FlatArray[pgtype.UUID] linksLinkedSpanIds *[]int64 - linksTraceStates *[]*string - linksDroppedTagsCount *[]int - linksTags pgtype.JSONBArray + linksTraceStates pgtype.FlatArray[*string] + linksDroppedTagsCount *[]int32 + linksTags pgtype.FlatArray[[]byte] } func ScanRow(row pgxconn.PgxRows, traces *ptrace.Traces) error { @@ -140,10 +141,7 @@ func populateSpan( ref := instrumentationLibSpan.Spans().AppendEmpty() // Type preprocessing. - traceId, err := makeTraceId(dbResult.traceId) - if err != nil { - return fmt.Errorf("makeTraceId: %w", err) - } + traceId := makeTraceId(dbResult.traceId) ref.SetTraceID(traceId) id := makeSpanId(&dbResult.spanId) @@ -151,25 +149,25 @@ func populateSpan( // We use a pointer since parent id can be nil. If we use normal int64, we can get parsing errors. 
var temp *int64 - if err := dbResult.parentSpanId.AssignTo(&temp); err != nil { - return fmt.Errorf("assigning parent span id: %w", err) + if dbResult.parentSpanId.Valid { + temp = &dbResult.parentSpanId.Int64 } parentId := makeSpanId(temp) ref.SetParentSpanID(parentId) - if dbResult.traceState.Status == pgtype.Present { + if dbResult.traceState.Valid { ts := pcommon.NewTraceState() ts.FromRaw(dbResult.traceState.String) ts.MoveTo(ref.TraceState()) } - if dbResult.schemaUrl.Status == pgtype.Present { + if dbResult.schemaUrl.Valid { resourceSpan.SetSchemaUrl(dbResult.schemaUrl.String) } ref.SetName(dbResult.spanName) - if dbResult.kind.Status == pgtype.Present { + if dbResult.kind.Valid { ref.SetKind(internalToSpanKind(dbResult.kind.String)) } @@ -207,7 +205,7 @@ func setStatus(ref ptrace.Span, dbRes *spanDBResult) error { if dbRes.statusCode != "" { ref.Status().SetCode(internalToStatusCode(dbRes.statusCode)) } - if dbRes.statusMessage.Status != pgtype.Null { + if dbRes.statusMessage.Valid { message := dbRes.statusMessage.String ref.Status().SetMessage(message) } @@ -224,7 +222,7 @@ func populateEvents( event.SetName((*dbResult.eventNames)[i]) event.SetTimestamp(pcommon.NewTimestampFromTime((*dbResult.eventTimes)[i])) event.SetDroppedAttributesCount(uint32((*dbResult.eventDroppedTagsCount)[i])) - attr, err := makeAttributes(dbResult.eventTags.Elements[i]) + attr, err := makeAttributes(dbResult.eventTags[i]) if err != nil { return fmt.Errorf("making event tags: %w", err) } @@ -239,27 +237,22 @@ func populateLinks( n := len(*dbResult.linksLinkedSpanIds) - var linkedTraceIds []pcommon.TraceID - if err := dbResult.linksLinkedTraceIds.AssignTo(&linkedTraceIds); err != nil { - return fmt.Errorf("linksLinkedTraceIds: AssignTo: %w", err) - } - for i := 0; i < n; i++ { link := spanEventSlice.AppendEmpty() - link.SetTraceID(linkedTraceIds[i]) + link.SetTraceID(dbResult.linksLinkedTraceIds[i].Bytes) spanId := makeSpanId(&(*dbResult.linksLinkedSpanIds)[i]) link.SetSpanID(spanId) - if (*dbResult.linksTraceStates)[i] != nil { - traceState := *((*dbResult.linksTraceStates)[i]) + if dbResult.linksTraceStates[i] != nil { + traceState := *(dbResult.linksTraceStates[i]) ts := pcommon.NewTraceState() ts.FromRaw(traceState) ts.MoveTo(link.TraceState()) } link.SetDroppedAttributesCount(uint32((*dbResult.linksDroppedTagsCount)[i])) - attr, err := makeAttributes(dbResult.linksTags.Elements[i]) + attr, err := makeAttributes(dbResult.linksTags[i]) if err != nil { return fmt.Errorf("making link tags: %w", err) } @@ -269,9 +262,12 @@ func populateLinks( } // makeAttributes makes raw attribute map using tags. 
-func makeAttributes(tagsJson pgtype.JSONB) (map[string]interface{}, error) { +func makeAttributes(tagsJson []byte) (map[string]interface{}, error) { + if tagsJson == nil { + return nil, nil + } var tags map[string]interface{} - if err := tagsJson.AssignTo(&tags); err != nil { + if err := json.Unmarshal(tagsJson, &tags); err != nil { return map[string]interface{}{}, fmt.Errorf("tags assign to: %w", err) } tags = sanitizeInt(tags) @@ -295,12 +291,11 @@ func isIntegral(val float64) bool { return val == float64(int(val)) } -func makeTraceId(s pgtype.UUID) (pcommon.TraceID, error) { - var traceId pcommon.TraceID - if err := s.AssignTo(&traceId); err != nil { - return pcommon.TraceID{}, fmt.Errorf("trace id assign to: %w", err) +func makeTraceId(s pgtype.UUID) pcommon.TraceID { + if !s.Valid { + return pcommon.TraceID{} } - return traceId, nil + return pcommon.TraceID(s.Bytes) } func makeSpanId(s *int64) pcommon.SpanID { diff --git a/pkg/pgclient/client.go b/pkg/pgclient/client.go index b9bb8f3689..8eeb44b332 100644 --- a/pkg/pgclient/client.go +++ b/pkg/pgclient/client.go @@ -9,8 +9,8 @@ import ( "fmt" "net/url" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/collector/pdata/ptrace" @@ -20,6 +20,7 @@ import ( "github.com/timescale/promscale/pkg/pgmodel/health" "github.com/timescale/promscale/pkg/pgmodel/ingestor" "github.com/timescale/promscale/pkg/pgmodel/lreader" + "github.com/timescale/promscale/pkg/pgmodel/model" "github.com/timescale/promscale/pkg/pgmodel/querier" "github.com/timescale/promscale/pkg/pgxconn" "github.com/timescale/promscale/pkg/prompb" @@ -87,7 +88,7 @@ func NewClient(r prometheus.Registerer, cfg *Config, mt tenancy.Authorizer, sche if err != nil { return nil, fmt.Errorf("get maint pg-config: %w", err) } - maintPool, err = pgxpool.ConnectConfig(context.Background(), maintPgConfig) + maintPool, err = pgxpool.NewWithConfig(context.Background(), maintPgConfig) if err != nil { return nil, fmt.Errorf("err creating maintenance connection pool: %w", err) } @@ -107,8 +108,8 @@ func NewClient(r prometheus.Registerer, cfg *Config, mt tenancy.Authorizer, sche if err != nil { return nil, fmt.Errorf("get writer pg-config: %w", err) } - SetWriterPoolAfterConnect(writerPgConfig, schemaLocker, cfg.WriterSynchronousCommit) - writerPool, err = pgxpool.ConnectConfig(context.Background(), writerPgConfig) + writerPgConfig.AfterConnect = WriterPoolAfterConnect(schemaLocker, cfg.WriterSynchronousCommit) + writerPool, err = pgxpool.NewWithConfig(context.Background(), writerPgConfig) if err != nil { return nil, fmt.Errorf("err creating writer connection pool: %w", err) } @@ -132,8 +133,8 @@ func NewClient(r prometheus.Registerer, cfg *Config, mt tenancy.Authorizer, sche "num-copiers", numCopiers, "statement-cache", statementCacheLog) - readerPgConfig.AfterConnect = schemaLocker - readerPool, err := pgxpool.ConnectConfig(context.Background(), readerPgConfig) + readerPgConfig.AfterConnect = ReaderPoolAfterConnect(schemaLocker) + readerPool, err := pgxpool.NewWithConfig(context.Background(), readerPgConfig) if err != nil { return nil, fmt.Errorf("err creating reader connection pool: %w", err) } @@ -145,19 +146,35 @@ func NewClient(r prometheus.Registerer, cfg *Config, mt tenancy.Authorizer, sche return client, err } -func SetWriterPoolAfterConnect(writerPgConfig *pgxpool.Config, schemaLocker LockFunc, synchronousCommit bool) { - if !synchronousCommit { 
- // if synchronous_commit should be disabled, we use the AfterConnect hook so that we can set it to 'off' - // for the session before the connection is added to the pool for use - writerPgConfig.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error { +func ReaderPoolAfterConnect(schemaLocker LockFunc) func(context.Context, *pgx.Conn) error { + return func(ctx context.Context, c *pgx.Conn) error { + if schemaLocker != nil { + err := schemaLocker(ctx, c) + if err != nil { + return err + } + } + return model.RegisterCustomPgTypes(ctx, c) + } +} + +func WriterPoolAfterConnect(schemaLocker LockFunc, synchronousCommit bool) func(context.Context, *pgx.Conn) error { + return func(ctx context.Context, conn *pgx.Conn) error { + if !synchronousCommit { _, err := conn.Exec(ctx, "SET SESSION synchronous_commit to 'off'") if err != nil { return err } - return schemaLocker(ctx, conn) } - } else { - writerPgConfig.AfterConnect = schemaLocker + + if schemaLocker != nil { + err := schemaLocker(ctx, conn) + if err != nil { + return err + } + } + + return model.RegisterCustomPgTypes(ctx, conn) } } @@ -175,7 +192,8 @@ func (cfg *Config) getPgConfig(poolSize int) (*pgxpool.Config, error) { pgConfig.MinConns = int32(min) if !cfg.EnableStatementsCache { - pgConfig.ConnConfig.PreferSimpleProtocol = true + // TODO use QueryExecModeExec + pgConfig.ConnConfig.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol } return pgConfig, nil } diff --git a/pkg/pgclient/config.go b/pkg/pgclient/config.go index 8d03f5be4a..d854a015f9 100644 --- a/pkg/pgclient/config.go +++ b/pkg/pgclient/config.go @@ -13,7 +13,7 @@ import ( "strconv" "time" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/limits" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/pgmodel/cache" diff --git a/pkg/pgclient/metrics.go b/pkg/pgclient/metrics.go index c762aac9fa..2df0a328b3 100644 --- a/pkg/pgclient/metrics.go +++ b/pkg/pgclient/metrics.go @@ -5,7 +5,7 @@ package pgclient import ( - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/util" diff --git a/pkg/pgmodel/common/extension/extension.go b/pkg/pgmodel/common/extension/extension.go index dd46587264..e739e5ab01 100644 --- a/pkg/pgmodel/common/extension/extension.go +++ b/pkg/pgmodel/common/extension/extension.go @@ -11,7 +11,7 @@ import ( "strings" "github.com/blang/semver/v4" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/common/schema" diff --git a/pkg/pgmodel/ingestor/copier.go b/pkg/pgmodel/ingestor/copier.go index 7337a466d7..c1a5c7256d 100644 --- a/pkg/pgmodel/ingestor/copier.go +++ b/pkg/pgmodel/ingestor/copier.go @@ -6,6 +6,7 @@ package ingestor import ( "context" + "errors" "fmt" "math" "sort" @@ -13,8 +14,8 @@ import ( "sync" "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "go.opentelemetry.io/otel/attribute" @@ -227,8 +228,8 @@ func isPGUniqueViolation(err error) bool { if err == nil { return false } - pgErr, ok := err.(*pgconn.PgError) - if ok && pgErr.Code == "23505" { + var e *pgconn.PgError + if errors.As(err, &e) && e.Code == "23505" { return true } return false diff --git 
a/pkg/pgmodel/ingestor/dispatcher.go b/pkg/pgmodel/ingestor/dispatcher.go index 44dbf755dd..935e506dac 100644 --- a/pkg/pgmodel/ingestor/dispatcher.go +++ b/pkg/pgmodel/ingestor/dispatcher.go @@ -71,13 +71,7 @@ func newPgxDispatcher(conn pgxconn.PgxConn, mCache cache.MetricCache, scache cac handleDecompression = skipDecompression } - if err := model.RegisterCustomPgTypes(conn); err != nil { - return nil, fmt.Errorf("registering custom pg types: %w", err) - } - - labelArrayOID := model.GetCustomTypeOID(model.LabelArray) - - sw := NewSeriesWriter(conn, labelArrayOID, lCache) + sw := NewSeriesWriter(conn, lCache) elf := NewExamplarLabelFormatter(conn, eCache) for i := 0; i < numCopiers; i++ { diff --git a/pkg/pgmodel/ingestor/handler_test.go b/pkg/pgmodel/ingestor/handler_test.go index 07b08b3b24..6d5ef3eb0c 100644 --- a/pkg/pgmodel/ingestor/handler_test.go +++ b/pkg/pgmodel/ingestor/handler_test.go @@ -38,18 +38,28 @@ func TestLabelArrayCreator(t *testing.T) { res, _, err := createLabelArrays(seriesSet, labelMap, 2) require.NoError(t, err) - expected := [][]int32{{2, 3}} + expected := model.ArrayOfLabelArray{ + model.LabelArray{ + {Int32: 2, Valid: true}, + {Int32: 3, Valid: true}, + }, + } require.Equal(t, res, expected) res, _, err = createLabelArrays(seriesSet, labelMap, 3) require.NoError(t, err) - expected = [][]int32{{2, 3}} require.Equal(t, res, expected) labelMap[makeLabelKey(valOne)] = cache.NewLabelInfo(3, 3) res, _, err = createLabelArrays(seriesSet, labelMap, 3) require.NoError(t, err) - expected = [][]int32{{2, 0, 3}} + expected = model.ArrayOfLabelArray{ + model.LabelArray{ + {Int32: 2, Valid: true}, + {Int32: 0, Valid: false}, + {Int32: 3, Valid: true}, + }, + } require.Equal(t, res, expected) /* test two series */ @@ -66,9 +76,21 @@ func TestLabelArrayCreator(t *testing.T) { res, ser, err := createLabelArrays(seriesSet, labelMap, 5) require.NoError(t, err) require.Equal(t, len(ser), 2) - expected = [][]int32{ - {100, 0, 0, 0, 1}, - {100, 0, 0, 0, 2}, + expected = model.ArrayOfLabelArray{ + model.LabelArray{ + {Int32: 100, Valid: true}, + {Int32: 0, Valid: false}, + {Int32: 0, Valid: false}, + {Int32: 0, Valid: false}, + {Int32: 1, Valid: true}, + }, + model.LabelArray{ + {Int32: 100, Valid: true}, + {Int32: 0, Valid: false}, + {Int32: 0, Valid: false}, + {Int32: 0, Valid: false}, + {Int32: 2, Valid: true}, + }, } require.Equal(t, res, expected) @@ -87,6 +109,14 @@ func TestLabelArrayCreator(t *testing.T) { res, ser, err = createLabelArrays(seriesSet, labelMap, 5) require.NoError(t, err) require.Equal(t, len(ser), 1) - expected = [][]int32{{100, 0, 0, 0, 1}} + expected = model.ArrayOfLabelArray{ + model.LabelArray{ + {Int32: 100, Valid: true}, + {Int32: 0, Valid: false}, + {Int32: 0, Valid: false}, + {Int32: 0, Valid: false}, + {Int32: 1, Valid: true}, + }, + } require.Equal(t, res, expected) } diff --git a/pkg/pgmodel/ingestor/ingestor_sql_test.go b/pkg/pgmodel/ingestor/ingestor_sql_test.go index ef85704ac4..87f879c293 100644 --- a/pkg/pgmodel/ingestor/ingestor_sql_test.go +++ b/pkg/pgmodel/ingestor/ingestor_sql_test.go @@ -11,9 +11,9 @@ import ( "testing" "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgtype" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -31,12 +31,21 @@ const ( tableName string = "table name" ) -func getTestLabelArray(t *testing.T, l [][]int32) 
*pgtype.ArrayType { - model.SetLabelArrayOIDForTest(0) - labelArrayArray := model.GetCustomType(model.LabelArray) - err := labelArrayArray.Set(l) - require.NoError(t, err) - return labelArrayArray +func getTestLabelArray(t *testing.T, labelsSlices [][]int32) model.ArrayOfLabelArray { + labelsArrays := make(model.ArrayOfLabelArray, 0, len(labelsSlices)) + for _, labelsSlice := range labelsSlices { + labelsArray := make(model.LabelArray, 0, len(labelsSlice)) + for _, l := range labelsSlice { + valid := true + if l == 0 { + valid = false + } + labelsArray = append(labelsArray, pgtype.Int4{Int32: l, Valid: valid}) + } + labelsArrays = append(labelsArrays, model.LabelArray(labelsArray)) + } + + return labelsArrays } func init() { @@ -265,10 +274,13 @@ func TestPGXInserterInsertSeries(t *testing.T) { t.Run(c.name, func(t *testing.T) { for i := range c.sqlQueries { for j := range c.sqlQueries[i].Args { - if _, ok := c.sqlQueries[i].Args[j].([]string); ok { - tmp := &pgutf8str.TextArray{} - err := tmp.Set(c.sqlQueries[i].Args[j]) - require.NoError(t, err) + if slice, ok := c.sqlQueries[i].Args[j].([]string); ok { + tmp := make(pgtype.FlatArray[pgutf8str.Text], 0, len(slice)) + for _, item := range slice { + text := pgutf8str.Text{} + require.NoError(t, text.Scan(item)) + tmp = append(tmp, text) + } c.sqlQueries[i].Args[j] = tmp } } @@ -277,8 +289,7 @@ func TestPGXInserterInsertSeries(t *testing.T) { scache := cache.NewSeriesCache(cache.DefaultConfig, nil) scache.Reset() lCache := cache.NewInvertedLabelsCache(cache.DefaultConfig, nil) - sw := NewSeriesWriter(mock, 0, lCache) - + sw := NewSeriesWriter(mock, lCache) lsi := make([]model.Insertable, 0) for _, ser := range c.series { ls, err := scache.GetSeriesFromLabels(ser) @@ -423,10 +434,16 @@ func TestPGXInserterCacheReset(t *testing.T) { for i := range sqlQueries { for j := range sqlQueries[i].Args { - if _, ok := sqlQueries[i].Args[j].([]string); ok { - tmp := &pgutf8str.TextArray{} - err := tmp.Set(sqlQueries[i].Args[j]) - require.NoError(t, err) + if slice, ok := sqlQueries[i].Args[j].([]string); ok { + tmp := make(pgtype.FlatArray[pgutf8str.Text], 0, len(slice)) + for _, item := range slice { + tmp = append(tmp, pgutf8str.Text{ + Text: pgtype.Text{ + String: item, + Valid: true, + }, + }) + } sqlQueries[i].Args[j] = tmp } } @@ -435,7 +452,7 @@ func TestPGXInserterCacheReset(t *testing.T) { mock := model.NewSqlRecorder(sqlQueries, t) scache := cache.NewSeriesCache(cache.DefaultConfig, nil) lcache := cache.NewInvertedLabelsCache(cache.DefaultConfig, nil) - sw := NewSeriesWriter(mock, 0, lcache) + sw := NewSeriesWriter(mock, lcache) inserter := pgxDispatcher{ conn: mock, scache: scache, @@ -544,8 +561,6 @@ func TestPGXInserterInsertData(t *testing.T) { { name: "Zero data", sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, }, }, @@ -555,8 +570,6 @@ func TestPGXInserterInsertData(t *testing.T) { "metric_0": {model.NewPromSamples(makeLabel(), make([]prompb.Sample, 1))}, }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM 
_prom_catalog.get_or_create_metric_table_name($1)", @@ -591,8 +604,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", @@ -639,8 +650,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", @@ -658,8 +667,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", @@ -730,8 +737,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", @@ -790,8 +795,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", @@ -812,8 +815,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, metricsGetErr: fmt.Errorf("some metrics error"), sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", @@ -850,8 +851,6 @@ func TestPGXInserterInsertData(t *testing.T) { }, sqlQueries: []model.SqlQuery{ - {Sql: "SELECT 'prom_api.label_array'::regtype::oid", Results: model.RowResults{{uint32(434)}}}, - {Sql: "SELECT 'prom_api.label_value_array'::regtype::oid", Results: model.RowResults{{uint32(435)}}}, {Sql: "CALL _prom_catalog.finalize_metric_creation()"}, { Sql: "SELECT id, table_name, possibly_new FROM _prom_catalog.get_or_create_metric_table_name($1)", diff --git a/pkg/pgmodel/ingestor/series_writer.go b/pkg/pgmodel/ingestor/series_writer.go index 44ae573f5b..9b19825211 100644 --- a/pkg/pgmodel/ingestor/series_writer.go +++ b/pkg/pgmodel/ingestor/series_writer.go @@ -8,7 +8,7 
@@ import ( "context" "fmt" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/pgmodel/cache" "github.com/timescale/promscale/pkg/pgmodel/model" @@ -24,30 +24,26 @@ const ( ) type seriesWriter struct { - conn pgxconn.PgxConn - labelArrayOID uint32 - labelsCache *cache.InvertedLabelsCache + conn pgxconn.PgxConn + labelsCache *cache.InvertedLabelsCache } type SeriesVisitor interface { VisitSeries(func(info *pgmodel.MetricInfo, s *model.Series) error) error } -func labelArrayTranscoder() pgtype.ValueTranscoder { return &pgtype.Int4Array{} } - -func NewSeriesWriter(conn pgxconn.PgxConn, labelArrayOID uint32, labelsCache *cache.InvertedLabelsCache) *seriesWriter { - return &seriesWriter{conn, labelArrayOID, labelsCache} +func NewSeriesWriter(conn pgxconn.PgxConn, labelsCache *cache.InvertedLabelsCache) *seriesWriter { + return &seriesWriter{conn, labelsCache} } type perMetricInfo struct { - metricName string - series []*model.Series - labelsToFetch *model.LabelList // labels we haven't found in our cache and need to fetch from DB - cachedLabels []cache.LabelKey - maxPos int - labelArraySet *pgtype.ArrayType - labelArraySetNumLabels int - metricInfo *pgmodel.MetricInfo + metricName string + series []*model.Series + labelsToFetch *model.LabelList // labels we haven't found in our cache and need to fetch from DB + cachedLabels []cache.LabelKey + maxPos int + labelArraySet model.ArrayOfLabelArray + metricInfo *pgmodel.MetricInfo } // PopulateOrCreateSeries examines all series in SeriesVisitor, checking if the labels or @@ -138,7 +134,7 @@ func (h *seriesWriter) PopulateOrCreateSeries(ctx context.Context, sv SeriesVisi batch := h.conn.NewBatch() batchInfos := make([]*perMetricInfo, 0, len(infos)) for _, info := range infos { - if info.labelArraySetNumLabels == 0 { + if len(info.labelArraySet) == 0 { continue } @@ -219,24 +215,19 @@ func (h *seriesWriter) fillLabelIDs(ctx context.Context, infos map[string]*perMe //getLabels in batches of 1000 to prevent locks on label creation //from being taken for too long. 
itemsPerBatch := 1000 - for i := 0; i < len(names.Elements); i += itemsPerBatch { + for i := 0; i < len(names); i += itemsPerBatch { high := i + itemsPerBatch - if len(names.Elements) < high { - high = len(names.Elements) - } - namesSlice, err := names.Slice(i, high) - if err != nil { - return dbEpoch, fmt.Errorf("error filling labels: slicing names: %w", err) - } - valuesSlice, err := values.Slice(i, high) - if err != nil { - return dbEpoch, fmt.Errorf("error filling labels: slicing values: %w", err) + if len(names) < high { + high = len(names) } + namesSlice := names[i:high] + valuesSlice := values[i:high] + batch.Queue("BEGIN;") batch.Queue("SELECT * FROM _prom_catalog.get_or_create_label_ids($1, $2, $3, $4)", metricName, info.metricInfo.TableName, namesSlice, valuesSlice) batch.Queue("COMMIT;") infoBatches = append(infoBatches, info) - items += len(namesSlice.Elements) + items += len(namesSlice) } // since info.maxPos is only updated with fetched labels we need to iterate through cached as well for _, cachedLabel := range info.cachedLabels { @@ -275,8 +266,8 @@ func (h *seriesWriter) fillLabelIDs(ctx context.Context, infos map[string]*perMe var ( pos []int32 labelIDs []int32 - labelNames pgutf8str.TextArray - labelValues pgutf8str.TextArray + labelNames pgtype.FlatArray[pgutf8str.Text] + labelValues pgtype.FlatArray[pgutf8str.Text] names []string values []string ) @@ -284,8 +275,8 @@ func (h *seriesWriter) fillLabelIDs(ctx context.Context, infos map[string]*perMe if err != nil { return fmt.Errorf("error filling labels: %w", err) } - names = labelNames.Get().([]string) - values = labelValues.Get().([]string) + names = pgutf8str.TextArrayToSlice(labelNames) + values = pgutf8str.TextArrayToSlice(labelValues) for i := range pos { res := cache.NewLabelInfo(labelIDs[i], pos[i]) @@ -327,27 +318,28 @@ func (h *seriesWriter) buildLabelArrays(ctx context.Context, infos map[string]*p return fmt.Errorf("error building label array: cannot create label_array: %w", err) } info.series = newSeries - info.labelArraySet = pgtype.NewArrayType("prom_api.label_array[]", h.labelArrayOID, labelArrayTranscoder) - err = info.labelArraySet.Set(labelArraySet) - if err != nil { - return fmt.Errorf("error setting series id: cannot set label_array: %w", err) - } - info.labelArraySetNumLabels = len(labelArraySet) - + info.labelArraySet = labelArraySet } return nil } -func createLabelArrays(series []*model.Series, labelMap map[cache.LabelKey]cache.LabelInfo, maxPos int) ([][]int32, []*model.Series, error) { - labelArraySet := make([][]int32, 0, len(series)) +// TODO test that for the items that are not in place there's no issue with +// Valid = false +func createLabelArrays( + series []*model.Series, + labelMap map[cache.LabelKey]cache.LabelInfo, + maxPos int, +) (model.ArrayOfLabelArray, []*model.Series, error) { + + labelArraySet := make(model.ArrayOfLabelArray, 0, len(series)) dest := 0 for src := 0; src < len(series); src++ { names, values, ok := series[src].NameValues() if !ok { continue } - lArray := make([]int32, maxPos) + lArray := make(model.LabelArray, maxPos) maxIndex := 0 for i := range names { key := cache.LabelKey{MetricName: series[src].MetricName(), Name: names[i], Value: values[i]} @@ -360,7 +352,10 @@ func createLabelArrays(series []*model.Series, labelMap map[cache.LabelKey]cache } //Pos is 1-indexed, slices are 0-indexed sliceIndex := int(res.Pos) - 1 - lArray[sliceIndex] = int32(res.LabelID) + lArray[sliceIndex] = pgtype.Int4{ + Int32: int32(res.LabelID), + Valid: true, + } if sliceIndex > 
maxIndex { maxIndex = sliceIndex } diff --git a/pkg/pgmodel/ingestor/trace/batch.go b/pkg/pgmodel/ingestor/trace/batch.go index d719e5bfae..32fca22381 100644 --- a/pkg/pgmodel/ingestor/trace/batch.go +++ b/pkg/pgmodel/ingestor/trace/batch.go @@ -5,8 +5,8 @@ import ( "fmt" "sort" - "github.com/jackc/pgtype" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgxconn" ) @@ -133,18 +133,18 @@ func (b batcher) SendBatch(ctx context.Context, conn pgxconn.PgxConn) (err error func (b batcher) GetID(i batchItem) (pgtype.Int8, error) { entry, ok := b.batch[i] if !ok { - return pgtype.Int8{Status: pgtype.Null}, fmt.Errorf("error getting ID from batch") + return pgtype.Int8{Valid: false}, fmt.Errorf("error getting ID from batch") } id, ok := entry.(pgtype.Int8) if !ok { - return pgtype.Int8{Status: pgtype.Null}, errors.ErrInvalidCacheEntryType + return pgtype.Int8{Valid: false}, errors.ErrInvalidCacheEntryType } - if id.Status != pgtype.Present { - return pgtype.Int8{Status: pgtype.Null}, fmt.Errorf("ID is null") + if !id.Valid { + return id, fmt.Errorf("ID is null") } - if id.Int == 0 { - return pgtype.Int8{Status: pgtype.Null}, fmt.Errorf("ID is 0") + if id.Int64 == 0 { + return pgtype.Int8{Valid: false}, fmt.Errorf("ID is 0") } return id, nil } diff --git a/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch.go b/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch.go index bae66e042a..e2a39efe10 100644 --- a/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch.go +++ b/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch.go @@ -8,8 +8,8 @@ import ( "context" "fmt" - "github.com/jackc/pgtype" - pgx "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/pgxconn" ) @@ -36,10 +36,10 @@ func (il instrumentationLibrary) Before(item sortable) bool { if il.version != otherIl.version { return il.version < otherIl.version } - if il.schemaURLID.Status != otherIl.schemaURLID.Status { - return il.schemaURLID.Status < otherIl.schemaURLID.Status + if il.schemaURLID.Valid != otherIl.schemaURLID.Valid { + return !il.schemaURLID.Valid } - return il.schemaURLID.Int < otherIl.schemaURLID.Int + return il.schemaURLID.Int64 < otherIl.schemaURLID.Int64 } func (il instrumentationLibrary) AddToDBBatch(batch pgxconn.PgxBatch) { @@ -76,7 +76,7 @@ func (lib instrumentationLibraryBatch) SendBatch(ctx context.Context, conn pgxco } func (lib instrumentationLibraryBatch) GetID(name, version string, schemaURLID pgtype.Int8) (pgtype.Int8, error) { if name == "" { - return pgtype.Int8{Status: pgtype.Null}, nil + return pgtype.Int8{Valid: false}, nil } il := instrumentationLibrary{name, version, schemaURLID} id, err := lib.b.GetID(il) diff --git a/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch_test.go b/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch_test.go index ca62404daa..9f4acced22 100644 --- a/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch_test.go +++ b/pkg/pgmodel/ingestor/trace/instrumentation_lib_batch_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/model" @@ -13,9 +13,9 @@ import ( func TestInstrumentationLibraryBatch(t *testing.T) { cache := newInstrumentationLibraryCache() - incache := 
instrumentationLibrary{"incache", "", pgtype.Int8{Int: 99, Status: pgtype.Present}} - invalid := instrumentationLibrary{"invalid", "", pgtype.Int8{Int: 10, Status: pgtype.Present}} - cache.Insert(incache, pgtype.Int8{Int: 1337, Status: pgtype.Present}, incache.SizeInCache()) + incache := instrumentationLibrary{"incache", "", pgtype.Int8{Int64: 99, Valid: true}} + invalid := instrumentationLibrary{"invalid", "", pgtype.Int8{Int64: 10, Valid: true}} + cache.Insert(incache, pgtype.Int8{Int64: 1337, Valid: true}, incache.SizeInCache()) cache.Insert(invalid, "foo", 0) testCases := []struct { @@ -30,101 +30,101 @@ func TestInstrumentationLibraryBatch(t *testing.T) { { name: "happy path", instLibs: []instrumentationLibrary{ - {"", "ignored empty name lib", pgtype.Int8{Int: 1, Status: pgtype.Present}}, - {"test", "first", pgtype.Int8{Int: 2, Status: pgtype.Present}}, - {"test", "first", pgtype.Int8{Int: 1, Status: pgtype.Present}}, - {"test", "first", pgtype.Int8{Int: 1, Status: pgtype.Null}}, - {"anotherTest", "second", pgtype.Int8{Int: 1, Status: pgtype.Present}}, - {"anotherTest", "first", pgtype.Int8{Int: 1, Status: pgtype.Null}}, - {"null", "", pgtype.Int8{Int: 1, Status: pgtype.Present}}, - {"zero", "", pgtype.Int8{Int: 1, Status: pgtype.Present}}, - {"incache", "", pgtype.Int8{Int: 99, Status: pgtype.Present}}, + {"", "ignored empty name lib", pgtype.Int8{Int64: 1, Valid: true}}, + {"test", "first", pgtype.Int8{Int64: 2, Valid: true}}, + {"test", "first", pgtype.Int8{Int64: 1, Valid: true}}, + {"test", "first", pgtype.Int8{Int64: 1, Valid: false}}, + {"anotherTest", "second", pgtype.Int8{Int64: 1, Valid: true}}, + {"anotherTest", "first", pgtype.Int8{Int64: 1, Valid: false}}, + {"null", "", pgtype.Int8{Int64: 1, Valid: true}}, + {"zero", "", pgtype.Int8{Int64: 1, Valid: true}}, + {"incache", "", pgtype.Int8{Int64: 99, Valid: true}}, }, expectedBatchQueue: 8, queries: []model.SqlQuery{ { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"anotherTest", "first", pgtype.Int8{Int: 1, Status: pgtype.Null}}, + Args: []interface{}{"anotherTest", "first", pgtype.Int8{Int64: 1, Valid: false}}, Results: [][]interface{}{{int64(7)}}, }, { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"anotherTest", "second", pgtype.Int8{Int: 1, Status: pgtype.Present}}, + Args: []interface{}{"anotherTest", "second", pgtype.Int8{Int64: 1, Valid: true}}, Results: [][]interface{}{{int64(8)}}, }, { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"null", "", pgtype.Int8{Int: 1, Status: pgtype.Present}}, + Args: []interface{}{"null", "", pgtype.Int8{Int64: 1, Valid: true}}, Results: [][]interface{}{{nil}}, }, { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"test", "first", pgtype.Int8{Int: 1, Status: pgtype.Null}}, + Args: []interface{}{"test", "first", pgtype.Int8{Int64: 1, Valid: false}}, Results: [][]interface{}{{int64(5)}}, }, { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"test", "first", pgtype.Int8{Int: 1, Status: pgtype.Present}}, + Args: []interface{}{"test", "first", pgtype.Int8{Int64: 1, Valid: true}}, Results: [][]interface{}{{int64(6)}}, }, { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"test", "first", pgtype.Int8{Int: 2, Status: pgtype.Present}}, + Args: []interface{}{"test", "first", pgtype.Int8{Int64: 2, Valid: true}}, Results: [][]interface{}{{int64(6)}}, }, { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"zero", "", pgtype.Int8{Int: 1, Status: pgtype.Present}}, + Args: []interface{}{"zero", "", pgtype.Int8{Int64: 1, Valid: true}}, Results: 
[][]interface{}{{int64(0)}}, }, }, getIDCheck: func(t *testing.T, batch instrumentationLibraryBatch) { - id, err := batch.GetID("test", "first", pgtype.Int8{Int: 1, Status: pgtype.Present}) + id, err := batch.GetID("test", "first", pgtype.Int8{Int64: 1, Valid: true}) require.Nil(t, err) - require.Equal(t, pgtype.Int8{Int: 6, Status: pgtype.Present}, id) + require.Equal(t, pgtype.Int8{Int64: 6, Valid: true}, id) id, err = batch.GetID("", "missing name", pgtype.Int8{}) require.NoError(t, err) - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{Valid: false}, id) id, err = batch.GetID("nonexistant", "", pgtype.Int8{}) - require.EqualError(t, err, "error getting ID for instrumentation library {nonexistant {0 0}}: error getting ID from batch") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.EqualError(t, err, "error getting ID for instrumentation library {nonexistant {0 false}}: error getting ID from batch") + require.Equal(t, pgtype.Int8{Valid: false}, id) - id, err = batch.GetID("zero", "", pgtype.Int8{Int: 1, Status: pgtype.Present}) - require.EqualError(t, err, "error getting ID for instrumentation library {zero {1 2}}: ID is 0") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + id, err = batch.GetID("zero", "", pgtype.Int8{Int64: 1, Valid: true}) + require.EqualError(t, err, "error getting ID for instrumentation library {zero {1 true}}: ID is 0") + require.Equal(t, pgtype.Int8{Valid: false}, id) - id, err = batch.GetID("null", "", pgtype.Int8{Int: 1, Status: pgtype.Present}) - require.EqualError(t, err, "error getting ID for instrumentation library {null {1 2}}: ID is null") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + id, err = batch.GetID("null", "", pgtype.Int8{Int64: 1, Valid: true}) + require.EqualError(t, err, "error getting ID for instrumentation library {null {1 true}}: ID is null") + require.Equal(t, pgtype.Int8{Valid: false}, id) }, }, { name: "all urls in cache", - instLibs: []instrumentationLibrary{{"incache", "", pgtype.Int8{Int: 99, Status: pgtype.Present}}}, + instLibs: []instrumentationLibrary{{"incache", "", pgtype.Int8{Int64: 99, Valid: true}}}, expectedBatchQueue: 1, getIDCheck: func(t *testing.T, batch instrumentationLibraryBatch) { - id, err := batch.GetID("incache", "", pgtype.Int8{Int: 99, Status: pgtype.Present}) + id, err := batch.GetID("incache", "", pgtype.Int8{Int64: 99, Valid: true}) require.Nil(t, err) - require.Equal(t, pgtype.Int8{Int: 1337, Status: pgtype.Present}, id) + require.Equal(t, pgtype.Int8{Int64: 1337, Valid: true}, id) }, }, { name: "send batch error", - instLibs: []instrumentationLibrary{{"non-cached url", "", pgtype.Int8{Int: 0, Status: pgtype.Null}}}, + instLibs: []instrumentationLibrary{{"non-cached url", "", pgtype.Int8{Int64: 0, Valid: false}}}, expectedBatchQueue: 1, sendBatchError: fmt.Errorf("some error"), expectedError: "some error", }, { name: "scan error", - instLibs: []instrumentationLibrary{{"non-cached", "", pgtype.Int8{Int: 0, Status: pgtype.Null}}}, + instLibs: []instrumentationLibrary{{"non-cached", "", pgtype.Int8{Int64: 0, Valid: false}}}, expectedBatchQueue: 1, queries: []model.SqlQuery{ { Sql: insertInstrumentationLibSQL, - Args: []interface{}{"non-cached", "", pgtype.Int8{Int: 0, Status: pgtype.Null}}, + Args: []interface{}{"non-cached", "", pgtype.Int8{Int64: 0, Valid: false}}, Results: [][]interface{}{{"wrong type"}}, }, }, @@ -132,7 +132,7 @@ func TestInstrumentationLibraryBatch(t *testing.T) { }, { name: "cache error", - instLibs: 
[]instrumentationLibrary{{"invalid", "", pgtype.Int8{Int: 10, Status: pgtype.Present}}}, + instLibs: []instrumentationLibrary{{"invalid", "", pgtype.Int8{Int64: 10, Valid: true}}}, expectedBatchQueue: 1, getIDCheck: func(t *testing.T, batch instrumentationLibraryBatch) { _, err := batch.GetID(invalid.name, invalid.version, invalid.schemaURLID) diff --git a/pkg/pgmodel/ingestor/trace/operation_batch.go b/pkg/pgmodel/ingestor/trace/operation_batch.go index 9d8da2defc..f9b1f722bd 100644 --- a/pkg/pgmodel/ingestor/trace/operation_batch.go +++ b/pkg/pgmodel/ingestor/trace/operation_batch.go @@ -10,8 +10,8 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" - "github.com/jackc/pgtype" - pgx "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/pgxconn" ) diff --git a/pkg/pgmodel/ingestor/trace/operation_batch_test.go b/pkg/pgmodel/ingestor/trace/operation_batch_test.go index 2de55ed9d5..638aa6deef 100644 --- a/pkg/pgmodel/ingestor/trace/operation_batch_test.go +++ b/pkg/pgmodel/ingestor/trace/operation_batch_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/model" @@ -20,7 +20,7 @@ func TestOperationBatch(t *testing.T) { cache := newOperationCache() incache := operation{"", "incache", ""} invalid := operation{"", "invalid", ""} - cache.Insert(incache, pgtype.Int8{Int: 1337, Status: pgtype.Present}, incache.SizeInCache()) + cache.Insert(incache, pgtype.Int8{Int64: 1337, Valid: true}, incache.SizeInCache()) cache.Insert(invalid, "foo", 0) testCases := []struct { @@ -79,19 +79,19 @@ func TestOperationBatch(t *testing.T) { getIDCheck: func(t *testing.T, batch operationBatch) { id, err := batch.GetID("first", "test", "") require.Nil(t, err) - require.Equal(t, pgtype.Int8{Int: 5, Status: pgtype.Present}, id) + require.Equal(t, pgtype.Int8{Int64: 5, Valid: true}, id) id, err = batch.GetID("", "nonexistant", "") require.EqualError(t, err, "error getting ID for operation { nonexistant }: error getting ID from batch") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{Valid: false}, id) id, err = batch.GetID("", "zero", "") require.EqualError(t, err, "error getting ID for operation { zero }: ID is 0") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{Valid: false}, id) id, err = batch.GetID("", "null", "") require.EqualError(t, err, "error getting ID for operation { null }: ID is null") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{Valid: false}, id) }, }, { @@ -101,7 +101,7 @@ func TestOperationBatch(t *testing.T) { getIDCheck: func(t *testing.T, batch operationBatch) { id, err := batch.GetID("", "incache", "") require.Nil(t, err) - require.Equal(t, pgtype.Int8{Int: 1337, Status: pgtype.Present}, id) + require.Equal(t, pgtype.Int8{Int64: 1337, Valid: true}, id) }, }, { diff --git a/pkg/pgmodel/ingestor/trace/schema_url_batch.go b/pkg/pgmodel/ingestor/trace/schema_url_batch.go index 69bfdc0095..4d75a47f70 100644 --- a/pkg/pgmodel/ingestor/trace/schema_url_batch.go +++ b/pkg/pgmodel/ingestor/trace/schema_url_batch.go @@ -8,8 +8,8 @@ import ( "context" "fmt" - "github.com/jackc/pgtype" - pgx "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" 
"github.com/timescale/promscale/pkg/pgxconn" ) @@ -61,7 +61,7 @@ func (s schemaURLBatch) SendBatch(ctx context.Context, conn pgxconn.PgxConn) (er func (s schemaURLBatch) GetID(url string) (pgtype.Int8, error) { if url == "" { - return pgtype.Int8{Status: pgtype.Null}, nil + return pgtype.Int8{Valid: false}, nil } id, err := s.b.GetID(schemaURL(url)) if err != nil { diff --git a/pkg/pgmodel/ingestor/trace/schema_url_batch_test.go b/pkg/pgmodel/ingestor/trace/schema_url_batch_test.go index 8693ddfad7..d68f08af4e 100644 --- a/pkg/pgmodel/ingestor/trace/schema_url_batch_test.go +++ b/pkg/pgmodel/ingestor/trace/schema_url_batch_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/model" @@ -14,7 +14,7 @@ import ( func TestSchemaURLBatch(t *testing.T) { cache := newSchemaCache() incache := schemaURL("incache") - cache.Insert(incache, pgtype.Int8{Int: 1337, Status: pgtype.Present}, incache.SizeInCache()) + cache.Insert(incache, pgtype.Int8{Int64: 1337, Valid: true}, incache.SizeInCache()) cache.Insert(batchItem(schemaURL("invalid")), "foo", 0) testCases := []struct { @@ -55,23 +55,23 @@ func TestSchemaURLBatch(t *testing.T) { getIDCheck: func(t *testing.T, batch schemaURLBatch) { id, err := batch.GetID("test") require.Nil(t, err) - require.Equal(t, pgtype.Int8{Int: 6, Status: pgtype.Present}, id) + require.Equal(t, pgtype.Int8{Int64: 6, Valid: true}, id) id, err = batch.GetID("") require.Nil(t, err) - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{}, id) id, err = batch.GetID("nonexistant") require.EqualError(t, err, "error getting ID for schema url nonexistant: error getting ID from batch") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{}, id) id, err = batch.GetID("zero") require.EqualError(t, err, "error getting ID for schema url zero: ID is 0") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{}, id) id, err = batch.GetID("null") require.EqualError(t, err, "error getting ID for schema url null: ID is null") - require.Equal(t, pgtype.Int8{Status: pgtype.Null}, id) + require.Equal(t, pgtype.Int8{}, id) }, }, { @@ -81,7 +81,7 @@ func TestSchemaURLBatch(t *testing.T) { getIDCheck: func(t *testing.T, batch schemaURLBatch) { id, err := batch.GetID("incache") require.Nil(t, err) - require.Equal(t, pgtype.Int8{Int: 1337, Status: pgtype.Present}, id) + require.Equal(t, pgtype.Int8{Int64: 1337, Valid: true}, id) }, }, { diff --git a/pkg/pgmodel/ingestor/trace/tag_batch.go b/pkg/pgmodel/ingestor/trace/tag_batch.go index 375a01ac4d..f0f9cf7720 100644 --- a/pkg/pgmodel/ingestor/trace/tag_batch.go +++ b/pkg/pgmodel/ingestor/trace/tag_batch.go @@ -9,8 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/jackc/pgtype" - pgx "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgxconn" ) @@ -94,34 +94,34 @@ func (t tagBatch) SendBatch(ctx context.Context, conn pgxconn.PgxConn) (err erro return t.b.SendBatch(ctx, conn) } -func (tb tagBatch) GetTagMapJSON(tags map[string]interface{}, typ TagType) (pgtype.JSONB, error) { +func (tb tagBatch) GetTagMapJSON(tags map[string]interface{}, typ TagType) ([]byte, error) { tagMap := make(map[int64]int64) for k, v := range tags { 
byteVal, err := json.Marshal(v) if err != nil { - return pgtype.JSONB{}, err + return nil, err } t := tag{k, string(byteVal), typ} ids, err := tb.b.Get(t) if err != nil { - return pgtype.JSONB{}, fmt.Errorf("error getting tag from batch %v: %w", t, err) + return nil, fmt.Errorf("error getting tag from batch %v: %w", t, err) } tagIDs, ok := ids.(tagIDs) if !ok { - return pgtype.JSONB{}, fmt.Errorf("error getting tag %v from batch: %w", t, errors.ErrInvalidCacheEntryType) + return nil, fmt.Errorf("error getting tag %v from batch: %w", t, errors.ErrInvalidCacheEntryType) } - if tagIDs.keyID.Status != pgtype.Present || tagIDs.valueID.Status != pgtype.Present { - return pgtype.JSONB{}, fmt.Errorf("tag IDs have NULL values: %#v", tagIDs) + if !(tagIDs.keyID.Valid && tagIDs.valueID.Valid) { + return nil, fmt.Errorf("tag IDs have NULL values: %#v", tagIDs) } - if tagIDs.keyID.Int == 0 || tagIDs.valueID.Int == 0 { - return pgtype.JSONB{}, fmt.Errorf("tag IDs have 0 values: %#v", tagIDs) + if tagIDs.keyID.Int64 == 0 || tagIDs.valueID.Int64 == 0 { + return nil, fmt.Errorf("tag IDs have 0 values: %#v", tagIDs) } - tagMap[tagIDs.keyID.Int] = tagIDs.valueID.Int + tagMap[tagIDs.keyID.Int64] = tagIDs.valueID.Int64 } jsonBytes, err := json.Marshal(tagMap) if err != nil { - return pgtype.JSONB{}, err + return nil, err } - return pgtype.JSONB{Bytes: jsonBytes, Status: pgtype.Present}, nil + return jsonBytes, nil } diff --git a/pkg/pgmodel/ingestor/trace/tag_batch_test.go b/pkg/pgmodel/ingestor/trace/tag_batch_test.go index 574ef64974..bee90f29cd 100644 --- a/pkg/pgmodel/ingestor/trace/tag_batch_test.go +++ b/pkg/pgmodel/ingestor/trace/tag_batch_test.go @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/model" ) @@ -18,7 +18,7 @@ func TestTagBatch(t *testing.T) { cache := newTagCache() incache := tag{"incache", `""`, SpanTagType} invalid := tag{"invalid", `""`, SpanTagType} - cache.Insert(incache, tagIDs{pgtype.Int8{Int: 1, Status: pgtype.Present}, pgtype.Int8{Int: 2, Status: pgtype.Present}}, incache.SizeInCache()) + cache.Insert(incache, tagIDs{pgtype.Int8{Int64: 1, Valid: true}, pgtype.Int8{Int64: 2, Valid: true}}, incache.SizeInCache()) cache.Insert(invalid, "foo", 0) testCases := []struct { @@ -125,23 +125,23 @@ func TestTagBatch(t *testing.T) { getTagMapJSONCheck: func(t *testing.T, batch tagBatch) { tagMap, err := batch.GetTagMapJSON(map[string]interface{}{"test": ""}, SpanTagType) require.Nil(t, err) - require.Equal(t, `{"7":8}`, string(tagMap.Bytes)) + require.Equal(t, `{"7":8}`, string(tagMap)) tagMap, err = batch.GetTagMapJSON(map[string]interface{}{"nonexistant": ""}, SpanTagType) require.EqualError(t, err, "error getting tag from batch {nonexistant \"\" 1}: error getting item from batch") - require.Equal(t, []byte(nil), tagMap.Bytes) + require.Equal(t, []byte(nil), tagMap) tagMap, err = batch.GetTagMapJSON(map[string]interface{}{"zero": ""}, SpanTagType) - require.EqualError(t, err, "tag IDs have 0 values: trace.tagIDs{keyID:pgtype.Int8{Int:0, Status:0x2}, valueID:pgtype.Int8{Int:0, Status:0x2}}") - require.Equal(t, []byte(nil), tagMap.Bytes) + require.EqualError(t, err, "tag IDs have 0 values: trace.tagIDs{keyID:pgtype.Int8{Int64:0, Valid:true}, valueID:pgtype.Int8{Int64:0, Valid:true}}") + require.Equal(t, []byte(nil), tagMap) tagMap, err = batch.GetTagMapJSON(map[string]interface{}{"null": ""}, SpanTagType) - require.EqualError(t, err, "tag IDs have NULL 
values: trace.tagIDs{keyID:pgtype.Int8{Int:0, Status:0x1}, valueID:pgtype.Int8{Int:0, Status:0x1}}") - require.Equal(t, []byte(nil), tagMap.Bytes) + require.EqualError(t, err, "tag IDs have NULL values: trace.tagIDs{keyID:pgtype.Int8{Int64:0, Valid:false}, valueID:pgtype.Int8{Int64:0, Valid:false}}") + require.Equal(t, []byte(nil), tagMap) tagMap, err = batch.GetTagMapJSON(map[string]interface{}{"test": make(chan struct{})}, SpanTagType) require.EqualError(t, err, "json: unsupported type: chan struct {}") - require.Equal(t, []byte(nil), tagMap.Bytes) + require.Equal(t, []byte(nil), tagMap) }, }, { @@ -151,7 +151,7 @@ func TestTagBatch(t *testing.T) { getTagMapJSONCheck: func(t *testing.T, batch tagBatch) { tagMap, err := batch.GetTagMapJSON(map[string]interface{}{"incache": ""}, SpanTagType) require.Nil(t, err) - require.Equal(t, `{"1":2}`, string(tagMap.Bytes)) + require.Equal(t, `{"1":2}`, string(tagMap)) }, }, { diff --git a/pkg/pgmodel/ingestor/trace/writer.go b/pkg/pgmodel/ingestor/trace/writer.go index 5e8d544361..567a9f7125 100644 --- a/pkg/pgmodel/ingestor/trace/writer.go +++ b/pkg/pgmodel/ingestor/trace/writer.go @@ -17,8 +17,8 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - "github.com/jackc/pgtype" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/clockcache" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/pgmodel/common/schema" @@ -81,13 +81,13 @@ func RegisterTelemetryMetrics(t telemetry.Engine) error { } func (t *traceWriterImpl) addSpanLinks(linkRows *[][]interface{}, tagsBatch tagBatch, links ptrace.SpanLinkSlice, traceID pgtype.UUID, spanID pgtype.Int8, spanStartTime time.Time) error { - if spanID.Status != pgtype.Present { + if !spanID.Valid { return fmt.Errorf("spanID must be set") } for i := 0; i < links.Len(); i++ { link := links.At(i) linkedSpanID := getSpanID(link.SpanID()) - if linkedSpanID.Status != pgtype.Present { + if !linkedSpanID.Valid { return fmt.Errorf("linkedSpanID must be set") } @@ -103,7 +103,7 @@ func (t *traceWriterImpl) addSpanLinks(linkRows *[][]interface{}, tagsBatch tagB } func (t *traceWriterImpl) addSpanEvents(eventRows *[][]interface{}, tagsBatch tagBatch, events ptrace.SpanEventSlice, traceID pgtype.UUID, spanID pgtype.Int8) error { - if spanID.Status != pgtype.Present { + if !spanID.Valid { return fmt.Errorf("spanID must be set") } for i := 0; i < events.Len(); i++ { @@ -269,7 +269,7 @@ func (t *traceWriterImpl) InsertTraces(ctx context.Context, traces ptrace.Traces span := spans.At(k) traceID := TraceIDToUUID(span.TraceID()) spanID := getSpanID(span.SpanID()) - if spanID.Status != pgtype.Present { + if !spanID.Valid { return fmt.Errorf("spanID must be set") } parentSpanID := getSpanID(span.ParentSpanID()) @@ -422,8 +422,8 @@ func Int64ToByteArray(x int64) [8]byte { func TraceIDToUUID(buf [16]byte) pgtype.UUID { return pgtype.UUID{ - Bytes: buf, - Status: pgtype.Present, + Bytes: buf, + Valid: true, } } @@ -432,19 +432,19 @@ func getSpanID(buf [8]byte) pgtype.Int8 { if i != 0 { return pgtype.Int8{ - Int: i, - Status: pgtype.Present, + Int64: i, + Valid: true, } } return pgtype.Int8{ - Status: pgtype.Null, + Valid: false, } } -func getEventTimeRange(events ptrace.SpanEventSlice) (result pgtype.Tstzrange) { +func getEventTimeRange(events ptrace.SpanEventSlice) (result pgtype.Range[pgtype.Timestamptz]) { if events.Len() == 0 { - result.Status = pgtype.Null + result.Valid = false return 
result } @@ -461,12 +461,12 @@ func getEventTimeRange(events ptrace.SpanEventSlice) (result pgtype.Tstzrange) { } } - result = pgtype.Tstzrange{ - Lower: pgtype.Timestamptz{Time: lowerTime, Status: pgtype.Present}, - Upper: pgtype.Timestamptz{Time: upperTime, Status: pgtype.Present}, + result = pgtype.Range[pgtype.Timestamptz]{ + Lower: pgtype.Timestamptz{Time: lowerTime, Valid: true}, + Upper: pgtype.Timestamptz{Time: upperTime, Valid: true}, LowerType: pgtype.Inclusive, UpperType: pgtype.Exclusive, - Status: pgtype.Present, + Valid: true, } return result @@ -475,10 +475,10 @@ func getEventTimeRange(events ptrace.SpanEventSlice) (result pgtype.Tstzrange) { func getTraceStateValue(ts pcommon.TraceState) (result pgtype.Text) { tsRaw := ts.AsRaw() if tsRaw == "" { - result.Status = pgtype.Null + result.Valid = false } else { result.String = tsRaw - result.Status = pgtype.Present + result.Valid = true } return result diff --git a/pkg/pgmodel/lreader/labels_reader.go b/pkg/pgmodel/lreader/labels_reader.go index f928b8be03..51edafa766 100644 --- a/pkg/pgmodel/lreader/labels_reader.go +++ b/pkg/pgmodel/lreader/labels_reader.go @@ -11,6 +11,7 @@ import ( "strings" "unsafe" + "github.com/jackc/pgx/v5/pgtype" "github.com/prometheus/prometheus/model/labels" "github.com/timescale/promscale/pkg/log" @@ -262,8 +263,8 @@ func (lr *labelsReader) fetchMissingLabels(misses []interface{}, missedIds []int defer rows.Close() var ( - keys pgutf8str.TextArray - vals pgutf8str.TextArray + keys pgtype.FlatArray[pgutf8str.Text] + vals pgtype.FlatArray[pgutf8str.Text] ) for rows.Next() { @@ -272,21 +273,21 @@ func (lr *labelsReader) fetchMissingLabels(misses []interface{}, missedIds []int if err != nil { return 0, err } - if len(ids) != len(keys.Elements) { - return 0, fmt.Errorf("query returned a mismatch in ids and keys: %d, %d", len(ids), len(keys.Elements)) + if len(ids) != len(keys) { + return 0, fmt.Errorf("query returned a mismatch in ids and keys: %d, %d", len(ids), len(keys)) } - if len(keys.Elements) != len(vals.Elements) { - return 0, fmt.Errorf("query returned a mismatch in timestamps and values: %d, %d", len(keys.Elements), len(vals.Elements)) + if len(keys) != len(vals) { + return 0, fmt.Errorf("query returned a mismatch in timestamps and values: %d, %d", len(keys), len(vals)) } - if len(keys.Elements) > len(misses) { - return 0, fmt.Errorf("query returned wrong number of labels: %d, %d", len(misses), len(keys.Elements)) + if len(keys) > len(misses) { + return 0, fmt.Errorf("query returned wrong number of labels: %d, %d", len(misses), len(keys)) } - numNewLabels = len(keys.Elements) - misses = misses[:len(keys.Elements)] - newLabels = newLabels[:len(keys.Elements)] - keyStrArr := keys.Get().([]string) - valStrArr := vals.Get().([]string) + numNewLabels = len(keys) + misses = misses[:len(keys)] + newLabels = newLabels[:len(keys)] + keyStrArr := pgutf8str.TextArrayToSlice(keys) + valStrArr := pgutf8str.TextArrayToSlice(vals) sizes := make([]uint64, numNewLabels) for i := range newLabels { misses[i] = ids[i] diff --git a/pkg/pgmodel/metrics/database/database.go b/pkg/pgmodel/metrics/database/database.go index 9241a32294..041b68a43d 100644 --- a/pkg/pgmodel/metrics/database/database.go +++ b/pkg/pgmodel/metrics/database/database.go @@ -6,7 +6,7 @@ import ( "sync/atomic" "time" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/log" diff --git a/pkg/pgmodel/migrate.go b/pkg/pgmodel/migrate.go index 
6c3ad9b1cd..52f2c8092f 100644 --- a/pkg/pgmodel/migrate.go +++ b/pkg/pgmodel/migrate.go @@ -16,8 +16,8 @@ import ( "sort" "github.com/blang/semver/v4" - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" "github.com/timescale/promscale/pkg/log" ) diff --git a/pkg/pgmodel/model/custom_types.go b/pkg/pgmodel/model/custom_types.go index 7b737618b0..e392ca95f5 100644 --- a/pkg/pgmodel/model/custom_types.go +++ b/pkg/pgmodel/model/custom_types.go @@ -7,113 +7,232 @@ package model import ( "context" "fmt" + "strconv" "sync" - "github.com/jackc/pgtype" - "github.com/timescale/promscale/pkg/pgxconn" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" ) +type LabelArray = pgtype.FlatArray[pgtype.Int4] +type ArrayOfLabelArray = pgtype.FlatArray[LabelArray] + var ( - // Read-only fields after the ingestor inits. - labelArrayOID uint32 - isLabelArrayOIDSet bool - labelArrayOIDMux sync.Mutex + registerTypesMux sync.RWMutex + // We use a map of slices instead of a single slice because we run some tests + // in parallel, each targeting a different DB. Since each DB might assign + // different OIDs to the custom types, if we don't distinguish between + // them, pgx won't be able to find the corresponding encoding/decoding + // plan and will return an error. + registeredTypes = make(map[string][]*pgtype.Type) ) -func labelArrayTranscoder() pgtype.ValueTranscoder { return new(pgtype.Int4Array) } - -func registerLabelArrayOID(conn pgxconn.PgxConn) error { - labelArrayOIDMux.Lock() - defer labelArrayOIDMux.Unlock() - err := conn.QueryRow(context.Background(), `SELECT 'prom_api.label_array'::regtype::oid`).Scan(&labelArrayOID) - if err != nil { - return fmt.Errorf("registering prom_api.label_array oid: %w", err) +// RegisterCustomPgTypes registers the custom types specified in the `oidsSql` +// query into the connection's `pgtype.Map`. The types are cached to avoid +// querying the database every time a connection is created. +func RegisterCustomPgTypes(ctx context.Context, conn *pgx.Conn) error { + cfg := conn.Config().Config + types, ok := getRegisteredTypes(cfg) + if !ok { + var err error + types, err = fetchCustomPgTypes(ctx, conn) + if err != nil { + return fmt.Errorf("couldn't register custom PostgreSQL types: %w", err) + } } - isLabelArrayOIDSet = true + registerCustomPgTypes(conn.TypeMap(), types) return nil } -var ( - // Read-only fields after the ingestor inits. - labelValueArrayOID uint32 - isLabelValueArrayOIDSet bool - labelValueArrayOIDMux sync.Mutex -) +// UnRegisterCustomPgTypes deletes the cached types for the given connection. +// This is useful for post-test cleanup.
+func UnRegisterCustomPgTypes(cfg pgconn.Config) { + registerTypesMux.Lock() + defer registerTypesMux.Unlock() + delete(registeredTypes, key(cfg)) +} + +func key(cfg pgconn.Config) string { + return cfg.Host + strconv.FormatUint(uint64(cfg.Port), 10) + cfg.Database +} -func labelValueArrayTranscoder() pgtype.ValueTranscoder { return new(pgtype.TextArray) } +var oidsSql = `SELECT +'prom_api.label_array'::regtype::oid, +'prom_api._label_array'::regtype::oid, +'prom_api.label_value_array'::regtype::oid, +'prom_api._label_value_array'::regtype::oid, +'ps_trace.tag_type'::regtype::oid, +'ps_trace.trace_id'::regtype::oid, +'ps_trace.tag_map'::regtype::oid, +'ps_trace._tag_map'::regtype::oid, +'ps_trace.status_code'::regtype::oid +` + +func fetchCustomPgTypes(ctx context.Context, conn *pgx.Conn) ([]*pgtype.Type, error) { + registerTypesMux.Lock() + defer registerTypesMux.Unlock() + + cfg := conn.Config().Config + types, ok := registeredTypes[key(cfg)] + if ok { + return types, nil + } -func registerLabelValueArrayOID(conn pgxconn.PgxConn) error { - labelValueArrayOIDMux.Lock() - defer labelValueArrayOIDMux.Unlock() - err := conn.QueryRow(context.Background(), `SELECT 'prom_api.label_value_array'::regtype::oid`).Scan(&labelValueArrayOID) + var ( + // Read-only fields after the ingestor inits. + labelArrayOID uint32 + arrayOfLabelArrayOID uint32 + labelValueArrayOID uint32 + arrayOfLabelValueArrayOID uint32 + traceTagTypeOID uint32 + traceIdOID uint32 + traceTagMapOID uint32 + arrayOfTraceTagMapOID uint32 + traceStatusCodeOID uint32 + ) + err := conn. + QueryRow(ctx, oidsSql). + Scan( + &labelArrayOID, + &arrayOfLabelArrayOID, + &labelValueArrayOID, + &arrayOfLabelValueArrayOID, + &traceTagTypeOID, + &traceIdOID, + &traceTagMapOID, + &arrayOfTraceTagMapOID, + &traceStatusCodeOID, + ) if err != nil { - return fmt.Errorf("registering prom_api.label_value_array oid: %w", err) + return nil, fmt.Errorf("query to retrieve custom types oids failed: %w", err) } - isLabelValueArrayOIDSet = true - return nil -} -func RegisterCustomPgTypes(conn pgxconn.PgxConn) error { - var err error - if err = registerLabelArrayOID(conn); err != nil { - return fmt.Errorf("register label array oid: %w", err) + m := conn.TypeMap() + // This type is registered with static values, so it's always present + int4Type, _ := m.TypeForOID(pgtype.Int4OID) + labelArrayType := &pgtype.Type{ + Name: "_prom_api.label_array", + OID: labelArrayOID, + Codec: &pgtype.ArrayCodec{ + ElementType: int4Type, + }, } - if err = registerLabelValueArrayOID(conn); err != nil { - return fmt.Errorf("register label value array oid: %w", err) + + // This type is registered with static values, so it's always present + textType, _ := m.TypeForOID(pgtype.TextOID) + labelValueArrayType := &pgtype.Type{ + Name: "_prom_api.label_value_array", + OID: labelValueArrayOID, + Codec: &pgtype.ArrayCodec{ElementType: textType}, } - return nil + traceTagMapType := &pgtype.Type{ + Name: "ps_trace.tag_map", + OID: traceTagMapOID, + Codec: &pgtype.JSONBCodec{}, + } + + types = []*pgtype.Type{ + labelArrayType, + labelValueArrayType, + traceTagMapType, + { + Name: "_prom_api._label_array", + OID: arrayOfLabelValueArrayOID, + Codec: &pgtype.ArrayCodec{ElementType: labelValueArrayType}, + }, + { + Name: "_prom_api._label_array", + OID: arrayOfLabelArrayOID, + Codec: &pgtype.ArrayCodec{ElementType: labelArrayType}, + }, + { + Name: "ps_trace.tag_type", + OID: traceTagTypeOID, + Codec: &pgtype.Int2Codec{}, + }, + { + Name: "ps_trace.trace_id", + OID: traceIdOID, + Codec: 
&pgtype.UUIDCodec{}, + }, + { + Name: "ps_trace._tag_map", + OID: arrayOfTraceTagMapOID, + Codec: &pgtype.ArrayCodec{ElementType: traceTagMapType}, + }, + { + Name: "ps_trace.status_code", + OID: traceStatusCodeOID, + Codec: &pgtype.EnumCodec{}, + }, + } + + registeredTypes[key(conn.Config().Config)] = types + + return types, nil } -type PgCustomType uint8 +func getRegisteredTypes(cfg pgconn.Config) ([]*pgtype.Type, bool) { + registerTypesMux.RLock() + defer registerTypesMux.RUnlock() + types, ok := registeredTypes[key(cfg)] + return types, ok +} -const ( - LabelArray PgCustomType = iota - LabelValueArray -) +func registerCustomPgTypes(m *pgtype.Map, types []*pgtype.Type) { + for _, t := range types { + m.RegisterType(t) + } +} -func GetCustomTypeOID(t PgCustomType) uint32 { - switch t { - case LabelArray: - labelArrayOIDMux.Lock() - defer labelArrayOIDMux.Unlock() - if !isLabelArrayOIDSet { - panic("label_array oid is not set. This needs to be set first before calling the type.") +func SliceToArrayOfLabelArray(src [][]int32) ArrayOfLabelArray { + a := make(ArrayOfLabelArray, 0, len(src)) + for _, i := range src { + la := make(LabelArray, 0, len(i)) + for _, j := range i { + la = append(la, pgtype.Int4{Int32: j, Valid: true}) } - return labelArrayOID - case LabelValueArray: - labelValueArrayOIDMux.Lock() - defer labelValueArrayOIDMux.Unlock() - if !isLabelValueArrayOIDSet { - panic("label_value_array oid is not set. This needs to be set first before calling the type.") - } - return labelValueArrayOID - default: - panic("invalid type") + a = append(a, la) } + return a } -// GetCustomType returns a custom pgtype. -func GetCustomType(t PgCustomType) *pgtype.ArrayType { - switch t { - case LabelArray: - if !isLabelArrayOIDSet { - panic("label_array oid is not set. This needs to be set first before calling the type.") - } - return pgtype.NewArrayType("prom_api.label_array", GetCustomTypeOID(t), labelArrayTranscoder) - case LabelValueArray: - if !isLabelValueArrayOIDSet { - panic("label_value_array oid is not set. This needs to be set first before calling the type.") - } - return pgtype.NewArrayType("prom_api.label_value_array", GetCustomTypeOID(t), labelValueArrayTranscoder) - default: - panic("invalid type") +// Wrapper to allow DecodeBinary to reuse the existing array so that a pool is +// effective +type ReusableArray[T any] struct { + pgtype.FlatArray[T] +} + +func (a *ReusableArray[T]) SetDimensions(dimensions []pgtype.ArrayDimension) error { + if dimensions == nil { + a.FlatArray = nil + return nil } + + elementCount := cardinality(dimensions) + + // Reuse the current array if it's capable to support the new dimensions + // constraint. Otherwise, create a new array. + if cap(a.FlatArray) > int(elementCount) { + a.FlatArray = a.FlatArray[:elementCount] + } else { + a.FlatArray = make(pgtype.FlatArray[T], elementCount) + } + + return nil } -func SetLabelArrayOIDForTest(oid uint32) { - labelArrayOID = oid - isLabelArrayOIDSet = true +// Cardinality returns the number of elements in an array of dimensions size. 
+func cardinality(dimensions []pgtype.ArrayDimension) int { + if len(dimensions) == 0 { + return 0 + } + + elementCount := int(dimensions[0].Length) + for _, d := range dimensions[1:] { + elementCount *= int(d.Length) + } - labelValueArrayOID = oid - isLabelValueArrayOIDSet = true + return elementCount } diff --git a/pkg/pgmodel/model/interface.go b/pkg/pgmodel/model/interface.go index 55e98720dd..89e96f20d0 100644 --- a/pkg/pgmodel/model/interface.go +++ b/pkg/pgmodel/model/interface.go @@ -9,7 +9,7 @@ import ( "math" "time" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" ) const ( diff --git a/pkg/pgmodel/model/label_list.go b/pkg/pgmodel/model/label_list.go index 7c0673bc22..3f16ee50a8 100644 --- a/pkg/pgmodel/model/label_list.go +++ b/pkg/pgmodel/model/label_list.go @@ -7,37 +7,23 @@ package model import ( "fmt" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/timescale/promscale/pkg/pgmodel/model/pgutf8str" ) type LabelList struct { - names *pgutf8str.TextArray - values *pgutf8str.TextArray + names pgtype.FlatArray[pgutf8str.Text] + values pgtype.FlatArray[pgutf8str.Text] } func NewLabelList(size int) *LabelList { - nameElements := make([]pgtype.Text, 0, size) - valueElements := make([]pgtype.Text, 0, size) return &LabelList{ // We want to avoid runtime conversion of []string to pgutf8str.TextArray. The best way to do that is // to use directly the pgutf8str.TextArray under the hood. // The implementations done here are kept in line with what happens in // https://github.com/jackc/pgtype/blob/master/text_array.go - names: &pgutf8str.TextArray{ - TextArray: pgtype.TextArray{ - Elements: nameElements, - Dimensions: []pgtype.ArrayDimension{{Length: int32(size), LowerBound: 1}}, - Status: pgtype.Present, - }, - }, - values: &pgutf8str.TextArray{ - TextArray: pgtype.TextArray{ - Elements: valueElements, - Dimensions: []pgtype.ArrayDimension{{Length: int32(size), LowerBound: 1}}, - Status: pgtype.Present, - }, - }, + names: pgtype.FlatArray[pgutf8str.Text](make([]pgutf8str.Text, 0, size)), + values: pgtype.FlatArray[pgutf8str.Text](make([]pgutf8str.Text, 0, size)), } } @@ -46,36 +32,30 @@ func (ls *LabelList) Add(name string, value string) error { nameT pgutf8str.Text valueT pgutf8str.Text ) - if err := nameT.Set(name); err != nil { - return fmt.Errorf("setting pgtype.Text: %w", err) + if err := nameT.Scan(name); err != nil { + return fmt.Errorf("setting pgutf8str.Text: %w", err) } - if err := valueT.Set(value); err != nil { - return fmt.Errorf("setting pgtype.Text: %w", err) + if err := valueT.Scan(value); err != nil { + return fmt.Errorf("setting pgutf8str.Text: %w", err) } - ls.names.Elements = append(ls.names.Elements, nameT.Text) - ls.values.Elements = append(ls.values.Elements, valueT.Text) + ls.names = append(ls.names, nameT) + ls.values = append(ls.values, valueT) return nil } -func (ls *LabelList) updateArrayDimensions() { - l := int32(len(ls.names.Elements)) - ls.names.Dimensions[0].Length = l - ls.values.Dimensions[0].Length = l -} - // Get returns the addresses of names and values slice after updating the array dimensions. 
-func (ls *LabelList) Get() (*pgutf8str.TextArray, *pgutf8str.TextArray) { - ls.updateArrayDimensions() +func (ls *LabelList) Get() (pgtype.FlatArray[pgutf8str.Text], pgtype.FlatArray[pgutf8str.Text]) { return ls.names, ls.values } -func (ls *LabelList) Len() int { return len(ls.names.Elements) } +func (ls *LabelList) Len() int { return len(ls.names) } func (ls *LabelList) Swap(i, j int) { - ls.names.Elements[i], ls.names.Elements[j] = ls.names.Elements[j], ls.names.Elements[i] - ls.values.Elements[i], ls.values.Elements[j] = ls.values.Elements[j], ls.values.Elements[i] + ls.names[i], ls.names[j] = ls.names[j], ls.names[i] + ls.values[i], ls.values[j] = ls.values[j], ls.values[i] } + func (ls LabelList) Less(i, j int) bool { - elemI := ls.names.Elements[i].String - elemJ := ls.names.Elements[j].String + elemI := ls.names[i].String + elemJ := ls.names[j].String return elemI < elemJ || (elemI == elemJ && elemI < elemJ) } diff --git a/pkg/pgmodel/model/pgutf8str/text_types.go b/pkg/pgmodel/model/pgutf8str/text_types.go index 67679a994b..223a618418 100644 --- a/pkg/pgmodel/model/pgutf8str/text_types.go +++ b/pkg/pgmodel/model/pgutf8str/text_types.go @@ -12,7 +12,7 @@ import ( "fmt" "strings" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" ) const ( @@ -25,109 +25,53 @@ type Text struct { pgtype.Text // Contains sanitized string. } -func (t *Text) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error { - if err := t.Text.DecodeBinary(ci, src); err != nil { - return fmt.Errorf("pgutf8str.Text.DecodeBinary: %w", err) - } - return nil -} - -func (t *Text) Set(src interface{}) error { - text, ok := src.(string) - if !ok { - return fmt.Errorf("pgutf8str.Text.Set(): src is not of 'string' type") +func (t *Text) ScanText(v pgtype.Text) error { + if !v.Valid { + t.Text = v + return nil } - if err := t.Text.Set(sanitizeNullChars(text)); err != nil { - return fmt.Errorf("safe text: %w", err) + t.Text = pgtype.Text{ + String: sanitizeNullChars(v.String), + Valid: true, } return nil } -func (t *Text) Get() interface{} { - s, ok := t.Text.Get().(string) - if !ok { - panic("'string' type not received from underlying 'pgtype.Text'") +// Scan implements the database/sql Scanner interface. +func (dst *Text) Scan(src any) error { + if src == nil { + dst.Text = pgtype.Text{} + return nil } - return revertSanitization(s) -} - -func (t *Text) AssignTo(_ interface{}) error { - panic("pgutf8str.Text.AssignTo(): not implemented") -} - -func (t *Text) Scan(_ interface{}) error { - panic("pgutf8str.Text.Scan(): not implemented") -} - -// TextArray is a custom pgtype that wraps the pgtype.TextArray. It is safe from null characters. -type TextArray struct { - pgtype.TextArray // Contains sanitized string. 
-} -func (t *TextArray) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error { - if err := t.TextArray.DecodeBinary(ci, src); err != nil { - return fmt.Errorf("pgutf8str.TextArray.DecodeBinary: %w", err) + switch src := src.(type) { + case string: + dst.Text = pgtype.Text{String: sanitizeNullChars(src), Valid: true} + return nil + case []byte: + dst.Text = pgtype.Text{String: sanitizeNullChars(string(src)), Valid: true} + return nil } - return nil -} - -func (t *TextArray) Slice(low, high int) (*TextArray, error) { - var new TextArray - err := new.TextArray.Set(t.Elements[low:high]) - return &new, err -} -func (t *TextArray) Set(src interface{}) error { - textArray, ok := src.([]string) - if !ok { - return fmt.Errorf("pgutf8str.TextArray.Set(): src is not of '[]string' type") - } - buf := make([]pgtype.Text, len(textArray)) - for i := range textArray { - var text pgtype.Text - if err := text.Set(sanitizeNullChars(textArray[i])); err != nil { - return fmt.Errorf("safe text-array: %w", err) - } - buf[i] = text - } - if err := t.TextArray.Set(buf); err != nil { - return fmt.Errorf("safe textarray: %w", err) - } - return nil + return fmt.Errorf("cannot scan %T", src) } -func (t *TextArray) revertSanitization() []string { - val := t.TextArray.Get() - if val == nil { +func TextArrayToSlice(src pgtype.FlatArray[Text]) []string { + if src == nil { return nil } - arr, ok := val.(pgtype.TextArray) - if !ok { - panic("underlying data not in 'pgutf8str.TextArray' type") - } - originalElements := make([]string, len(arr.Elements)) - for i := range arr.Elements { - tmp, ok := arr.Elements[i].Get().(string) - if !ok { + + originalElements := make([]string, len(src)) + for i, v := range src { + // TODO should we maybe return an error? + if !v.Valid { panic("'string' type not received from underlying 'pgtype.Text' in 'pgutf8str.TextArray'") } - originalElements[i] = revertSanitization(tmp) + originalElements[i] = revertSanitization(v.String) } return originalElements } -func (t *TextArray) Get() interface{} { - return t.revertSanitization() -} - -func (t *TextArray) AssignTo(_ interface{}) error { - panic("pgutf8str.TextArray.AssignTo(): not implemented") -} - -func (t *TextArray) Scan(_ interface{}) error { - panic("pgutf8str.TextArray.Scan(): not implemented") -} - func replaceFunc(r rune) rune { if r == NullChar { return NullCharSanitize diff --git a/pkg/pgmodel/model/sql_test_utils.go b/pkg/pgmodel/model/sql_test_utils.go index 124965b1ee..e6612342fb 100644 --- a/pkg/pgmodel/model/sql_test_utils.go +++ b/pkg/pgmodel/model/sql_test_utils.go @@ -6,6 +6,8 @@ package model import ( "context" + "database/sql" + "database/sql/driver" "fmt" "reflect" "regexp" @@ -14,13 +16,13 @@ import ( "testing" "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgproto3/v2" - "github.com/jackc/pgtype" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" + "github.com/jackc/pgx/v5/pgxpool" "github.com/sergi/go-diff/diffmatchpatch" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/model/pgutf8str" "github.com/timescale/promscale/pkg/pgxconn" @@ -77,17 +79,17 @@ func (r *SqlRecorder) Exec(ctx context.Context, sql string, arguments ...interfa results, err := r.checkQuery(sql, arguments...) 
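+	// pgx v5's pgconn.CommandTag is a value type, so the empty-result paths
+	// below return pgconn.NewCommandTag("") instead of the nil tag used with v4.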
if len(results) == 0 { - return nil, err + return pgconn.NewCommandTag(""), err } if len(results) != 1 { r.t.Errorf("mock exec: too many return rows %v\n in Exec\n %v\n args %v", results, sql, arguments) - return nil, err + return pgconn.NewCommandTag(""), err } if len(results[0]) != 1 { r.t.Errorf("mock exec: too many return values %v\n in Exec\n %v\n args %v", results, sql, arguments) - return nil, err + return pgconn.NewCommandTag(""), err } return results[0][0].(pgconn.CommandTag), err @@ -153,14 +155,19 @@ func (r *SqlRecorder) checkQuery(sql string, args ...interface{}) (RowResults, e } assert.Equal(r.t, len(row.Args), len(args), "Args of different lengths @ %d %s", idx, sql) + for i := range row.Args { switch row.Args[i].(type) { - case pgtype.TextEncoder: - ci := pgtype.NewConnInfo() - got, err := args[i].(pgtype.TextEncoder).EncodeText(ci, nil) - assert.NoError(r.t, err) - expected, err := row.Args[i].(pgtype.TextEncoder).EncodeText(ci, nil) - assert.NoError(r.t, err) + case driver.Valuer, pgtype.ArraySetter: + typeMap := pgtype.NewMap() + t, ok := typeMap.TypeForValue(row.Args[i]) + require.True(r.t, ok) + plan := t.Codec.PlanEncode(typeMap, t.OID, pgtype.TextFormatCode, row.Args[i]) + + got, err := plan.Encode(args[i], nil) + require.NoError(r.t, err) + expected, err := plan.Encode(row.Args[i], nil) + require.NoError(r.t, err) assert.Equal(r.t, string(expected), string(got), "sql args aren't equal for query # %v: %v", idx, sql) default: if !row.ArgsUnordered { @@ -217,11 +224,12 @@ type MockBatch struct { items []batchItem } -func (b *MockBatch) Queue(query string, arguments ...interface{}) { +func (b *MockBatch) Queue(query string, arguments ...any) *pgx.QueuedQuery { b.items = append(b.items, batchItem{ query: query, arguments: arguments, }) + return nil } func (b *MockBatch) Len() int { @@ -234,11 +242,6 @@ type MockBatchResult struct { t *testing.T } -// QueryFunc reads the results from the next query in the batch as if the query has been sent with Conn.QueryFunc. -func (m *MockBatchResult) QueryFunc(scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { - panic("not implemented") -} - // Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec. func (m *MockBatchResult) Exec() (pgconn.CommandTag, error) { defer func() { m.idx++ }() @@ -246,15 +249,15 @@ func (m *MockBatchResult) Exec() (pgconn.CommandTag, error) { q := m.queries[m.idx] if len(q.Results) == 0 { - return nil, q.Err + return pgconn.NewCommandTag(""), q.Err } if len(q.Results) != 1 { m.t.Errorf("mock exec: too many return rows %v\n in batch Exec\n %+v", q.Results, q) - return nil, q.Err + return pgconn.NewCommandTag(""), q.Err } if len(q.Results[0]) != 1 { m.t.Errorf("mock exec: too many return values %v\n in batch Exec\n %+v", q.Results, q) - return nil, q.Err + return pgconn.NewCommandTag(""), q.Err } return q.Results[0][0].(pgconn.CommandTag), q.Err @@ -294,6 +297,12 @@ type MockRows struct { func (m *MockRows) Close() { } +// Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a +// *Conn (e.g. if it was created by RowsFromResultReader) +func (m *MockRows) Conn() *pgx.Conn { + return nil +} + // Err returns any error that occurred while reading. 
func (m *MockRows) Err() error { return m.err @@ -304,7 +313,7 @@ func (m *MockRows) CommandTag() pgconn.CommandTag { panic("not implemented") } -func (m *MockRows) FieldDescriptions() []pgproto3.FieldDescription { +func (m *MockRows) FieldDescriptions() []pgconn.FieldDescription { panic("not implemented") } @@ -337,28 +346,47 @@ func (m *MockRows) Scan(dest ...interface{}) error { len(dest), ) } - for i := range dest { switch s := m.results[m.idx][i].(type) { case []time.Time: if d, ok := dest[i].(*[]time.Time); ok { *d = s - } else if d, ok := dest[i].(pgtype.Value); ok { - err := d.Set(s) + } else if d, ok := dest[i].(pgtype.ArraySetter); ok { + err := d.SetDimensions([]pgtype.ArrayDimension{{Length: int32(len(s))}}) if err != nil { return err } + for i, r := range s { + v, ok := d.ScanIndex(i).(*pgtype.Timestamptz) + if !ok { + return fmt.Errorf("expected array of timestamptz as target") + } + *v = pgtype.Timestamptz{ + Time: r, + Valid: true, + } + } } else { return fmt.Errorf("wrong value type []time.Time") } case []float64: if d, ok := dest[i].(*[]float64); ok { *d = s - } else if d, ok := dest[i].(pgtype.Value); ok { - err := d.Set(s) + } else if d, ok := dest[i].(pgtype.ArraySetter); ok { + err := d.SetDimensions([]pgtype.ArrayDimension{{Length: int32(len(s))}}) if err != nil { return err } + for i, r := range s { + v, ok := d.ScanIndex(i).(*pgtype.Float8) + if !ok { + return fmt.Errorf("expected array of float8 as target") + } + *v = pgtype.Float8{ + Float64: r, + Valid: true, + } + } } else { return fmt.Errorf("wrong value type []float64") } @@ -374,6 +402,12 @@ func (m *MockRows) Scan(dest ...interface{}) error { continue } return fmt.Errorf("wrong value type []int64") + case []*int64: + if d, ok := dest[i].(*[]*int64); ok { + *d = s + continue + } + return fmt.Errorf("wrong value type []*int64") case []int32: if d, ok := dest[i].(*[]int32); ok { *d = s @@ -391,6 +425,8 @@ func (m *MockRows) Scan(dest ...interface{}) error { *d = s continue } + // TODO review this explanation + // // Ideally, we should be doing pgtype.BinaryDecoder. but doing that here will allow using only // a single function that the interface pgtype.BinaryDecoder allows, i.e, DecodeBinary(). DecodeBinary() takes // a pgtype.ConnInfo and []byte, which is the main problem. The ConnInfo can be nil, but []byte needs to be set @@ -399,11 +435,14 @@ func (m *MockRows) Scan(dest ...interface{}) error { // existing test setup to have the [][]byte (particularly the expected results part), which is lengthy. // Plus, not all types convert to [][]byte. types like int will require binary.Little.Endian conversion // which can be a overdo for just writing the results of the tests. So, we do a short-cut to directly - // leverage the .Set() of our custom type and quick the process. 
- if d, ok := dest[i].(*pgutf8str.TextArray); ok { - pgta := pgutf8str.TextArray{} - if err := pgta.Set(s); err != nil { - panic(err) + if d, ok := dest[i].(*pgtype.FlatArray[pgutf8str.Text]); ok { + pgta := make(pgtype.FlatArray[pgutf8str.Text], 0, len(s)) + for _, v := range s { + t := pgutf8str.Text{} + if err := t.Scan(v); err != nil { + panic(err) + } + pgta = append(pgta, t) } *d = pgta continue @@ -463,8 +502,8 @@ func (m *MockRows) Scan(dest ...interface{}) error { dvp := reflect.Indirect(dv) dvp.SetUint(m.results[m.idx][i].(uint64)) case int64: - if d, ok := dest[i].(pgtype.Value); ok { - if err := d.Set(m.results[m.idx][i]); err != nil { + if d, ok := dest[i].(*pgtype.Int8); ok { + if err := d.Scan(m.results[m.idx][i]); err != nil { return err } continue @@ -479,8 +518,8 @@ func (m *MockRows) Scan(dest ...interface{}) error { dvp := reflect.Indirect(dv) dvp.SetInt(m.results[m.idx][i].(int64)) case string: - if d, ok := dest[i].(pgtype.Value); ok { - if err := d.Set(m.results[m.idx][i]); err != nil { + if d, ok := dest[i].(sql.Scanner); ok { + if err := d.Scan(m.results[m.idx][i]); err != nil { return err } continue @@ -491,18 +530,24 @@ func (m *MockRows) Scan(dest ...interface{}) error { dvp.SetString(m.results[m.idx][i].(string)) continue } - if d, ok := dest[i].(*pgutf8str.Text); ok { - pgt := pgutf8str.Text{} - if err := pgt.Set(s); err != nil { - panic(err) + + if d, ok := dest[i].(*pgtype.FlatArray[pgutf8str.Text]); ok { + // TODO try to use scan on the array + pgt := make(pgtype.FlatArray[pgutf8str.Text], 0, len(s)) + for _, v := range s { + t := pgutf8str.Text{} + if err := t.Scan(v); err != nil { + panic(err) + } + pgt = append(pgt, t) } *d = pgt continue } return fmt.Errorf("wrong value type: neither 'string' or 'pgutf8str'") case nil: - if d, ok := dest[i].(pgtype.Value); ok { - if err := d.Set(m.results[m.idx][i]); err != nil { + if d, ok := dest[i].(sql.Scanner); ok { + if err := d.Scan(m.results[m.idx][i]); err != nil { return err } continue @@ -675,9 +720,6 @@ func (t *MockTx) QueryRow(ctx context.Context, sql string, args ...interface{}) return &MockRows{results: rows, err: err} } -func (t *MockTx) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { - return pgconn.CommandTag{}, nil -} func (t *MockTx) Conn() *pgx.Conn { return nil } diff --git a/pkg/pgmodel/new_migrate.go b/pkg/pgmodel/new_migrate.go index 6563d83a07..7fd5f19dae 100644 --- a/pkg/pgmodel/new_migrate.go +++ b/pkg/pgmodel/new_migrate.go @@ -10,7 +10,7 @@ import ( "sync" "github.com/blang/semver/v4" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/migrations" "github.com/timescale/promscale/pkg/pgmodel/common/extension" diff --git a/pkg/pgmodel/querier/querier_sql_test.go b/pkg/pgmodel/querier/querier_sql_test.go index ce43cddc1f..c691251fc0 100644 --- a/pkg/pgmodel/querier/querier_sql_test.go +++ b/pkg/pgmodel/querier/querier_sql_test.go @@ -13,12 +13,13 @@ import ( "github.com/prometheus/prometheus/model/timestamp" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/clockcache" "github.com/timescale/promscale/pkg/pgmodel/lreader" "github.com/timescale/promscale/pkg/pgmodel/model" "github.com/timescale/promscale/pkg/prompb" "github.com/timescale/promscale/pkg/tenancy" + "github.com/timescale/promscale/pkg/util" ) func TestPGXQuerierQuery(t *testing.T) { @@ -267,7 +268,7 @@ func 
TestPGXQuerierQuery(t *testing.T) { "AND time <= '1970-01-01T00:00:02Z'\n\t" + "GROUP BY s.id", Args: []interface{}(nil), - Results: model.RowResults{{[]int64{1}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, + Results: model.RowResults{{[]*int64{util.Pointer(int64(1))}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, Err: error(nil), }, { @@ -310,9 +311,11 @@ func TestPGXQuerierQuery(t *testing.T) { ORDER BY series_id, time ) as time_ordered_rows GROUP BY series_id ) as result ON (result.value_array is not null AND result.series_id = series.id)`, - Args: nil, - Results: model.RowResults{{[]int64{2}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: nil, + Results: model.RowResults{ + {[]*int64{util.Pointer(int64(2))}, []time.Time{time.Unix(0, 0)}, []float64{1}}, + }, + Err: error(nil), }, { Sql: "SELECT (prom_api.labels_info($1::int[])).*", @@ -357,9 +360,15 @@ func TestPGXQuerierQuery(t *testing.T) { ORDER BY series_id, time ) as time_ordered_rows GROUP BY series_id ) as result ON (result.value_array is not null AND result.series_id = series.id)`, - Args: nil, - Results: model.RowResults{{[]int64{2}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: nil, + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(2))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: "SELECT (prom_api.labels_info($1::int[])).*", @@ -405,9 +414,15 @@ func TestPGXQuerierQuery(t *testing.T) { ORDER BY series_id, time ) as time_ordered_rows GROUP BY series_id ) as result ON (result.value_array is not null AND result.series_id = series.id)`, - Args: nil, - Results: model.RowResults{{[]int64{2}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: nil, + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(2))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: "SELECT (prom_api.labels_info($1::int[])).*", @@ -470,9 +485,15 @@ func TestPGXQuerierQuery(t *testing.T) { "AND time >= '1970-01-01T00:00:01Z'\n\t" + "AND time <= '1970-01-01T00:00:02Z'\n\t" + "GROUP BY s.id", - Args: []interface{}(nil), - Results: model.RowResults{{[]int64{3}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: []interface{}(nil), + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(3))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: "SELECT s.labels, array_agg(m.time ORDER BY time), array_agg(m.value ORDER BY time)\n\t" + @@ -483,9 +504,15 @@ func TestPGXQuerierQuery(t *testing.T) { "AND time >= '1970-01-01T00:00:01Z'\n\t" + "AND time <= '1970-01-01T00:00:02Z'\n\t" + "GROUP BY s.id", - Args: []interface{}(nil), - Results: model.RowResults{{[]int64{4}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: []interface{}(nil), + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(4))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: "SELECT (prom_api.labels_info($1::int[])).*", @@ -568,9 +595,15 @@ func TestPGXQuerierQuery(t *testing.T) { "AND time >= '1970-01-01T00:00:01Z'\n\t" + "AND time <= '1970-01-01T00:00:02Z'\n\t" + "GROUP BY s.id", - Args: []interface{}(nil), - Results: model.RowResults{{[]int64{7}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: []interface{}(nil), + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(7))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: 
"SELECT (prom_api.labels_info($1::int[])).*", @@ -629,9 +662,15 @@ func TestPGXQuerierQuery(t *testing.T) { "AND time >= '1970-01-01T00:00:01Z'\n\t" + "AND time <= '1970-01-01T00:00:02Z'\n\t" + "GROUP BY s.id", - Args: []interface{}(nil), - Results: model.RowResults{{[]int64{8, 9}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: []interface{}(nil), + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(8)), util.Pointer(int64(9))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: "SELECT (prom_api.labels_info($1::int[])).*", @@ -690,9 +729,15 @@ func TestPGXQuerierQuery(t *testing.T) { "AND time >= '1970-01-01T00:00:01Z'\n\t" + "AND time <= '1970-01-01T00:00:02Z'\n\t" + "GROUP BY s.id", - Args: []interface{}(nil), - Results: model.RowResults{{[]int64{10}, []time.Time{time.Unix(0, 0)}, []float64{1}}}, - Err: error(nil), + Args: []interface{}(nil), + Results: model.RowResults{ + { + []*int64{util.Pointer(int64(10))}, + []time.Time{time.Unix(0, 0)}, + []float64{1}, + }, + }, + Err: error(nil), }, { Sql: "SELECT (prom_api.labels_info($1::int[])).*", diff --git a/pkg/pgmodel/querier/query_builder.go b/pkg/pgmodel/querier/query_builder.go index cedf1f9376..1e4ea3285b 100644 --- a/pkg/pgmodel/querier/query_builder.go +++ b/pkg/pgmodel/querier/query_builder.go @@ -126,10 +126,10 @@ func initLabelIdIndexForSamples(index map[int64]labels.Label, rows []sampleRow) for i := range rows { for _, id := range rows[i].labelIds { //id==0 means there is no label for the key, so nothing to look up - if id == 0 { + if id == nil || *id == 0 { continue } - index[id] = labels.Label{} + index[*id] = labels.Label{} } } } @@ -149,21 +149,21 @@ func buildTimeSeries(rows []sampleRow, lr lreader.LabelsReader) ([]*prompb.TimeS return nil, row.err } - if row.times.Len() != len(row.values.Elements) { + if row.times.Len() != len(row.values.FlatArray) { return nil, errors.ErrQueryMismatchTimestampValue } promLabels := make([]prompb.Label, 0, len(row.labelIds)) for _, id := range row.labelIds { - if id == 0 { + if id == nil || *id == 0 { continue } - label, ok := labelIDMap[id] + label, ok := labelIDMap[*id] if !ok { - return nil, fmt.Errorf("missing label for id %v", id) + return nil, fmt.Errorf("missing label for id %v", *id) } if label == (labels.Label{}) { - return nil, fmt.Errorf("label not found for id %v", id) + return nil, fmt.Errorf("label not found for id %v", *id) } promLabels = append(promLabels, prompb.Label{Name: label.Name, Value: label.Value}) @@ -196,7 +196,7 @@ func buildTimeSeries(rows []sampleRow, lr lreader.LabelsReader) ([]*prompb.TimeS } result.Samples = append(result.Samples, prompb.Sample{ Timestamp: ts, - Value: row.values.Elements[i].Float, + Value: row.values.FlatArray[i].Float64, }) } diff --git a/pkg/pgmodel/querier/query_builder_exemplar.go b/pkg/pgmodel/querier/query_builder_exemplar.go index 104dac0e60..63ad596f53 100644 --- a/pkg/pgmodel/querier/query_builder_exemplar.go +++ b/pkg/pgmodel/querier/query_builder_exemplar.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/pgmodel/common/schema" pgmodel "github.com/timescale/promscale/pkg/pgmodel/model" ) diff --git a/pkg/pgmodel/querier/query_builder_samples.go b/pkg/pgmodel/querier/query_builder_samples.go index 8250cd78d6..dfc217866a 100644 --- a/pkg/pgmodel/querier/query_builder_samples.go +++ b/pkg/pgmodel/querier/query_builder_samples.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - 
"github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/prometheus/prometheus/promql/parser" "github.com/timescale/promscale/pkg/pgmodel/common/schema" pgmodel "github.com/timescale/promscale/pkg/pgmodel/model" diff --git a/pkg/pgmodel/querier/query_exemplar.go b/pkg/pgmodel/querier/query_exemplar.go index fe957ee8a6..d201d7989a 100644 --- a/pkg/pgmodel/querier/query_exemplar.go +++ b/pkg/pgmodel/querier/query_exemplar.go @@ -6,8 +6,8 @@ import ( "sort" "time" - "github.com/jackc/pgconn" "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" diff --git a/pkg/pgmodel/querier/query_sample.go b/pkg/pgmodel/querier/query_sample.go index afd4ce0bf0..16be042ec5 100644 --- a/pkg/pgmodel/querier/query_sample.go +++ b/pkg/pgmodel/querier/query_sample.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "github.com/jackc/pgconn" "github.com/jackc/pgerrcode" + "github.com/jackc/pgx/v5/pgconn" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" diff --git a/pkg/pgmodel/querier/query_tools.go b/pkg/pgmodel/querier/query_tools.go index 75da5f723f..8b0a82c1e4 100644 --- a/pkg/pgmodel/querier/query_tools.go +++ b/pkg/pgmodel/querier/query_tools.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/pgmodel/cache" "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/common/schema" diff --git a/pkg/pgmodel/querier/row.go b/pkg/pgmodel/querier/row.go index 951936f626..9dbfebd8e6 100644 --- a/pkg/pgmodel/querier/row.go +++ b/pkg/pgmodel/querier/row.go @@ -1,10 +1,9 @@ package querier import ( - "encoding/binary" "sync" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/prometheus/prometheus/model/labels" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/pgmodel/common/schema" @@ -12,137 +11,30 @@ import ( "github.com/timescale/promscale/pkg/pgxconn" ) +// QUESTION(adn) should we cache only the underlying array like it was before? 
var fPool = sync.Pool{ New: func() interface{} { - return new(pgtype.Float8Array) + return new(model.ReusableArray[pgtype.Float8]) }, } var tPool = sync.Pool{ New: func() interface{} { - return new(pgtype.TimestamptzArray) + return new(model.ReusableArray[pgtype.Timestamptz]) }, } -// wrapper to allow DecodeBinary to reuse the existing array so that a pool is effective -type timestamptzArrayWrapper struct { - *pgtype.TimestamptzArray -} - -func (dstwrapper *timestamptzArrayWrapper) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error { - dst := dstwrapper.TimestamptzArray - if src == nil { - *dst = pgtype.TimestamptzArray{Status: pgtype.Null} - return nil - } - - var arrayHeader pgtype.ArrayHeader - rp, err := arrayHeader.DecodeBinary(ci, src) - if err != nil { - return err - } - - if len(arrayHeader.Dimensions) == 0 { - *dst = pgtype.TimestamptzArray{Dimensions: arrayHeader.Dimensions, Status: pgtype.Present} - return nil - } - - elementCount := arrayHeader.Dimensions[0].Length - for _, d := range arrayHeader.Dimensions[1:] { - elementCount *= d.Length - } - - //reuse logic - elements := dst.Elements - if cap(dst.Elements) < int(elementCount) { - elements = make([]pgtype.Timestamptz, elementCount) - } else { - elements = elements[:elementCount] - } - - for i := range elements { - elemLen := int(int32(binary.BigEndian.Uint32(src[rp:]))) - rp += 4 - var elemSrc []byte - if elemLen >= 0 { - elemSrc = src[rp : rp+elemLen] - rp += elemLen - } - err = elements[i].DecodeBinary(ci, elemSrc) - if err != nil { - return err - } - } - - *dst = pgtype.TimestamptzArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: pgtype.Present} - return nil -} - -// wrapper to to allow DecodeBinary to reuse existing array so that a pool is effective -type float8ArrayWrapper struct { - *pgtype.Float8Array -} - -func (dstwrapper *float8ArrayWrapper) DecodeBinary(ci *pgtype.ConnInfo, src []byte) error { - dst := dstwrapper.Float8Array - if src == nil { - *dst = pgtype.Float8Array{Status: pgtype.Null} - return nil - } - - var arrayHeader pgtype.ArrayHeader - rp, err := arrayHeader.DecodeBinary(ci, src) - if err != nil { - return err - } - - if len(arrayHeader.Dimensions) == 0 { - *dst = pgtype.Float8Array{Dimensions: arrayHeader.Dimensions, Status: pgtype.Present} - return nil - } - - elementCount := arrayHeader.Dimensions[0].Length - for _, d := range arrayHeader.Dimensions[1:] { - elementCount *= d.Length - } - - //reuse logic - elements := dst.Elements - if cap(dst.Elements) < int(elementCount) { - elements = make([]pgtype.Float8, elementCount) - } else { - elements = elements[:elementCount] - } - - for i := range elements { - elemLen := int(int32(binary.BigEndian.Uint32(src[rp:]))) - rp += 4 - var elemSrc []byte - if elemLen >= 0 { - elemSrc = src[rp : rp+elemLen] - rp += elemLen - } - err = elements[i].DecodeBinary(ci, elemSrc) - if err != nil { - return err - } - } - - *dst = pgtype.Float8Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: pgtype.Present} - return nil -} - type sampleRow struct { - labelIds []int64 + labelIds []*int64 times TimestampSeries - values *pgtype.Float8Array + values *model.ReusableArray[pgtype.Float8] err error metricOverride string schema string column string //only used to hold ownership for releasing to pool - timeArrayOwnership *pgtype.TimestamptzArray + timeArrayOwnership *model.ReusableArray[pgtype.Timestamptz] } func (r *sampleRow) Close() { @@ -170,23 +62,28 @@ func appendSampleRows(out []sampleRow, in pgxconn.PgxRows, tsSeries TimestampSer } 
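+	// The value array (and, when no shared timestamp series is provided, the
+	// timestamp array) for each row comes from the pools above; sampleRow.Close
+	// returns them to the pools once the row has been consumed.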
for in.Next() { var row sampleRow - values := fPool.Get().(*pgtype.Float8Array) - values.Elements = values.Elements[:0] - valuesWrapper := float8ArrayWrapper{values} + values := fPool.Get().(*model.ReusableArray[pgtype.Float8]) + if values.FlatArray != nil { + values.FlatArray = values.FlatArray[:0] + } + var labelIds []*int64 //if a timeseries isn't provided it will be fetched from the database if tsSeries == nil { - times := tPool.Get().(*pgtype.TimestamptzArray) - times.Elements = times.Elements[:0] - timesWrapper := timestamptzArrayWrapper{times} - row.err = in.Scan(&row.labelIds, ×Wrapper, &valuesWrapper) + times := tPool.Get().(*model.ReusableArray[pgtype.Timestamptz]) + if times.FlatArray != nil { + times.FlatArray = times.FlatArray[:0] + } + row.err = in.Scan(&labelIds, times, values) row.timeArrayOwnership = times row.times = newRowTimestampSeries(times) } else { - row.err = in.Scan(&row.labelIds, &valuesWrapper) + row.err = in.Scan(&labelIds, values) row.times = tsSeries } + // TODO + row.labelIds = labelIds row.values = values row.metricOverride = metric row.schema = schema diff --git a/pkg/pgmodel/querier/series_exemplar.go b/pkg/pgmodel/querier/series_exemplar.go index 1e0f194426..cb62a92b14 100644 --- a/pkg/pgmodel/querier/series_exemplar.go +++ b/pkg/pgmodel/querier/series_exemplar.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "sort" + "strconv" "strings" "time" @@ -24,7 +25,7 @@ const getExemplarLabelPositions = "SELECT * FROM _prom_catalog.get_exemplar_labe type exemplarSeriesRow struct { metricName string - labelIds []int64 + labelIds []*int64 data []exemplarRow } @@ -46,7 +47,7 @@ func getExemplarSeriesRows(metricName string, in pgxconn.PgxRows) (rows []exempl var ( err error row exemplarRow - labelIds []int64 + labelIds []*int64 ) err = in.Scan(&labelIds, &row.time, &row.value, &row.labelValues) if err != nil { @@ -54,7 +55,7 @@ func getExemplarSeriesRows(metricName string, in pgxconn.PgxRows) (rows []exempl return rows, err } - key := fmt.Sprintf("%v", labelIds) + key := labelIdsToKey(labelIds) if existingSeriesRow, exists := seriesRowMap[key]; exists { existingSeriesRow.data = append(existingSeriesRow.data, row) continue @@ -71,6 +72,18 @@ func getExemplarSeriesRows(metricName string, in pgxconn.PgxRows) (rows []exempl return getExemplarSeriesSlice(seriesRowMap), in.Err() } +func labelIdsToKey(labelIds []*int64) string { + key := "" + for _, v := range labelIds { + if v == nil { + key += "nil " + continue + } + key += strconv.FormatInt(*v, 10) + " " + } + return key +} + func getExemplarSeriesSlice(m map[string]*exemplarSeriesRow) []exemplarSeriesRow { s := make([]exemplarSeriesRow, len(m)) i := 0 @@ -142,13 +155,13 @@ func getPositionIndex(tools *queryTools, posCache cache.PositionCache, metric st return keyPosIndex, nil } -func initLabelIdIndexForExemplars(index map[int64]labels.Label, labelIds []int64) { +func initLabelIdIndexForExemplars(index map[int64]labels.Label, labelIds []*int64) { for _, labelId := range labelIds { - if labelId == 0 { + if labelId == nil || *labelId == 0 { // no label to look-up for. 
continue } - index[labelId] = labels.Label{} + index[*labelId] = labels.Label{} } } diff --git a/pkg/pgmodel/querier/series_exemplar_test.go b/pkg/pgmodel/querier/series_exemplar_test.go index 90dff2a5b5..f7ded3c426 100644 --- a/pkg/pgmodel/querier/series_exemplar_test.go +++ b/pkg/pgmodel/querier/series_exemplar_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/cache" "github.com/timescale/promscale/pkg/pgmodel/model" + "github.com/timescale/promscale/pkg/util" ) func TestPrepareExemplarQueryResult(t *testing.T) { @@ -29,7 +30,7 @@ func TestPrepareExemplarQueryResult(t *testing.T) { } seriesRow := exemplarSeriesRow{ metricName: "test_metric_exemplar", - labelIds: []int64{1, 3}, + labelIds: []*int64{util.Pointer(int64(1)), util.Pointer(int64(3))}, data: exemplarRows, } lrCache := newMockLabelsReader([]int64{1, 3}, []labels.Label{{Name: "__name__", Value: "test_metric_exemplar"}, {Name: "instance", Value: "localhost:9100"}}) diff --git a/pkg/pgmodel/querier/series_set.go b/pkg/pgmodel/querier/series_set.go index f75d76b04a..9636530da4 100644 --- a/pkg/pgmodel/querier/series_set.go +++ b/pkg/pgmodel/querier/series_set.go @@ -8,7 +8,7 @@ import ( "fmt" "sort" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -77,7 +77,7 @@ func (p *pgxSamplesSeriesSet) At() storage.Series { if row.err != nil { return nil } - if row.times.Len() != len(row.values.Elements) { + if row.times.Len() != len(row.values.FlatArray) { p.err = errors.ErrInvalidRowData return nil } @@ -114,18 +114,18 @@ func (p *pgxSamplesSeriesSet) At() storage.Series { return ps } -func getLabelsFromLabelIds(labelIds []int64, index map[int64]labels.Label) (labels.Labels, error) { +func getLabelsFromLabelIds(labelIds []*int64, index map[int64]labels.Label) (labels.Labels, error) { lls := make([]labels.Label, 0, len(labelIds)) for _, id := range labelIds { - if id == 0 { + if id == nil || *id == 0 { continue } - label, ok := index[id] + label, ok := index[*id] if !ok { - return nil, fmt.Errorf("missing label for id %v", id) + return nil, fmt.Errorf("missing label for id %v", *id) } if label == (labels.Label{}) { - return nil, fmt.Errorf("missing label for id %v", id) + return nil, fmt.Errorf("missing label for id %v", *id) } lls = append(lls, label) } @@ -152,7 +152,7 @@ func (p *pgxSamplesSeriesSet) Close() { type pgxSeries struct { labels labels.Labels times TimestampSeries - values *pgtype.Float8Array + values *model.ReusableArray[pgtype.Float8] } // Labels returns the label names and values for the series. @@ -170,11 +170,11 @@ type pgxSeriesIterator struct { cur int totalSamples int times TimestampSeries - values *pgtype.Float8Array + values *model.ReusableArray[pgtype.Float8] } // newIterator returns an iterator over the samples. It expects times and values to be the same length. -func newIterator(times TimestampSeries, values *pgtype.Float8Array) *pgxSeriesIterator { +func newIterator(times TimestampSeries, values *model.ReusableArray[pgtype.Float8]) *pgxSeriesIterator { return &pgxSeriesIterator{ cur: -1, totalSamples: times.Len(), @@ -203,7 +203,7 @@ func (p *pgxSeriesIterator) getTs() int64 { } func (p *pgxSeriesIterator) getVal() float64 { - return p.values.Elements[p.cur].Float + return p.values.FlatArray[p.cur].Float64 } // At returns a Unix timestamp in milliseconds and value of the sample. 
@@ -222,7 +222,7 @@ func (p *pgxSeriesIterator) Next() bool { return false } _, ok := p.times.At(p.cur) - if ok && p.values.Elements[p.cur].Status == pgtype.Present { + if ok && p.values.FlatArray[p.cur].Valid { return true } } diff --git a/pkg/pgmodel/querier/series_set_test.go b/pkg/pgmodel/querier/series_set_test.go index 79d3568cb8..7d9d5f6e93 100644 --- a/pkg/pgmodel/querier/series_set_test.go +++ b/pkg/pgmodel/querier/series_set_test.go @@ -14,13 +14,14 @@ import ( "testing" "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgproto3/v2" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgproto3" + "github.com/jackc/pgx/v5/pgtype" "github.com/prometheus/prometheus/model/labels" pgmodelErrs "github.com/timescale/promscale/pkg/pgmodel/common/errors" "github.com/timescale/promscale/pkg/pgmodel/common/schema" "github.com/timescale/promscale/pkg/pgmodel/model" + "github.com/timescale/promscale/pkg/util" ) //nolint:all @@ -74,7 +75,7 @@ func (m *mockPgxRows) Next() bool { } // Scan reads the values from the current row into dest values positionally. -// dest can include pointers to core types, values implementing the Scanner +// dest can include pointers to core model, values implementing the Scanner // interface, []byte, and nil. []byte will skip the decoding process and directly // copy the raw bytes received from PostgreSQL. nil will skip the value entirely. // @@ -87,22 +88,22 @@ func (m *mockPgxRows) Scan(dest ...interface{}) error { return fmt.Errorf("incorrect number of destinations to scan in the results") } - ln, ok := dest[0].(*[]int64) + ln, ok := dest[0].(*[]*int64) if !ok { panic("label names incorrect type, expected int64") } *ln = m.results[m.idx].labels - ts, ok := dest[1].(*pgtype.TimestamptzArray) + ts, ok := dest[1].(*model.ReusableArray[pgtype.Timestamptz]) if !ok { panic("sample timestamps incorrect type") } - ts.Elements = m.results[m.idx].timestamps + ts.FlatArray = m.results[m.idx].timestamps //TODO dims? - vs, ok := dest[2].(*pgtype.Float8Array) + vs, ok := dest[2].(*model.ReusableArray[pgtype.Float8]) if !ok { return fmt.Errorf("sample values incorrect type") } - vs.Elements = m.results[m.idx].values + vs.FlatArray = m.results[m.idx].values //TODO dims? 
return nil @@ -137,7 +138,7 @@ func generateArrayHeader(numDim, containsNull, elemOID, arrayLength uint32, addD } type seriesSetRow struct { - labels []int64 + labels []*int64 timestamps []pgtype.Timestamptz values []pgtype.Float8 schema string @@ -150,7 +151,7 @@ func TestPgxSeriesSet(t *testing.T) { testCases := []struct { name string input [][]seriesSetRow - labels []int64 + labels []*int64 ts []pgtype.Timestamptz vs []pgtype.Float8 metricSchema string @@ -174,9 +175,9 @@ func TestPgxSeriesSet(t *testing.T) { name: "timestamp/value count mismatch", input: [][]seriesSetRow{{ genSeries( - []int64{1}, + []*int64{util.Pointer(int64(1))}, []pgtype.Timestamptz{}, - []pgtype.Float8{{Float: 1.0}}, + []pgtype.Float8{{Float64: 1.0, Valid: true}}, "", ""), }}, @@ -185,105 +186,105 @@ func TestPgxSeriesSet(t *testing.T) { }, { name: "happy path 1", - labels: []int64{1}, - ts: []pgtype.Timestamptz{{Time: time.Now()}}, - vs: []pgtype.Float8{{Float: 1}}, + labels: []*int64{util.Pointer(int64(1))}, + ts: []pgtype.Timestamptz{{Time: time.Now(), Valid: true}}, + vs: []pgtype.Float8{{Float64: 1, Valid: true}}, rowCount: 1, }, { name: "happy path 2", - labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ - {Time: time.Unix(0, 500000)}, - {Time: time.Unix(0, 6000000)}, + {Time: time.Unix(0, 500000), Valid: true}, + {Time: time.Unix(0, 6000000), Valid: true}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Float: 40000}, + {Float64: 30000, Valid: true}, + {Float64: 40000, Valid: true}, }, rowCount: 1, }, { name: "check nulls (ts and vs negative values are encoded as null)", - labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ - {Status: pgtype.Null}, - {Time: time.Unix(0, 0)}, - {Time: time.Unix(0, 6000000)}, + {Valid: false}, + {Time: time.Unix(0, 0), Valid: true}, + {Time: time.Unix(0, 6000000), Valid: true}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Float: 40000}, - {Status: pgtype.Null}, + {Float64: 30000, Valid: true}, + {Float64: 40000, Valid: true}, + {Valid: false}, }, rowCount: 1, }, { name: "check all nulls", - labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ - {Status: pgtype.Null}, - {Time: time.Unix(0, 0)}, - {Time: time.Unix(0, 6000000)}, + {Valid: false}, + {Time: time.Unix(0, 0), Valid: true}, + {Time: time.Unix(0, 6000000), Valid: true}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Status: pgtype.Null}, - {Status: pgtype.Null}, + {Float64: 30000, Valid: true}, + {Valid: false}, + {Valid: false}, }, rowCount: 1, }, { name: "check infinity", - labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ {InfinityModifier: pgtype.NegativeInfinity}, {InfinityModifier: pgtype.Infinity}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Float: 100}, + {Float64: 30000, Valid: true}, + {Float64: 100, Valid: true}, }, rowCount: 1, }, { name: "check default metric schema", - labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ - {Time: time.Unix(0, 500000)}, - {Time: time.Unix(0, 6000000)}, + {Time: time.Unix(0, 500000), Valid: true}, + {Time: time.Unix(0, 6000000), Valid: true}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Float: 100}, + {Float64: 30000, Valid: true}, + {Float64: 100, Valid: true}, }, metricSchema: schema.PromData, rowCount: 1, }, { name: "check custom metric schema", - 
labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ - {Time: time.Unix(0, 500000)}, - {Time: time.Unix(0, 6000000)}, + {Time: time.Unix(0, 500000), Valid: true}, + {Time: time.Unix(0, 6000000), Valid: true}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Float: 100}, + {Float64: 30000, Valid: true}, + {Float64: 100, Valid: true}, }, metricSchema: "customSchema", rowCount: 1, }, { name: "check custom column name", - labels: []int64{2, 3}, + labels: []*int64{util.Pointer(int64(2)), util.Pointer(int64(3))}, ts: []pgtype.Timestamptz{ - {Time: time.Unix(0, 500000)}, - {Time: time.Unix(0, 6000000)}, + {Time: time.Unix(0, 500000), Valid: true}, + {Time: time.Unix(0, 6000000), Valid: true}, }, vs: []pgtype.Float8{ - {Float: 30000}, - {Float: 100}, + {Float64: 30000, Valid: true}, + {Float64: 100, Valid: true}, }, columnName: "max", rowCount: 1, @@ -309,7 +310,7 @@ func TestPgxSeriesSet(t *testing.T) { if c.columnName == "" { c.columnName = defaultColumnName } - labels := make([]int64, len(c.labels)) + labels := make([]*int64, len(c.labels)) copy(labels, c.labels) c.input = [][]seriesSetRow{{ genSeries(labels, c.ts, c.vs, c.metricSchema, c.columnName)}} @@ -344,7 +345,7 @@ func TestPgxSeriesSet(t *testing.T) { expectedLabels := make([]labels.Label, 0, len(c.labels)) for _, v := range c.labels { - expectedLabels = append(expectedLabels, labels.Label{Name: labelMapping[v].k, Value: labelMapping[v].v}) + expectedLabels = append(expectedLabels, labels.Label{Name: labelMapping[*v].k, Value: labelMapping[*v].v}) } if c.metricSchema != "" && c.metricSchema != schema.PromData { @@ -369,7 +370,7 @@ func TestPgxSeriesSet(t *testing.T) { for i, ts = range c.ts { // Skipping 0/NULL values for ts and vs. - if ts.Status == pgtype.Null || c.vs[i].Status == pgtype.Null { + if !ts.Valid || !c.vs[i].Valid { continue } if !iter.Next() { @@ -389,8 +390,8 @@ func TestPgxSeriesSet(t *testing.T) { t.Errorf("unexpected time value: got %d, wanted %d", gotTs, wanted) } - if gotVs != c.vs[i].Float { - t.Errorf("unexpected value: got %f, wanted %f", gotVs, c.vs[i].Float) + if gotVs != c.vs[i].Float64 { + t.Errorf("unexpected value: got %f, wanted %f", gotVs, c.vs[i].Float64) } lastTs = gotTs @@ -501,35 +502,15 @@ func genPgxRows(m [][]seriesSetRow, err error) []sampleRow { return result } -func toTimestampTzArray(times []pgtype.Timestamptz) *pgtype.TimestamptzArray { - return &pgtype.TimestamptzArray{ - Elements: times, - Dimensions: nil, - Status: pgtype.Present, - } +func toTimestampTzArray(times []pgtype.Timestamptz) *model.ReusableArray[pgtype.Timestamptz] { + return &model.ReusableArray[pgtype.Timestamptz]{FlatArray: times} } -func toFloat8Array(values []pgtype.Float8) *pgtype.Float8Array { - return &pgtype.Float8Array{ - Elements: values, - Dimensions: nil, - Status: pgtype.Present, - } +func toFloat8Array(values []pgtype.Float8) *model.ReusableArray[pgtype.Float8] { + return &model.ReusableArray[pgtype.Float8]{FlatArray: values} } -func genSeries(labels []int64, ts []pgtype.Timestamptz, vs []pgtype.Float8, schema, column string) seriesSetRow { - - for i := range ts { - if ts[i].Status == pgtype.Undefined { - ts[i].Status = pgtype.Present - } - } - - for i := range vs { - if vs[i].Status == pgtype.Undefined { - vs[i].Status = pgtype.Present - } - } +func genSeries(labels []*int64, ts []pgtype.Timestamptz, vs []pgtype.Float8, schema, column string) seriesSetRow { return seriesSetRow{ labels: labels, diff --git a/pkg/pgmodel/querier/timestamp_series.go 
b/pkg/pgmodel/querier/timestamp_series.go index 4557b7f176..722b338912 100644 --- a/pkg/pgmodel/querier/timestamp_series.go +++ b/pkg/pgmodel/querier/timestamp_series.go @@ -3,7 +3,7 @@ package querier import ( "time" - "github.com/jackc/pgtype" + "github.com/jackc/pgx/v5/pgtype" prommodel "github.com/prometheus/common/model" "github.com/timescale/promscale/pkg/pgmodel/model" ) @@ -18,19 +18,19 @@ type TimestampSeries interface { // rowTimestampSeries is a TimestampSeries based on data fetched from a database row type rowTimestampSeries struct { - times *pgtype.TimestamptzArray + times *model.ReusableArray[pgtype.Timestamptz] } -func newRowTimestampSeries(times *pgtype.TimestamptzArray) *rowTimestampSeries { +func newRowTimestampSeries(times *model.ReusableArray[pgtype.Timestamptz]) *rowTimestampSeries { return &rowTimestampSeries{times: times} } func (t *rowTimestampSeries) At(index int) (int64, bool) { - return model.TimestamptzToMs(t.times.Elements[index]), t.times.Elements[index].Status == pgtype.Present + return model.TimestamptzToMs(t.times.FlatArray[index]), t.times.FlatArray[index].Valid } func (t *rowTimestampSeries) Len() int { - return len(t.times.Elements) + return len(t.times.FlatArray) } // regularTimestampSeries represents a time-series that is regular (e.g. each timestamp is step duration ahead of the previous one) diff --git a/pkg/pgxconn/implement.go b/pkg/pgxconn/implement.go index 16cbeb8633..8f6921b137 100644 --- a/pkg/pgxconn/implement.go +++ b/pkg/pgxconn/implement.go @@ -3,9 +3,8 @@ package pgxconn import ( "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgproto3/v2" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" ) // rowWithTelemetry wraps the row returned by QueryRow() for metrics telemetry. 
@@ -52,7 +51,7 @@ func (r rowsWithDuration) CommandTag() pgconn.CommandTag { return r.rows.CommandTag() } -func (r rowsWithDuration) FieldDescriptions() []pgproto3.FieldDescription { +func (r rowsWithDuration) FieldDescriptions() []pgconn.FieldDescription { return r.rows.FieldDescriptions() } @@ -93,7 +92,3 @@ func (w batchResultsWithDuration) Query() (pgx.Rows, error) { func (w batchResultsWithDuration) QueryRow() pgx.Row { return rowWithTelemetry{w.batch.QueryRow()} } - -func (w batchResultsWithDuration) QueryFunc(scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { - return w.batch.QueryFunc(scans, f) -} diff --git a/pkg/pgxconn/pgx_conn.go b/pkg/pgxconn/pgx_conn.go index 543ca0edd4..832db12b0f 100644 --- a/pkg/pgxconn/pgx_conn.go +++ b/pkg/pgxconn/pgx_conn.go @@ -10,9 +10,9 @@ import ( "strings" "time" - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/log" @@ -56,7 +56,7 @@ func promMethodLabel(method string) prometheus.Labels { } type PgxBatch interface { - Queue(query string, arguments ...interface{}) + Queue(query string, arguments ...any) *pgx.QueuedQuery Len() int } diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 3e43fc5d66..7c88051234 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -10,7 +10,7 @@ import ( "strconv" "github.com/grafana/regexp" - "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v5" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/dataset" diff --git a/pkg/runner/runner.go b/pkg/runner/runner.go index 1f3fe8017d..ce53f385b5 100644 --- a/pkg/runner/runner.go +++ b/pkg/runner/runner.go @@ -18,7 +18,7 @@ import ( "syscall" "time" - _ "github.com/jackc/pgx/v4/stdlib" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/oklog/run" "github.com/timescale/promscale/pkg/vacuum" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" diff --git a/pkg/telemetry/telemetry.go b/pkg/telemetry/telemetry.go index 972d3da6c1..0f58580760 100644 --- a/pkg/telemetry/telemetry.go +++ b/pkg/telemetry/telemetry.go @@ -12,11 +12,10 @@ import ( "sync" "time" - "github.com/jackc/pgtype" - + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/log" - "github.com/timescale/promscale/pkg/pgmodel/model/pgutf8str" "github.com/timescale/promscale/pkg/pgxconn" "github.com/timescale/promscale/pkg/promql" "github.com/timescale/promscale/pkg/util" @@ -171,12 +170,8 @@ func (t *engineImpl) syncDynamicMetadata() error { func (t *engineImpl) syncWithMetadataTable(queryFormat string, m Metadata) error { batch := t.conn.NewBatch() for key, metadata := range m { - safe := pgutf8str.Text{} - if err := safe.Set(metadata); err != nil { - return fmt.Errorf("setting in pgutf8 safe string: %w", err) - } query := queryFormat - batch.Queue(query, key, safe, true) + batch.Queue(query, key, metadata, true) } results, err := t.conn.SendBatch(context.Background(), batch) @@ -310,8 +305,10 @@ func isGauge(metric prometheus.Metric) bool { // syncInfoTable stats with promscale_instance_information table. 
func (t *engineImpl) syncInfoTable(stats map[string]float64) error { + pgUUID := new(pgtype.UUID) - if err := pgUUID.Set(t.uuid); err != nil { + err := pgtype.UUIDCodec{}.PlanScan(nil, 0, pgx.BinaryFormatCode, pgUUID).Scan(t.uuid[:], pgUUID) + if err != nil { return fmt.Errorf("setting pg-uuid: %w", err) } lastUpdated := time.Now() @@ -349,7 +346,7 @@ func (t *engineImpl) syncInfoTable(stats map[string]float64) error { strings.Join(indexes, ", "), strings.Join(updateStatements, ", "), ) - _, err := t.conn.Exec(context.Background(), query, columnValues...) + _, err = t.conn.Exec(context.Background(), query, columnValues...) if err != nil { return fmt.Errorf("executing telemetry sync query: %w", err) } diff --git a/pkg/tests/end_to_end_tests/alerts_test.go b/pkg/tests/end_to_end_tests/alerts_test.go index 9f7902c670..19093fdeea 100644 --- a/pkg/tests/end_to_end_tests/alerts_test.go +++ b/pkg/tests/end_to_end_tests/alerts_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/route" diff --git a/pkg/tests/end_to_end_tests/concurrent_sql_test.go b/pkg/tests/end_to_end_tests/concurrent_sql_test.go index c0f77704cf..2817085a7c 100644 --- a/pkg/tests/end_to_end_tests/concurrent_sql_test.go +++ b/pkg/tests/end_to_end_tests/concurrent_sql_test.go @@ -11,8 +11,8 @@ import ( "sync" "testing" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/prometheus/common/model" ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" pgmodel "github.com/timescale/promscale/pkg/pgmodel/model" diff --git a/pkg/tests/end_to_end_tests/config_dataset_test.go b/pkg/tests/end_to_end_tests/config_dataset_test.go index 02592ef79c..4290783c94 100644 --- a/pkg/tests/end_to_end_tests/config_dataset_test.go +++ b/pkg/tests/end_to_end_tests/config_dataset_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/dataset" + "github.com/timescale/promscale/pkg/pgmodel/model" ) func TestDatasetConfigApply(t *testing.T) { @@ -19,6 +20,7 @@ func TestDatasetConfigApply(t *testing.T) { disableCompression := false pgxConn := conn.Conn() + require.NoError(t, model.RegisterCustomPgTypes(context.Background(), pgxConn)) require.Equal(t, 8*time.Hour, getMetricsDefaultChunkInterval(t, pgxConn)) require.Equal(t, true, getMetricsDefaultCompressionSetting(t, pgxConn)) require.Equal(t, 10*time.Second, getMetricsDefaultHALeaseRefresh(t, pgxConn)) diff --git a/pkg/tests/end_to_end_tests/continuous_agg_test.go b/pkg/tests/end_to_end_tests/continuous_agg_test.go index 850140e590..d4bc0e7503 100644 --- a/pkg/tests/end_to_end_tests/continuous_agg_test.go +++ b/pkg/tests/end_to_end_tests/continuous_agg_test.go @@ -8,7 +8,7 @@ import ( "github.com/timescale/promscale/pkg/pgmodel/common/schema" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -338,7 +338,7 @@ func TestContinuousAggDataRetention(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { dbJob := testhelpers.PgxPoolWithRole(t, 
*testDatabase, "prom_maintenance") defer dbJob.Close() - dbSuper, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) + dbSuper, err := testhelpers.PgxPoolWithRegisteredTypes(testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) require.NoError(t, err) defer dbSuper.Close() //a chunk way back in 2009 @@ -423,7 +423,7 @@ func TestContinuousAgg2StepAgg(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { dbJob := testhelpers.PgxPoolWithRole(t, *testDatabase, "prom_maintenance") defer dbJob.Close() - dbSuper, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) + dbSuper, err := pgxpool.New(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) require.NoError(t, err) defer dbSuper.Close() _, err = dbSuper.Exec(context.Background(), "CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit") diff --git a/pkg/tests/end_to_end_tests/create_test.go b/pkg/tests/end_to_end_tests/create_test.go index 978da08863..45ae0945a1 100644 --- a/pkg/tests/end_to_end_tests/create_test.go +++ b/pkg/tests/end_to_end_tests/create_test.go @@ -11,10 +11,10 @@ import ( "testing" "time" - "github.com/jackc/pgconn" "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/pgmodel/common/errors" @@ -1309,7 +1309,7 @@ func TestExecuteMaintJob(t *testing.T) { t.Skip("skipping integration test") } withDB(t, *testDatabase, func(dbOwner *pgxpool.Pool, t testing.TB) { - dbSuper, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) + dbSuper, err := pgxpool.New(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) if err != nil { t.Fatal(err) } diff --git a/pkg/tests/end_to_end_tests/database_metrics_test.go b/pkg/tests/end_to_end_tests/database_metrics_test.go index 2dd81cb470..2639883913 100644 --- a/pkg/tests/end_to_end_tests/database_metrics_test.go +++ b/pkg/tests/end_to_end_tests/database_metrics_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" diff --git a/pkg/tests/end_to_end_tests/db_connections_test.go b/pkg/tests/end_to_end_tests/db_connections_test.go index bc534a0eed..bbb21337a1 100755 --- a/pkg/tests/end_to_end_tests/db_connections_test.go +++ b/pkg/tests/end_to_end_tests/db_connections_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/pgmodel/model" "github.com/timescale/promscale/pkg/prompb" @@ -134,7 +134,7 @@ func ignoreBlockedConnectionError(err error) error { func blockAllConnections(db *pgxpool.Pool, dbName string, user string) error { // Connect using superuser since our previous user has revoked connect privilege. 
- dbPool, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(dbName, true)) + dbPool, err := pgxpool.New(context.Background(), testhelpers.PgConnectURL(dbName, true)) if err != nil { return err } @@ -150,7 +150,7 @@ func blockAllConnections(db *pgxpool.Pool, dbName string, user string) error { func allowAllConnections(dbName string, user string) error { // Connect using superuser since our previous user has revoked connect privilege. - dbPool, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(dbName, true)) + dbPool, err := pgxpool.New(context.Background(), testhelpers.PgConnectURL(dbName, true)) if err != nil { return err } diff --git a/pkg/tests/end_to_end_tests/delete_test.go b/pkg/tests/end_to_end_tests/delete_test.go index b675995687..c9b48fa305 100644 --- a/pkg/tests/end_to_end_tests/delete_test.go +++ b/pkg/tests/end_to_end_tests/delete_test.go @@ -13,9 +13,9 @@ import ( "testing" "time" - "github.com/jackc/pgconn" "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" diff --git a/pkg/tests/end_to_end_tests/drop_test.go b/pkg/tests/end_to_end_tests/drop_test.go index c20cbb1295..9c40f82533 100644 --- a/pkg/tests/end_to_end_tests/drop_test.go +++ b/pkg/tests/end_to_end_tests/drop_test.go @@ -10,9 +10,9 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" @@ -225,7 +225,7 @@ func TestSQLDropChunkWithLocked(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { dbJob := testhelpers.PgxPoolWithRole(t, *testDatabase, "prom_maintenance") defer dbJob.Close() - dbSuper, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) + dbSuper, err := pgxpool.New(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser)) require.NoError(t, err) defer dbSuper.Close() //a chunk way back in 2009 diff --git a/pkg/tests/end_to_end_tests/exemplar_query_endpoint_test.go b/pkg/tests/end_to_end_tests/exemplar_query_endpoint_test.go index 2cba40222e..9fa33d9aa2 100644 --- a/pkg/tests/end_to_end_tests/exemplar_query_endpoint_test.go +++ b/pkg/tests/end_to_end_tests/exemplar_query_endpoint_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/sergi/go-diff/diffmatchpatch" "github.com/timescale/promscale/pkg/internal/testhelpers" ) diff --git a/pkg/tests/end_to_end_tests/exemplar_test.go b/pkg/tests/end_to_end_tests/exemplar_test.go index 2cedc067d8..781ad98cec 100644 --- a/pkg/tests/end_to_end_tests/exemplar_test.go +++ b/pkg/tests/end_to_end_tests/exemplar_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/pgmodel/cache" diff --git a/pkg/tests/end_to_end_tests/functions_test.go b/pkg/tests/end_to_end_tests/functions_test.go index a205e7628a..57320e543e 100644 --- 
a/pkg/tests/end_to_end_tests/functions_test.go +++ b/pkg/tests/end_to_end_tests/functions_test.go @@ -12,9 +12,9 @@ import ( "strings" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/prometheus/common/model" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/prompb" diff --git a/pkg/tests/end_to_end_tests/golden_files_test.go b/pkg/tests/end_to_end_tests/golden_files_test.go index d9f9860899..94e3952e31 100644 --- a/pkg/tests/end_to_end_tests/golden_files_test.go +++ b/pkg/tests/end_to_end_tests/golden_files_test.go @@ -11,10 +11,10 @@ import ( "strings" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/sergi/go-diff/diffmatchpatch" - _ "github.com/jackc/pgx/v4/stdlib" + _ "github.com/jackc/pgx/v5/stdlib" ) var outputDifferWithoutTimescale = map[string]bool{"info_view": true} diff --git a/pkg/tests/end_to_end_tests/ha_check_insert_sql_test.go b/pkg/tests/end_to_end_tests/ha_check_insert_sql_test.go index 8cba1e51bf..22389a45df 100644 --- a/pkg/tests/end_to_end_tests/ha_check_insert_sql_test.go +++ b/pkg/tests/end_to_end_tests/ha_check_insert_sql_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/timescale/promscale/pkg/internal/testhelpers" ) diff --git a/pkg/tests/end_to_end_tests/ha_multiple_promscales_test.go b/pkg/tests/end_to_end_tests/ha_multiple_promscales_test.go index b5e77f500e..8244c44989 100644 --- a/pkg/tests/end_to_end_tests/ha_multiple_promscales_test.go +++ b/pkg/tests/end_to_end_tests/ha_multiple_promscales_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" ) const ( diff --git a/pkg/tests/end_to_end_tests/ha_single_promscale_test.go b/pkg/tests/end_to_end_tests/ha_single_promscale_test.go index f1dc6307db..a6125daacb 100644 --- a/pkg/tests/end_to_end_tests/ha_single_promscale_test.go +++ b/pkg/tests/end_to_end_tests/ha_single_promscale_test.go @@ -22,7 +22,7 @@ import ( "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/util" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" promModel "github.com/prometheus/common/model" "github.com/timescale/promscale/pkg/clockcache" "github.com/timescale/promscale/pkg/ha" diff --git a/pkg/tests/end_to_end_tests/ha_try_change_leader_sql_test.go b/pkg/tests/end_to_end_tests/ha_try_change_leader_sql_test.go index 6634f7af5a..edf1c6601e 100644 --- a/pkg/tests/end_to_end_tests/ha_try_change_leader_sql_test.go +++ b/pkg/tests/end_to_end_tests/ha_try_change_leader_sql_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/timescale/promscale/pkg/internal/testhelpers" ) diff --git a/pkg/tests/end_to_end_tests/ingest_trace_test.go b/pkg/tests/end_to_end_tests/ingest_trace_test.go index ac00005020..6eeed411ae 100644 --- a/pkg/tests/end_to_end_tests/ingest_trace_test.go +++ b/pkg/tests/end_to_end_tests/ingest_trace_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/jaegertracing/jaeger/model" jaeger_integration_tests "github.com/jaegertracing/jaeger/plugin/storage/integration" "github.com/jaegertracing/jaeger/storage/spanstore" diff --git 
a/pkg/tests/end_to_end_tests/insert_compressed_chunks_test.go b/pkg/tests/end_to_end_tests/insert_compressed_chunks_test.go index 85a2ad080a..239cacc70c 100644 --- a/pkg/tests/end_to_end_tests/insert_compressed_chunks_test.go +++ b/pkg/tests/end_to_end_tests/insert_compressed_chunks_test.go @@ -8,7 +8,7 @@ import ( "context" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/cache" ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" diff --git a/pkg/tests/end_to_end_tests/jaeger_store_integration_test.go b/pkg/tests/end_to_end_tests/jaeger_store_integration_test.go index e91ddc7c6f..e04cd284d0 100644 --- a/pkg/tests/end_to_end_tests/jaeger_store_integration_test.go +++ b/pkg/tests/end_to_end_tests/jaeger_store_integration_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/storage/spanstore" "github.com/stretchr/testify/require" diff --git a/pkg/tests/end_to_end_tests/jaeger_store_test.go b/pkg/tests/end_to_end_tests/jaeger_store_test.go index 1eee07c185..cc89d3f6ff 100644 --- a/pkg/tests/end_to_end_tests/jaeger_store_test.go +++ b/pkg/tests/end_to_end_tests/jaeger_store_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/jaeger/store" jaegerstore "github.com/timescale/promscale/pkg/jaeger/store" diff --git a/pkg/tests/end_to_end_tests/main_test.go b/pkg/tests/end_to_end_tests/main_test.go index e38a08b384..74d9c7e196 100644 --- a/pkg/tests/end_to_end_tests/main_test.go +++ b/pkg/tests/end_to_end_tests/main_test.go @@ -18,8 +18,8 @@ import ( constants "github.com/timescale/promscale/pkg/tests" "github.com/docker/go-connections/nat" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/testcontainers/testcontainers-go" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/log" @@ -30,8 +30,6 @@ import ( "github.com/timescale/promscale/pkg/prompb" tput "github.com/timescale/promscale/pkg/util/throughput" "github.com/timescale/promscale/pkg/version" - - _ "github.com/jackc/pgx/v4/stdlib" ) var ( @@ -204,7 +202,7 @@ func withDBAttachNode(t testing.TB, DBName string, attachExisting bool, beforeAd t.Fatal("Shouldn't be using beforeAddNode unless testing multinode") } func() { - pool, err := pgxpool.Connect(context.Background(), connectURL) + pool, err := testhelpers.PgxPoolWithRegisteredTypes(connectURL) if err != nil { t.Fatal(err) } @@ -220,8 +218,10 @@ func withDBAttachNode(t testing.TB, DBName string, attachExisting bool, beforeAd attachDataNode2(t, DBName, connectURL) } - // need to get a new pool after the Migrate to catch any GUC changes made during Migrate - pool, err := pgxpool.Connect(context.Background(), connectURL) + // need to get a new pool after the Migrate to catch any GUC changes made + // during Migrate and to set the afterConnect that registers the custom + // PG types. 
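The comment above refers to testhelpers.PgxPoolWithRegisteredTypes, which the following hunk line switches to. As a rough sketch only (the actual helper lives in pkg/internal/testhelpers, and the signature of model.RegisterCustomPgTypes is inferred from the config_dataset_test.go hunk earlier in this patch), such a constructor can be built on the pgx v5 pool API like this:

// Sketch, not the actual testhelpers implementation: every new pooled
// connection gets the custom Promscale types registered in its type map,
// because pgx v5 builds a fresh pgtype.Map per connection.
package testhelperssketch

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/timescale/promscale/pkg/pgmodel/model"
)

func pgxPoolWithRegisteredTypes(connectURL string) (*pgxpool.Pool, error) {
	cfg, err := pgxpool.ParseConfig(connectURL)
	if err != nil {
		return nil, err
	}
	// Register the custom types on each connection as it is created.
	cfg.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
		return model.RegisterCustomPgTypes(ctx, conn)
	}
	return pgxpool.NewWithConfig(context.Background(), cfg)
}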
+ pool, err := testhelpers.PgxPoolWithRegisteredTypes(connectURL) if err != nil { t.Fatal(err) } @@ -245,7 +245,7 @@ func performMigrate(t testing.TB, connectURL string) { t.Fatal(err) } - migratePool, err := pgxpool.Connect(context.Background(), connectURL) + migratePool, err := pgxpool.New(context.Background(), connectURL) if err != nil { t.Fatal(err) } diff --git a/pkg/tests/end_to_end_tests/metadata_test.go b/pkg/tests/end_to_end_tests/metadata_test.go index 95342159ff..d852c465dc 100644 --- a/pkg/tests/end_to_end_tests/metadata_test.go +++ b/pkg/tests/end_to_end_tests/metadata_test.go @@ -8,7 +8,7 @@ import ( "context" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" diff --git a/pkg/tests/end_to_end_tests/metric_ingest_bench_test.go b/pkg/tests/end_to_end_tests/metric_ingest_bench_test.go index a586caa71e..d4329ca399 100644 --- a/pkg/tests/end_to_end_tests/metric_ingest_bench_test.go +++ b/pkg/tests/end_to_end_tests/metric_ingest_bench_test.go @@ -11,7 +11,7 @@ import ( "github.com/walle/targz" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/cache" "github.com/timescale/promscale/pkg/pgmodel/ingestor" diff --git a/pkg/tests/end_to_end_tests/metrics_duplicate_insert_test.go b/pkg/tests/end_to_end_tests/metrics_duplicate_insert_test.go index d5f85c3f8f..5e430c56d8 100644 --- a/pkg/tests/end_to_end_tests/metrics_duplicate_insert_test.go +++ b/pkg/tests/end_to_end_tests/metrics_duplicate_insert_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/require" diff --git a/pkg/tests/end_to_end_tests/migrate_test.go b/pkg/tests/end_to_end_tests/migrate_test.go index bdbf752a3f..d25791135a 100644 --- a/pkg/tests/end_to_end_tests/migrate_test.go +++ b/pkg/tests/end_to_end_tests/migrate_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/blang/semver/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/internal/testhelpers" diff --git a/pkg/tests/end_to_end_tests/multi_tenancy_test.go b/pkg/tests/end_to_end_tests/multi_tenancy_test.go index 956bcb497e..8cb5dc2ee4 100644 --- a/pkg/tests/end_to_end_tests/multi_tenancy_test.go +++ b/pkg/tests/end_to_end_tests/multi_tenancy_test.go @@ -11,7 +11,7 @@ import ( "sort" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/clockcache" diff --git a/pkg/tests/end_to_end_tests/nan_test.go b/pkg/tests/end_to_end_tests/nan_test.go index 578923bf4e..ae1ba80c32 100644 --- a/pkg/tests/end_to_end_tests/nan_test.go +++ b/pkg/tests/end_to_end_tests/nan_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/prometheus/prometheus/model/value" "github.com/timescale/promscale/pkg/clockcache" "github.com/timescale/promscale/pkg/internal/testhelpers" diff --git 
a/pkg/tests/end_to_end_tests/new_migrate_test.go b/pkg/tests/end_to_end_tests/new_migrate_test.go index 27cc162644..dc2c958e52 100644 --- a/pkg/tests/end_to_end_tests/new_migrate_test.go +++ b/pkg/tests/end_to_end_tests/new_migrate_test.go @@ -6,7 +6,7 @@ package end_to_end_tests import ( "context" "github.com/blang/semver/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/migrations" diff --git a/pkg/tests/end_to_end_tests/no_timescaledb_test.go b/pkg/tests/end_to_end_tests/no_timescaledb_test.go index 805cd5f152..dd95bc69db 100644 --- a/pkg/tests/end_to_end_tests/no_timescaledb_test.go +++ b/pkg/tests/end_to_end_tests/no_timescaledb_test.go @@ -5,7 +5,7 @@ package end_to_end_tests import ( "context" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" diff --git a/pkg/tests/end_to_end_tests/null_chars_test.go b/pkg/tests/end_to_end_tests/null_chars_test.go index 073a7f9a7f..c9908f186b 100644 --- a/pkg/tests/end_to_end_tests/null_chars_test.go +++ b/pkg/tests/end_to_end_tests/null_chars_test.go @@ -10,7 +10,7 @@ import ( "reflect" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/clockcache" "github.com/timescale/promscale/pkg/internal/testhelpers" diff --git a/pkg/tests/end_to_end_tests/promql_endpoint_integration_test.go b/pkg/tests/end_to_end_tests/promql_endpoint_integration_test.go index d3434540ee..13819836eb 100644 --- a/pkg/tests/end_to_end_tests/promql_endpoint_integration_test.go +++ b/pkg/tests/end_to_end_tests/promql_endpoint_integration_test.go @@ -18,7 +18,7 @@ import ( "github.com/gorilla/mux" "github.com/grafana/regexp" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" diff --git a/pkg/tests/end_to_end_tests/promql_label_endpoint_test.go b/pkg/tests/end_to_end_tests/promql_label_endpoint_test.go index 2da180f18d..af67bd5691 100644 --- a/pkg/tests/end_to_end_tests/promql_label_endpoint_test.go +++ b/pkg/tests/end_to_end_tests/promql_label_endpoint_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/timescale/promscale/pkg/clockcache" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/pgmodel/lreader" diff --git a/pkg/tests/end_to_end_tests/promql_query_endpoint_test.go b/pkg/tests/end_to_end_tests/promql_query_endpoint_test.go index f7ce887c08..e4c97d37e1 100644 --- a/pkg/tests/end_to_end_tests/promql_query_endpoint_test.go +++ b/pkg/tests/end_to_end_tests/promql_query_endpoint_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/sergi/go-diff/diffmatchpatch" "github.com/timescale/promscale/pkg/internal/testhelpers" ) diff --git a/pkg/tests/end_to_end_tests/promql_series_endpoint_test.go b/pkg/tests/end_to_end_tests/promql_series_endpoint_test.go index 3e97eabc18..7ed9aae87c 100644 --- a/pkg/tests/end_to_end_tests/promql_series_endpoint_test.go +++ b/pkg/tests/end_to_end_tests/promql_series_endpoint_test.go @@ -14,7 +14,7 @@ import ( "testing" 
"time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/prometheus/model/labels" "github.com/timescale/promscale/pkg/internal/testhelpers" ) diff --git a/pkg/tests/end_to_end_tests/promql_write_endpoint_test.go b/pkg/tests/end_to_end_tests/promql_write_endpoint_test.go index 71db836f81..f2534b9075 100644 --- a/pkg/tests/end_to_end_tests/promql_write_endpoint_test.go +++ b/pkg/tests/end_to_end_tests/promql_write_endpoint_test.go @@ -18,7 +18,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/common/model" pgmodel "github.com/timescale/promscale/pkg/pgmodel/model" "github.com/timescale/promscale/pkg/prompb" diff --git a/pkg/tests/end_to_end_tests/query_integration_test.go b/pkg/tests/end_to_end_tests/query_integration_test.go index 2b25312305..37223118d4 100644 --- a/pkg/tests/end_to_end_tests/query_integration_test.go +++ b/pkg/tests/end_to_end_tests/query_integration_test.go @@ -18,8 +18,8 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" - pgx "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + pgx "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" diff --git a/pkg/tests/end_to_end_tests/router_test.go b/pkg/tests/end_to_end_tests/router_test.go index 42210e83fe..8d136d54a8 100644 --- a/pkg/tests/end_to_end_tests/router_test.go +++ b/pkg/tests/end_to_end_tests/router_test.go @@ -12,7 +12,7 @@ import ( "time" "github.com/grafana/regexp" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/timescale/promscale/pkg/api" "github.com/timescale/promscale/pkg/auth" diff --git a/pkg/tests/end_to_end_tests/rules_test.go b/pkg/tests/end_to_end_tests/rules_test.go index 56b2be39df..7395d43019 100644 --- a/pkg/tests/end_to_end_tests/rules_test.go +++ b/pkg/tests/end_to_end_tests/rules_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/go-kit/log" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" prom_rules "github.com/prometheus/prometheus/rules" "github.com/stretchr/testify/require" diff --git a/pkg/tests/end_to_end_tests/sql_bench_test.go b/pkg/tests/end_to_end_tests/sql_bench_test.go index afe47d2e83..1e710097c4 100644 --- a/pkg/tests/end_to_end_tests/sql_bench_test.go +++ b/pkg/tests/end_to_end_tests/sql_bench_test.go @@ -11,9 +11,9 @@ import ( "testing" "time" - "github.com/jackc/pgtype" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/timescale/promscale/pkg/pgmodel/model" ) const ( @@ -458,11 +458,7 @@ func getSeriesIDForKeyValueArrayBatchUsingLabelArrays(db *pgxpool.Pool, metricID count := 0 if true { - labelArrayArray := pgtype.NewArrayType("prom_api.label_array[]", labelArrayOID, func() pgtype.ValueTranscoder { return &pgtype.Int4Array{} }) - err = labelArrayArray.Set(labelArraySet) - if err != nil { - panic(err) - } + labelArrayArray := model.SliceToArrayOfLabelArray(labelArraySet) start = time.Now() res, err := db.Query( context.Background(), diff --git a/pkg/tests/end_to_end_tests/sync_commit_test.go b/pkg/tests/end_to_end_tests/sync_commit_test.go index 549eba9e3e..204217ccb1 100644 --- a/pkg/tests/end_to_end_tests/sync_commit_test.go +++ 
b/pkg/tests/end_to_end_tests/sync_commit_test.go @@ -2,10 +2,11 @@ package end_to_end_tests import ( "context" + "sync/atomic" "testing" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgclient" ) @@ -15,20 +16,21 @@ import ( func TestWriterSynchronousCommit(t *testing.T) { // creates a new pool of database connections as would be created for the writer path - createWriterPool := func(connStr string, lockerCalled *bool, synchronousCommit bool) *pgxpool.Pool { + createWriterPool := func(connStr string, lockerCalled *atomic.Bool, synchronousCommit bool) *pgxpool.Pool { pgConfig, err := pgxpool.ParseConfig(connStr) if err != nil { t.Fatal(err) } pgConfig.MaxConns = 2 pgConfig.MinConns = 1 - *lockerCalled = false + + lockerCalled.Store(false) schemaLocker := func(ctx context.Context, conn *pgx.Conn) error { - *lockerCalled = true + lockerCalled.Store(true) return nil } - pgclient.SetWriterPoolAfterConnect(pgConfig, schemaLocker, synchronousCommit) - writerPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig) + pgConfig.AfterConnect = pgclient.WriterPoolAfterConnect(schemaLocker, synchronousCommit) + writerPool, err := pgxpool.NewWithConfig(context.Background(), pgConfig) if err != nil { t.Fatal(err) } @@ -46,13 +48,13 @@ func TestWriterSynchronousCommit(t *testing.T) { } withDB(t, "writer_sync_commit", func(db *pgxpool.Pool, t testing.TB) { - var lockerCalled bool // used to ensure that the schema locker function is still called + lockerCalled := &atomic.Bool{} // used to ensure that the schema locker function is still called // create a writer pool with synchronous_commit turned off - writerPool1 := createWriterPool(db.Config().ConnString(), &lockerCalled, false) + writerPool1 := createWriterPool(db.Config().ConnString(), lockerCalled, false) setting := getSynchronousCommit(writerPool1) require.Equal(t, "off", setting, "expected synchronous_commit to be off but it was %s", setting) - require.True(t, lockerCalled, "schemaLocker function should have been called and wasn't") + require.True(t, lockerCalled.Load(), "schemaLocker function should have been called and wasn't") // ensure that setting synchronous_commit to off on the writer pool did not impact the setting // in other database sessions @@ -65,13 +67,13 @@ func TestWriterSynchronousCommit(t *testing.T) { writerPool1.Close() // now create a writer pool with synchronous_commit turned on - lockerCalled = false - writerPool2 := createWriterPool(db.Config().ConnString(), &lockerCalled, true) + lockerCalled.Store(false) + writerPool2 := createWriterPool(db.Config().ConnString(), lockerCalled, true) // make sure the setting is on and the schema locker function was called setting = getSynchronousCommit(writerPool2) require.Equal(t, "on", setting, "expected synchronous_commit to be on but it was %s", setting) - require.True(t, lockerCalled, "schemaLocker function should have been called and wasn't") + require.True(t, lockerCalled.Load(), "schemaLocker function should have been called and wasn't") writerPool2.Close() }) } diff --git a/pkg/tests/end_to_end_tests/tag_op_test.go b/pkg/tests/end_to_end_tests/tag_op_test.go index f0ba3b793a..60350ff611 100644 --- a/pkg/tests/end_to_end_tests/tag_op_test.go +++ b/pkg/tests/end_to_end_tests/tag_op_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" 
) type operator struct { diff --git a/pkg/tests/end_to_end_tests/telemetry_test.go b/pkg/tests/end_to_end_tests/telemetry_test.go index f4c531b01a..2a121a4228 100644 --- a/pkg/tests/end_to_end_tests/telemetry_test.go +++ b/pkg/tests/end_to_end_tests/telemetry_test.go @@ -13,7 +13,7 @@ import ( "time" "github.com/google/uuid" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" diff --git a/pkg/tests/end_to_end_tests/trace_ingest_bench_test.go b/pkg/tests/end_to_end_tests/trace_ingest_bench_test.go index 06a6351202..7720036276 100644 --- a/pkg/tests/end_to_end_tests/trace_ingest_bench_test.go +++ b/pkg/tests/end_to_end_tests/trace_ingest_bench_test.go @@ -6,7 +6,7 @@ import ( "sync" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgmodel/cache" "github.com/timescale/promscale/pkg/pgmodel/ingestor" diff --git a/pkg/tests/end_to_end_tests/trace_operation_calls_test.go b/pkg/tests/end_to_end_tests/trace_operation_calls_test.go index 5f83c83e6e..0c45fb7346 100644 --- a/pkg/tests/end_to_end_tests/trace_operation_calls_test.go +++ b/pkg/tests/end_to_end_tests/trace_operation_calls_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" ) diff --git a/pkg/tests/end_to_end_tests/trace_put_test.go b/pkg/tests/end_to_end_tests/trace_put_test.go index 3a2a0d428e..2684d05dc2 100644 --- a/pkg/tests/end_to_end_tests/trace_put_test.go +++ b/pkg/tests/end_to_end_tests/trace_put_test.go @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" ) diff --git a/pkg/tests/end_to_end_tests/trace_query_integration_test.go b/pkg/tests/end_to_end_tests/trace_query_integration_test.go index 6bfcd46a64..666b9d4010 100644 --- a/pkg/tests/end_to_end_tests/trace_query_integration_test.go +++ b/pkg/tests/end_to_end_tests/trace_query_integration_test.go @@ -15,7 +15,7 @@ import ( "sort" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" jaegerJSONModel "github.com/jaegertracing/jaeger/model/json" "github.com/prometheus/prometheus/model/timestamp" "github.com/stretchr/testify/require" diff --git a/pkg/tests/end_to_end_tests/trace_retention_test.go b/pkg/tests/end_to_end_tests/trace_retention_test.go index ef7bea8342..184ef95877 100644 --- a/pkg/tests/end_to_end_tests/trace_retention_test.go +++ b/pkg/tests/end_to_end_tests/trace_retention_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/internal/testhelpers" ) diff --git a/pkg/tests/end_to_end_tests/trace_tree_test.go b/pkg/tests/end_to_end_tests/trace_tree_test.go index 1cd1cbb7f2..7a84c41e9e 100644 --- a/pkg/tests/end_to_end_tests/trace_tree_test.go +++ b/pkg/tests/end_to_end_tests/trace_tree_test.go @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" ) diff --git a/pkg/tests/end_to_end_tests/vacuum_test.go b/pkg/tests/end_to_end_tests/vacuum_test.go index 7a413a946c..d3e1db3e84 100644 --- 
a/pkg/tests/end_to_end_tests/vacuum_test.go +++ b/pkg/tests/end_to_end_tests/vacuum_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/pgxconn" "github.com/timescale/promscale/pkg/vacuum" diff --git a/pkg/tests/end_to_end_tests/view_test.go b/pkg/tests/end_to_end_tests/view_test.go index 6c273fb7f7..3dfca7c464 100644 --- a/pkg/tests/end_to_end_tests/view_test.go +++ b/pkg/tests/end_to_end_tests/view_test.go @@ -10,8 +10,8 @@ import ( "strings" "testing" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" "github.com/timescale/promscale/pkg/pgmodel/model" "github.com/timescale/promscale/pkg/pgxconn" diff --git a/pkg/tests/end_to_end_tests/zlast_test.go b/pkg/tests/end_to_end_tests/zlast_test.go index 987d29510e..79f8172c0f 100644 --- a/pkg/tests/end_to_end_tests/zlast_test.go +++ b/pkg/tests/end_to_end_tests/zlast_test.go @@ -14,7 +14,7 @@ import ( "reflect" "testing" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor" "github.com/timescale/promscale/pkg/pgxconn" "github.com/timescale/promscale/pkg/tests/upgrade_tests" diff --git a/pkg/tests/testsupport/mock_pgx_conn.go b/pkg/tests/testsupport/mock_pgx_conn.go index a30e7d2e0c..42123ee58f 100644 --- a/pkg/tests/testsupport/mock_pgx_conn.go +++ b/pkg/tests/testsupport/mock_pgx_conn.go @@ -3,10 +3,9 @@ package testsupport import ( "context" - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" "github.com/timescale/promscale/pkg/pgxconn" ) @@ -17,23 +16,23 @@ func (MockRow) Scan(dest ...interface{}) error { return nil } type MockBatchResults struct{} func (MockBatchResults) Exec() (pgconn.CommandTag, error) { - return nil, nil + return pgconn.CommandTag{}, nil } func (MockBatchResults) Query() (pgx.Rows, error) { return nil, nil } + func (MockBatchResults) QueryRow() pgx.Row { return MockRow{} } -func (MockBatchResults) QueryFunc(scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { - return nil, nil -} + func (MockBatchResults) Close() error { return nil } type MockBatch struct{} -func (MockBatch) Queue(query string, arguments ...interface{}) {} +func (MockBatch) Queue(query string, arguments ...any) *pgx.QueuedQuery { return nil } + func (MockBatch) Len() int { return 0 } diff --git a/pkg/tests/upgrade_tests/shapshot.go b/pkg/tests/upgrade_tests/shapshot.go index 94f93f695b..69d0838631 100644 --- a/pkg/tests/upgrade_tests/shapshot.go +++ b/pkg/tests/upgrade_tests/shapshot.go @@ -14,8 +14,8 @@ import ( "regexp" "testing" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/sergi/go-diff/diffmatchpatch" "github.com/testcontainers/testcontainers-go" "github.com/timescale/promscale/pkg/internal/testhelpers" diff --git a/pkg/tests/upgrade_tests/upgrade_test.go b/pkg/tests/upgrade_tests/upgrade_test.go index 0181fbd28b..e43c6e6b67 100644 --- a/pkg/tests/upgrade_tests/upgrade_test.go +++ b/pkg/tests/upgrade_tests/upgrade_test.go @@ -24,9 +24,9 @@ import ( "github.com/docker/go-connections/nat" 
"github.com/gogo/protobuf/proto" "github.com/golang/snappy" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - _ "github.com/jackc/pgx/v4/stdlib" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/testcontainers/testcontainers-go" "github.com/timescale/promscale/pkg/internal/testhelpers" "github.com/timescale/promscale/pkg/log" @@ -186,7 +186,7 @@ func TestUpgradeFromEarliestNoData(t *testing.T) { } func turnOffCompressionOnMetric(t *testing.T) { - db, err := pgxpool.Connect(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.NoSuperuser)) + db, err := pgxpool.New(context.Background(), testhelpers.PgConnectURL(*testDatabase, testhelpers.NoSuperuser)) if err != nil { t.Fatal(err) } @@ -221,8 +221,7 @@ func getUpgradedDbInfo(t *testing.T, noData bool, prevVersionStr string, extensi if !noData { func() { connectURL := testhelpers.PgConnectURL(*testDatabase, testhelpers.NoSuperuser) - - db, err := pgxpool.Connect(context.Background(), connectURL) + db, err := testhelpers.PgxPoolWithRegisteredTypes(connectURL) if err != nil { t.Fatal(err) } @@ -240,7 +239,7 @@ func getUpgradedDbInfo(t *testing.T, noData bool, prevVersionStr string, extensi } connectURL := testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser) - db, err := pgxpool.Connect(context.Background(), connectURL) + db, err := testhelpers.PgxPoolWithRegisteredTypes(connectURL) if err != nil { t.Fatal(err) } @@ -270,7 +269,7 @@ func getPristineDbInfo(t *testing.T, noData bool, extensionState testhelpers.Tes if !noData { func() { connectURL := testhelpers.PgConnectURL(*testDatabase, testhelpers.NoSuperuser) - db, err := pgxpool.Connect(context.Background(), connectURL) + db, err := testhelpers.PgxPoolWithRegisteredTypes(connectURL) if err != nil { t.Fatal(err) } @@ -512,7 +511,7 @@ func withNewDBAtCurrentVersion(t testing.TB, DBName string, extensionState testh testhelpers.MakePromUserPromAdmin(t, DBName) // need to get a new pool after the Migrate to catch any GUC changes made during Migrate - db, err := pgxpool.Connect(context.Background(), connectURL) + db, err := testhelpers.PgxPoolWithRegisteredTypes(connectURL) if err != nil { t.Fatal(err) } @@ -531,7 +530,7 @@ func withNewDBAtCurrentVersion(t testing.TB, DBName string, extensionState testh } defer func() { _ = closer.Close() }() connectURL := testhelpers.PgConnectURL(*testDatabase, testhelpers.Superuser) - db, err := pgxpool.Connect(context.Background(), connectURL) + db, err := pgxpool.New(context.Background(), connectURL) if err != nil { t.Fatal(err) } diff --git a/pkg/util/lock.go b/pkg/util/lock.go index cbb3364a92..26ae10c474 100644 --- a/pkg/util/lock.go +++ b/pkg/util/lock.go @@ -10,7 +10,7 @@ import ( "sync" "time" - pgx "github.com/jackc/pgx/v4" + pgx "github.com/jackc/pgx/v5" "github.com/timescale/promscale/pkg/log" ) diff --git a/pkg/util/util.go b/pkg/util/util.go index 507f216370..50791ff32d 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -97,3 +97,9 @@ func IsTimescaleDBInstalled(conn pgxconn.PgxConn) bool { } return installed } + +// Pointer returns a pointer to the given variabel. Useful in tests for +// primitive value pointer arguments. 
+func Pointer[T any](x T) *T { + return &x +} diff --git a/pkg/vacuum/vacuum.go b/pkg/vacuum/vacuum.go index 560f479d3d..ba1952015a 100644 --- a/pkg/vacuum/vacuum.go +++ b/pkg/vacuum/vacuum.go @@ -52,7 +52,7 @@ import ( "sync" "time" - "github.com/jackc/pgx/v4/pgxpool" + "github.com/jackc/pgx/v5/pgxpool" "github.com/prometheus/client_golang/prometheus" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/pgxconn"
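Two notes on the mechanical changes above: pgxpool.Connect and pgxpool.ConnectConfig from pgx v4 are renamed to pgxpool.New and pgxpool.NewWithConfig in v5 (same behaviour, new names), and the new util.Pointer generic helper gives tests pointers to primitive values without throwaway variables. An illustrative usage (not part of this patch) of that helper:

package util_test

import (
	"testing"
	"time"

	"github.com/timescale/promscale/pkg/util"
)

// Illustrative only: util.Pointer yields pointers to primitive values inline.
func TestPointerHelper(t *testing.T) {
	retention := util.Pointer(30 * 24 * time.Hour) // *time.Duration
	limit := util.Pointer(int64(100))              // *int64

	if *retention != 30*24*time.Hour || *limit != 100 {
		t.Fatal("unexpected pointer values")
	}
}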