From 297fbbdc03adce3b3b0bc6d8932cf4d310a63389 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Tue, 18 Oct 2022 18:56:05 +0530 Subject: [PATCH 01/13] Adds support to create/delete metric rollups via dataset-config. Signed-off-by: Harkishen-Singh This commit adds support for creation or deletion of metric rollups by reading the dataset-config file. The dataset config file contains a new field `downsample:` which supports 2 options: `automatic` & `resolution`. If `automatic` is false, then the connector updates the database so that `_prom_catalog.scan_for_new_rollups` does not create any new rollups (Caggs). `resolution` is a comma-separated list of resolutions for downsampling, where each entry uses the format label:resolution:retention. Example: `short:5m:90d,long:1h:365d` -> The resolution works in a strict manner. Its aim is to make sure that the database contains exactly those resolutions that are mentioned in the dataset config. For example, if the database already contains `short` and `long` downsampled data and the dataset is supplied with `short:5m:90d,very_long:1d:365d`, then the `long` downsampled data is deleted and `very_long` is created. -> `resolution` is applied if `automatic` is `true`. If `automatic` is not given in the dataset config, it defaults to `true` based on our plan to keep downsampling applied as a default setting. 
--- pkg/dataset/config.go | 19 ++- pkg/dataset/config_test.go | 33 +++-- pkg/dataset/downsample.go | 76 ++++++++++ pkg/rollup/rollup.go | 133 +++++++++++++++++ pkg/rollup/rollup_test.go | 138 ++++++++++++++++++ pkg/runner/client.go | 7 + .../end_to_end_tests/config_dataset_test.go | 11 +- .../duration.go => util/day_duration.go} | 3 +- 8 files changed, 392 insertions(+), 28 deletions(-) create mode 100644 pkg/dataset/downsample.go create mode 100644 pkg/rollup/rollup.go create mode 100644 pkg/rollup/rollup_test.go rename pkg/{dataset/duration.go => util/day_duration.go} (99%) diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index f61c7f8b06..cbd18c6ef2 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -10,8 +10,10 @@ import ( "time" "github.com/jackc/pgx/v4" - "github.com/timescale/promscale/pkg/log" "gopkg.in/yaml.v2" + + "github.com/timescale/promscale/pkg/log" + "github.com/timescale/promscale/pkg/util" ) const ( @@ -47,6 +49,7 @@ type Metrics struct { HALeaseRefresh DayDuration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` HALeaseTimeout DayDuration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` RetentionPeriod DayDuration `mapstructure:"default_retention_period" yaml:"default_retention_period"` + Downsample `mapstructure:"downsample" yaml:"downsample,omitempty"` } // Traces contains dataset configuration options for traces data. 
@@ -64,6 +67,10 @@ func NewConfig(contents string) (cfg Config, err error) { func (c *Config) Apply(conn *pgx.Conn) error { c.applyDefaults() + if err := c.Downsample.Apply(conn); err != nil { + return fmt.Errorf("error applying configuration for downsampling: %w", err) + } + log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", c.Metrics.ChunkInterval)) log.Info("msg", fmt.Sprintf("Setting metric dataset default compression to %t", *c.Metrics.Compression)) log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease refresh to %s", c.Metrics.HALeaseRefresh)) @@ -91,21 +98,21 @@ func (c *Config) Apply(conn *pgx.Conn) error { func (c *Config) applyDefaults() { if c.Metrics.ChunkInterval <= 0 { - c.Metrics.ChunkInterval = DayDuration(defaultMetricChunkInterval) + c.Metrics.ChunkInterval = util.DayDuration(defaultMetricChunkInterval) } if c.Metrics.Compression == nil { c.Metrics.Compression = &defaultMetricCompressionVar } if c.Metrics.HALeaseRefresh <= 0 { - c.Metrics.HALeaseRefresh = DayDuration(defaultMetricHALeaseRefresh) + c.Metrics.HALeaseRefresh = util.DayDuration(defaultMetricHALeaseRefresh) } if c.Metrics.HALeaseTimeout <= 0 { - c.Metrics.HALeaseTimeout = DayDuration(defaultMetricHALeaseTimeout) + c.Metrics.HALeaseTimeout = util.DayDuration(defaultMetricHALeaseTimeout) } if c.Metrics.RetentionPeriod <= 0 { - c.Metrics.RetentionPeriod = DayDuration(defaultMetricRetentionPeriod) + c.Metrics.RetentionPeriod = util.DayDuration(defaultMetricRetentionPeriod) } if c.Traces.RetentionPeriod <= 0 { - c.Traces.RetentionPeriod = DayDuration(defaultTraceRetentionPeriod) + c.Traces.RetentionPeriod = util.DayDuration(defaultTraceRetentionPeriod) } } diff --git a/pkg/dataset/config_test.go b/pkg/dataset/config_test.go index eafa102231..2cc217651f 100644 --- a/pkg/dataset/config_test.go +++ b/pkg/dataset/config_test.go @@ -4,6 +4,7 @@ package dataset import ( + "github.com/timescale/promscale/pkg/util" "testing" "time" @@ 
-48,7 +49,7 @@ func TestNewConfig(t *testing.T) { default_retention_period: 3d2h`, cfg: Config{ Metrics: Metrics{ - RetentionPeriod: DayDuration(((3 * 24) + 2) * time.Hour), + RetentionPeriod: util.DayDuration(((3 * 24) + 2) * time.Hour), }, }, }, @@ -64,14 +65,14 @@ traces: default_retention_period: 15d`, cfg: Config{ Metrics: Metrics{ - ChunkInterval: DayDuration(3 * time.Hour), + ChunkInterval: util.DayDuration(3 * time.Hour), Compression: &testCompressionSetting, - HALeaseRefresh: DayDuration(2 * time.Minute), - HALeaseTimeout: DayDuration(5 * time.Second), - RetentionPeriod: DayDuration(30 * 24 * time.Hour), + HALeaseRefresh: util.DayDuration(2 * time.Minute), + HALeaseTimeout: util.DayDuration(5 * time.Second), + RetentionPeriod: util.DayDuration(30 * 24 * time.Hour), }, Traces: Traces{ - RetentionPeriod: DayDuration(15 * 24 * time.Hour), + RetentionPeriod: util.DayDuration(15 * 24 * time.Hour), }, }, }, @@ -100,14 +101,14 @@ func TestApplyDefaults(t *testing.T) { t, Config{ Metrics: Metrics{ - ChunkInterval: DayDuration(defaultMetricChunkInterval), + ChunkInterval: util.DayDuration(defaultMetricChunkInterval), Compression: &defaultMetricCompressionVar, - HALeaseRefresh: DayDuration(defaultMetricHALeaseRefresh), - HALeaseTimeout: DayDuration(defaultMetricHALeaseTimeout), - RetentionPeriod: DayDuration(defaultMetricRetentionPeriod), + HALeaseRefresh: util.DayDuration(defaultMetricHALeaseRefresh), + HALeaseTimeout: util.DayDuration(defaultMetricHALeaseTimeout), + RetentionPeriod: util.DayDuration(defaultMetricRetentionPeriod), }, Traces: Traces{ - RetentionPeriod: DayDuration(defaultTraceRetentionPeriod), + RetentionPeriod: util.DayDuration(defaultTraceRetentionPeriod), }, }, c, @@ -115,14 +116,14 @@ func TestApplyDefaults(t *testing.T) { untouched := Config{ Metrics: Metrics{ - ChunkInterval: DayDuration(3 * time.Hour), + ChunkInterval: util.DayDuration(3 * time.Hour), Compression: &testCompressionSetting, - HALeaseRefresh: DayDuration(2 * time.Minute), - 
HALeaseTimeout: DayDuration(5 * time.Second), - RetentionPeriod: DayDuration(30 * 24 * time.Hour), + HALeaseRefresh: util.DayDuration(2 * time.Minute), + HALeaseTimeout: util.DayDuration(5 * time.Second), + RetentionPeriod: util.DayDuration(30 * 24 * time.Hour), }, Traces: Traces{ - RetentionPeriod: DayDuration(15 * 24 * time.Hour), + RetentionPeriod: util.DayDuration(15 * 24 * time.Hour), }, } diff --git a/pkg/dataset/downsample.go b/pkg/dataset/downsample.go new file mode 100644 index 0000000000..4a61275408 --- /dev/null +++ b/pkg/dataset/downsample.go @@ -0,0 +1,76 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. + +package dataset + +import ( + "context" + "fmt" + + "github.com/jackc/pgx/v4" + + "github.com/timescale/promscale/pkg/log" + "github.com/timescale/promscale/pkg/rollup" +) + +const ( + defaultDownsampleState = true + defaultDownsampleResolution = "short:5m:90d,long:1h:395d" // label:resolution:retention +) + +var ( + setDefaultDownsampleStateSQL = "SELECT prom_api.set_automatic_downsample($1)" + setDefaultDownsampleResolutionSQL = "SELECT prom_api.set_downsample_resolution($1)" + + defaultDownsampleStateVar = defaultDownsampleState +) + +type Downsample struct { + Automatic *bool `yaml:"automatic,omitempty"` + Resolution string `yaml:"resolution,omitempty"` +} + +func (d *Downsample) Apply(conn *pgx.Conn) error { + d.applyDefaults() + + queries := map[string]interface{}{ + setDefaultDownsampleStateSQL: d.Automatic, + } + if *d.Automatic { + if err := handleDownsampling(conn, d.Resolution); err != nil { + return fmt.Errorf("handle downsample: %w", err) + } + queries[setDefaultDownsampleResolutionSQL] = d.Resolution + log.Info("msg", fmt.Sprintf("Setting metric downsample resolution to %s", d.Resolution)) + } + log.Info("msg", fmt.Sprintf("Setting metric automatic downsample to %t", *d.Automatic)) + + for sql, param := 
range queries { + if _, err := conn.Exec(context.Background(), sql, param); err != nil { + return err + } + } + return nil +} + +func (d *Downsample) applyDefaults() { + if d.Automatic == nil { + // In default case, we plan to downsample data. + d.Automatic = &defaultDownsampleStateVar + } + if d.Resolution == "" { + d.Resolution = defaultDownsampleResolution + } +} + +func handleDownsampling(conn *pgx.Conn, resolutionStr string) error { + downsampleResolutions, err := rollup.Parse(resolutionStr) + if err != nil { + return fmt.Errorf("error parsing downsample resolution: %w", err) + } + if err = rollup.EnsureRollupWith(conn, downsampleResolutions); err != nil { + return fmt.Errorf("ensure rollup with: %w", err) + } + return nil +} diff --git a/pkg/rollup/rollup.go b/pkg/rollup/rollup.go new file mode 100644 index 0000000000..c42cdf8ff3 --- /dev/null +++ b/pkg/rollup/rollup.go @@ -0,0 +1,133 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. + +package rollup + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/jackc/pgx/v4" + "github.com/timescale/promscale/pkg/util" +) + +type DownsampleResolution struct { + label string + resolution util.DayDuration + retention util.DayDuration +} + +// Parse parses the rollup resolution string and returns true if it respects the format of downsampling resolution, +// which is a string of comma separated label:resolution:retention. 
+// Example: short:5m:90d,long:1h:395d +func Parse(resolutionStr string) ([]DownsampleResolution, error) { + resolutions := strings.Split(resolutionStr, ",") + if len(resolutions) < 1 { + return nil, fmt.Errorf("resolutions cannot be less than 1") + } + var r []DownsampleResolution + for _, resolution := range resolutions { + resolution = strings.TrimSpace(resolution) + if resolution == "" { + continue + } + + items := strings.Split(resolution, ":") + if len(items) != 3 { + return nil, fmt.Errorf("expected items not found: needed in format of label:resolution:retention but found %v for %s resolution", strings.Join(items, ":"), resolution) + } + lName := items[0] + + var res util.DayDuration + err := res.UnmarshalText([]byte(items[1])) + if err != nil { + return nil, fmt.Errorf("error parsing resolution %s: %w", items[1], err) + } + + var retention util.DayDuration + err = retention.UnmarshalText([]byte(items[2])) + if err != nil { + return nil, fmt.Errorf("error parsing retention %s: %w", items[2], err) + } + r = append(r, DownsampleResolution{lName, res, retention}) + } + return r, nil +} + +// EnsureRollupWith ensures "strictly" that the given new resolutions are applied in the database. +// Note: It follows a "strict" behaviour, meaning any existing resolutions of downsampling in +// the database will be removed, so that the all downsampling data in the database strictly +// matches the provided newResolutions. 
+func EnsureRollupWith(conn *pgx.Conn, newResolutions []DownsampleResolution) error { + rows, err := conn.Query(context.Background(), "SELECT name, resolution, retention FROM _prom_catalog.rollup") + if err != nil { + return fmt.Errorf("querying existing resolutions: %w", err) + } + defer rows.Close() + + var existingResolutions []DownsampleResolution + for rows.Next() { + var lName string + var resolution, retention time.Duration + if err := rows.Scan(&lName, &resolution, &retention); err != nil { + return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) + } + existingResolutions = append(existingResolutions, DownsampleResolution{label: lName, resolution: util.DayDuration(resolution), retention: util.DayDuration(retention)}) + } + + // Determine which resolutions need to be created and deleted from the DB. + pendingCreation := diff(newResolutions, existingResolutions) + pendingDeletion := diff(existingResolutions, newResolutions) + + // Delete rollups that are no longer required. + if err = deleteRollups(conn, pendingDeletion); err != nil { + return fmt.Errorf("delete rollups: %w", err) + } + + // Create new rollups. 
+ if err = createRollups(conn, pendingCreation); err != nil { + return fmt.Errorf("create rollups: %w", err) + } + return nil +} + +func createRollups(conn *pgx.Conn, res []DownsampleResolution) error { + for _, r := range res { + _, err := conn.Exec(context.Background(), "CALL _prom_catalog.create_metric_rollup($1, $2, $3)", r.label, time.Duration(r.resolution), time.Duration(r.retention)) + if err != nil { + return fmt.Errorf("error creating rollup for %s: %w", r.label, err) + } + } + return nil +} + +func deleteRollups(conn *pgx.Conn, res []DownsampleResolution) error { + for _, r := range res { + _, err := conn.Exec(context.Background(), "CALL _prom_catalog.delete_metric_rollup($1)", r.label) + if err != nil { + return fmt.Errorf("error deleting rollup for %s: %w", r.label, err) + } + } + return nil +} + +// diff returns the elements of a that are not in b. +func diff(a, b []DownsampleResolution) []DownsampleResolution { + var difference []DownsampleResolution + for i := range a { + found := false + for j := range b { + if a[i].label == b[j].label { + found = true + break + } + } + if !found { + difference = append(difference, a[i]) + } + } + return difference +} diff --git a/pkg/rollup/rollup_test.go b/pkg/rollup/rollup_test.go new file mode 100644 index 0000000000..10b4b78de1 --- /dev/null +++ b/pkg/rollup/rollup_test.go @@ -0,0 +1,138 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. 
+ +package rollup + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/timescale/promscale/pkg/util" +) + +func TestParse(t *testing.T) { + tcs := []struct { + name string + input string + numResolutions int + parsedResolution []DownsampleResolution + shouldFail bool + }{ + { + name: "default entries", + input: "short:5m:90d,long:1h:395d", + parsedResolution: []DownsampleResolution{ + {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + {"long", util.DayDuration(time.Hour), util.DayDuration(395 * 24 * time.Hour)}, + }, + numResolutions: 2, + }, + { + name: "default entries with space", + input: "short:5m:90d, long:1h:395d", + parsedResolution: []DownsampleResolution{ + {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + {"long", util.DayDuration(time.Hour), util.DayDuration(395 * 24 * time.Hour)}, + }, + numResolutions: 2, + }, + { + name: "only short", + input: "short:5m:90d", + parsedResolution: []DownsampleResolution{ + {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + }, + numResolutions: 1, + }, + { + name: "only short with comma", + input: "short:5m:90d,", + parsedResolution: []DownsampleResolution{ + {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + }, + numResolutions: 1, + }, + { + name: "only short with space", + input: "short:5m:90d, ", + parsedResolution: []DownsampleResolution{ + {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + }, + numResolutions: 1, + }, + { + name: "mix", + input: "short:5m:90d,long:1h:395d, very_long:2d:365d , very_short:1m:90d", + parsedResolution: []DownsampleResolution{ + {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + {"long", util.DayDuration(time.Hour), util.DayDuration(395 * 24 * time.Hour)}, + {"very_long", util.DayDuration(2 * 24 * time.Hour), util.DayDuration(365 * 24 * 
time.Hour)}, + {"very_short", util.DayDuration(time.Minute), util.DayDuration(90 * 24 * time.Hour)}, + }, + numResolutions: 4, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + resolutions, err := Parse(tc.input) + if tc.shouldFail { + require.Error(t, err, tc.name) + return + } + require.NoError(t, err, tc.name) + require.Equal(t, tc.numResolutions, len(resolutions), tc.name) + require.Equal(t, tc.parsedResolution, resolutions, tc.name) + }) + } +} + +func TestDiff(t *testing.T) { + tcs := []struct { + name string + a, b, expected []DownsampleResolution + }{ + { + name: "some inclusive elements", + a: []DownsampleResolution{{label: "a"}, {label: "b"}, {label: "c"}, {label: "d"}}, + b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, + expected: []DownsampleResolution{{label: "a"}, {label: "b"}}, + }, + { + name: "b superset of a", + a: []DownsampleResolution{{label: "c"}, {label: "d"}}, + b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, + expected: []DownsampleResolution(nil), + }, + { + name: "a empty", + a: []DownsampleResolution{}, + b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, + expected: []DownsampleResolution(nil), + }, + { + name: "all elements exclusive", + a: []DownsampleResolution{{label: "a"}}, + b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, + expected: []DownsampleResolution{{label: "a"}}, + }, + { + name: "same", + a: []DownsampleResolution{{label: "a"}, {label: "b"}, {label: "c"}, {label: "d"}}, + b: []DownsampleResolution{{label: "a"}, {label: "b"}, {label: "c"}, {label: "d"}}, + expected: []DownsampleResolution(nil), + }, + { + name: "empty", + a: []DownsampleResolution{}, + b: []DownsampleResolution{}, + expected: []DownsampleResolution(nil), + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expected, diff(tc.a, tc.b), tc.name) + }) + } +} diff --git a/pkg/runner/client.go 
b/pkg/runner/client.go index 3e43fc5d66..81beb674a8 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -172,6 +172,13 @@ func CreateClient(r prometheus.Registerer, cfg *Config) (*pgclient.Client, error err = cfg.DatasetCfg.Apply(conn) } else if cfg.DatasetConfig != "" { err = applyDatasetConfig(conn, cfg.DatasetConfig) + } else { + // We apply downsampling settings even when DatasetConfig is not given, which is the most common case. + downsampleCfg := &dataset.Downsample{} + err = downsampleCfg.Apply(conn) + if err != nil { + return nil, fmt.Errorf("error applying downsampling configuration: %w", err) + } } if err != nil { return nil, fmt.Errorf("error applying dataset configuration: %w", err) diff --git a/pkg/tests/end_to_end_tests/config_dataset_test.go b/pkg/tests/end_to_end_tests/config_dataset_test.go index 02592ef79c..f07f169d23 100644 --- a/pkg/tests/end_to_end_tests/config_dataset_test.go +++ b/pkg/tests/end_to_end_tests/config_dataset_test.go @@ -2,6 +2,7 @@ package end_to_end_tests import ( "context" + "github.com/timescale/promscale/pkg/util" "testing" "time" @@ -28,14 +29,14 @@ func TestDatasetConfigApply(t *testing.T) { cfg := dataset.Config{ Metrics: dataset.Metrics{ - ChunkInterval: dataset.DayDuration(4 * time.Hour), + ChunkInterval: util.DayDuration(4 * time.Hour), Compression: &disableCompression, - HALeaseRefresh: dataset.DayDuration(15 * time.Second), - HALeaseTimeout: dataset.DayDuration(2 * time.Minute), - RetentionPeriod: dataset.DayDuration(15 * 24 * time.Hour), + HALeaseRefresh: util.DayDuration(15 * time.Second), + HALeaseTimeout: util.DayDuration(2 * time.Minute), + RetentionPeriod: util.DayDuration(15 * 24 * time.Hour), }, Traces: dataset.Traces{ - RetentionPeriod: dataset.DayDuration(10 * 24 * time.Hour), + RetentionPeriod: util.DayDuration(10 * 24 * time.Hour), }, } diff --git a/pkg/dataset/duration.go b/pkg/util/day_duration.go similarity index 99% rename from pkg/dataset/duration.go rename to pkg/util/day_duration.go 
index 128e41e4fb..9ae0dbef48 100644 --- a/pkg/dataset/duration.go +++ b/pkg/util/day_duration.go @@ -1,7 +1,8 @@ // This file and its contents are licensed under the Apache License 2.0. // Please see the included NOTICE for copyright information and // LICENSE for a copy of the license. -package dataset + +package util import ( "fmt" From a9fc0486b09ae09e1c1c8846522c3762d22ee80f Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Tue, 25 Oct 2022 17:28:15 +0530 Subject: [PATCH 02/13] Update resolution config and add E2E tests for creation/deletion of rollups. Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 16 ++-- pkg/dataset/downsample.go | 60 ++++++------- pkg/rollup/rollup.go | 67 +++++---------- pkg/rollup/rollup_test.go | 100 +++------------------- pkg/tests/end_to_end_tests/rollup_test.go | 71 +++++++++++++++ 5 files changed, 137 insertions(+), 177 deletions(-) create mode 100644 pkg/tests/end_to_end_tests/rollup_test.go diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index cbd18c6ef2..dcb44778a8 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -44,12 +44,12 @@ type Config struct { // Metrics contains dataset configuration options for metrics data. type Metrics struct { - ChunkInterval DayDuration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` + ChunkInterval util.DayDuration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` Compression *bool `mapstructure:"compress_data" yaml:"compress_data"` // Using pointer to check if the the value was set. 
- HALeaseRefresh DayDuration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` - HALeaseTimeout DayDuration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` - RetentionPeriod DayDuration `mapstructure:"default_retention_period" yaml:"default_retention_period"` - Downsample `mapstructure:"downsample" yaml:"downsample,omitempty"` + HALeaseRefresh util.DayDuration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` + HALeaseTimeout util.DayDuration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` + RetentionPeriod util.DayDuration `mapstructure:"default_retention_period" yaml:"default_retention_period"` + Downsample *Downsample `mapstructure:"downsample" yaml:"downsample,omitempty"` } // Traces contains dataset configuration options for traces data. @@ -67,8 +67,10 @@ func NewConfig(contents string) (cfg Config, err error) { func (c *Config) Apply(conn *pgx.Conn) error { c.applyDefaults() - if err := c.Downsample.Apply(conn); err != nil { - return fmt.Errorf("error applying configuration for downsampling: %w", err) + if c.Metrics.Downsample != nil { + if err := c.Metrics.Downsample.Apply(conn); err != nil { + return fmt.Errorf("error applying configuration for downsampling: %w", err) + } } log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", c.Metrics.ChunkInterval)) diff --git a/pkg/dataset/downsample.go b/pkg/dataset/downsample.go index 4a61275408..cb4ab5b633 100644 --- a/pkg/dataset/downsample.go +++ b/pkg/dataset/downsample.go @@ -7,48 +7,51 @@ package dataset import ( "context" "fmt" + "time" "github.com/jackc/pgx/v4" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/rollup" + "github.com/timescale/promscale/pkg/util" ) -const ( - defaultDownsampleState = true - defaultDownsampleResolution = "short:5m:90d,long:1h:395d" // label:resolution:retention -) +const defaultDownsampleState = true var ( - setDefaultDownsampleStateSQL = "SELECT prom_api.set_automatic_downsample($1)" - 
setDefaultDownsampleResolutionSQL = "SELECT prom_api.set_downsample_resolution($1)" + setDefaultDownsampleStateSQL = "SELECT prom_api.set_automatic_downsample($1)" - defaultDownsampleStateVar = defaultDownsampleState + defaultDownsampleStateVar = defaultDownsampleState + defaultDownsampleResolution = []rollup.DownsampleResolution{ + { + Label: "short", + Resolution: util.DayDuration(5 * time.Minute), + Retention: util.DayDuration(90 * 24 * time.Hour), + }, + { + Label: "long", + Resolution: util.DayDuration(time.Hour), + Retention: util.DayDuration(395 * 24 * time.Hour), + }, + } ) type Downsample struct { - Automatic *bool `yaml:"automatic,omitempty"` - Resolution string `yaml:"resolution,omitempty"` + Automatic *bool `yaml:"automatic,omitempty"` + Resolution []rollup.DownsampleResolution `yaml:"resolutions,omitempty"` } func (d *Downsample) Apply(conn *pgx.Conn) error { d.applyDefaults() - queries := map[string]interface{}{ - setDefaultDownsampleStateSQL: d.Automatic, - } - if *d.Automatic { - if err := handleDownsampling(conn, d.Resolution); err != nil { - return fmt.Errorf("handle downsample: %w", err) - } - queries[setDefaultDownsampleResolutionSQL] = d.Resolution - log.Info("msg", fmt.Sprintf("Setting metric downsample resolution to %s", d.Resolution)) - } log.Info("msg", fmt.Sprintf("Setting metric automatic downsample to %t", *d.Automatic)) + if _, err := conn.Exec(context.Background(), setDefaultDownsampleStateSQL, d.Automatic); err != nil { + return err + } - for sql, param := range queries { - if _, err := conn.Exec(context.Background(), sql, param); err != nil { - return err + if *d.Automatic { + if err := rollup.EnsureRollupWith(conn, d.Resolution); err != nil { + return fmt.Errorf("ensure rollup with: %w", err) } } return nil @@ -59,18 +62,7 @@ func (d *Downsample) applyDefaults() { // In default case, we plan to downsample data. 
d.Automatic = &defaultDownsampleStateVar } - if d.Resolution == "" { + if d.Resolution == nil { d.Resolution = defaultDownsampleResolution } } - -func handleDownsampling(conn *pgx.Conn, resolutionStr string) error { - downsampleResolutions, err := rollup.Parse(resolutionStr) - if err != nil { - return fmt.Errorf("error parsing downsample resolution: %w", err) - } - if err = rollup.EnsureRollupWith(conn, downsampleResolutions); err != nil { - return fmt.Errorf("ensure rollup with: %w", err) - } - return nil -} diff --git a/pkg/rollup/rollup.go b/pkg/rollup/rollup.go index c42cdf8ff3..55ede6ad67 100644 --- a/pkg/rollup/rollup.go +++ b/pkg/rollup/rollup.go @@ -7,60 +7,28 @@ package rollup import ( "context" "fmt" - "strings" "time" "github.com/jackc/pgx/v4" + "github.com/timescale/promscale/pkg/util" ) type DownsampleResolution struct { - label string - resolution util.DayDuration - retention util.DayDuration -} - -// Parse parses the rollup resolution string and returns true if it respects the format of downsampling resolution, -// which is a string of comma separated label:resolution:retention. 
-// Example: short:5m:90d,long:1h:395d -func Parse(resolutionStr string) ([]DownsampleResolution, error) { - resolutions := strings.Split(resolutionStr, ",") - if len(resolutions) < 1 { - return nil, fmt.Errorf("resolutions cannot be less than 1") - } - var r []DownsampleResolution - for _, resolution := range resolutions { - resolution = strings.TrimSpace(resolution) - if resolution == "" { - continue - } - - items := strings.Split(resolution, ":") - if len(items) != 3 { - return nil, fmt.Errorf("expected items not found: needed in format of label:resolution:retention but found %v for %s resolution", strings.Join(items, ":"), resolution) - } - lName := items[0] - - var res util.DayDuration - err := res.UnmarshalText([]byte(items[1])) - if err != nil { - return nil, fmt.Errorf("error parsing resolution %s: %w", items[1], err) - } - - var retention util.DayDuration - err = retention.UnmarshalText([]byte(items[2])) - if err != nil { - return nil, fmt.Errorf("error parsing retention %s: %w", items[2], err) - } - r = append(r, DownsampleResolution{lName, res, retention}) - } - return r, nil + Label string `yaml:"label"` + Resolution util.DayDuration `yaml:"resolution"` + Retention util.DayDuration `yaml:"retention"` } // EnsureRollupWith ensures "strictly" that the given new resolutions are applied in the database. +// // Note: It follows a "strict" behaviour, meaning any existing resolutions of downsampling in // the database will be removed, so that the all downsampling data in the database strictly // matches the provided newResolutions. +// +// Example: If the DB already contains metric rollups for `short` and `long`, and in dataset-config, +// connector sees `very_short` and `long`, then EnsureRollupWith will remove the `short` downsampled data +// and create `very_short`, while not touching `long`. 
func EnsureRollupWith(conn *pgx.Conn, newResolutions []DownsampleResolution) error { rows, err := conn.Query(context.Background(), "SELECT name, resolution, retention FROM _prom_catalog.rollup") if err != nil { @@ -75,7 +43,7 @@ func EnsureRollupWith(conn *pgx.Conn, newResolutions []DownsampleResolution) err if err := rows.Scan(&lName, &resolution, &retention); err != nil { return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) } - existingResolutions = append(existingResolutions, DownsampleResolution{label: lName, resolution: util.DayDuration(resolution), retention: util.DayDuration(retention)}) + existingResolutions = append(existingResolutions, DownsampleResolution{Label: lName, Resolution: util.DayDuration(resolution), Retention: util.DayDuration(retention)}) } // Determine which resolutions need to be created and deleted from the DB. @@ -96,9 +64,9 @@ func EnsureRollupWith(conn *pgx.Conn, newResolutions []DownsampleResolution) err func createRollups(conn *pgx.Conn, res []DownsampleResolution) error { for _, r := range res { - _, err := conn.Exec(context.Background(), "CALL _prom_catalog.create_metric_rollup($1, $2, $3)", r.label, time.Duration(r.resolution), time.Duration(r.retention)) + _, err := conn.Exec(context.Background(), "CALL _prom_catalog.create_rollup($1, $2, $3)", r.Label, time.Duration(r.Resolution), time.Duration(r.Retention)) if err != nil { - return fmt.Errorf("error creating rollup for %s: %w", r.label, err) + return fmt.Errorf("error creating rollup for %s: %w", r.Label, err) } } return nil @@ -106,21 +74,26 @@ func createRollups(conn *pgx.Conn, res []DownsampleResolution) error { func deleteRollups(conn *pgx.Conn, res []DownsampleResolution) error { for _, r := range res { - _, err := conn.Exec(context.Background(), "CALL _prom_catalog.delete_metric_rollup($1)", r.label) + _, err := conn.Exec(context.Background(), "CALL _prom_catalog.delete_rollup($1)", r.Label) if err != nil { - return fmt.Errorf("error deleting 
rollup for %s: %w", r.label, err) + return fmt.Errorf("error deleting rollup for %s: %w", r.Label, err) } } return nil } // diff returns the elements of a that are not in b. +// +// We need this since we want to support a "strict" behaviour in downsampling. This basically means, to have the exact +// downsampling data in the DB based on what's mentioned in the dataset-config. +// +// See the comment for EnsureRollupWith for example. func diff(a, b []DownsampleResolution) []DownsampleResolution { var difference []DownsampleResolution for i := range a { found := false for j := range b { - if a[i].label == b[j].label { + if a[i].Label == b[j].Label { found = true break } diff --git a/pkg/rollup/rollup_test.go b/pkg/rollup/rollup_test.go index 10b4b78de1..e9822541c8 100644 --- a/pkg/rollup/rollup_test.go +++ b/pkg/rollup/rollup_test.go @@ -6,88 +6,10 @@ package rollup import ( "testing" - "time" "github.com/stretchr/testify/require" - "github.com/timescale/promscale/pkg/util" ) -func TestParse(t *testing.T) { - tcs := []struct { - name string - input string - numResolutions int - parsedResolution []DownsampleResolution - shouldFail bool - }{ - { - name: "default entries", - input: "short:5m:90d,long:1h:395d", - parsedResolution: []DownsampleResolution{ - {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - {"long", util.DayDuration(time.Hour), util.DayDuration(395 * 24 * time.Hour)}, - }, - numResolutions: 2, - }, - { - name: "default entries with space", - input: "short:5m:90d, long:1h:395d", - parsedResolution: []DownsampleResolution{ - {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - {"long", util.DayDuration(time.Hour), util.DayDuration(395 * 24 * time.Hour)}, - }, - numResolutions: 2, - }, - { - name: "only short", - input: "short:5m:90d", - parsedResolution: []DownsampleResolution{ - {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - }, - numResolutions: 1, 
- }, - { - name: "only short with comma", - input: "short:5m:90d,", - parsedResolution: []DownsampleResolution{ - {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - }, - numResolutions: 1, - }, - { - name: "only short with space", - input: "short:5m:90d, ", - parsedResolution: []DownsampleResolution{ - {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - }, - numResolutions: 1, - }, - { - name: "mix", - input: "short:5m:90d,long:1h:395d, very_long:2d:365d , very_short:1m:90d", - parsedResolution: []DownsampleResolution{ - {"short", util.DayDuration(5 * time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - {"long", util.DayDuration(time.Hour), util.DayDuration(395 * 24 * time.Hour)}, - {"very_long", util.DayDuration(2 * 24 * time.Hour), util.DayDuration(365 * 24 * time.Hour)}, - {"very_short", util.DayDuration(time.Minute), util.DayDuration(90 * 24 * time.Hour)}, - }, - numResolutions: 4, - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - resolutions, err := Parse(tc.input) - if tc.shouldFail { - require.Error(t, err, tc.name) - return - } - require.NoError(t, err, tc.name) - require.Equal(t, tc.numResolutions, len(resolutions), tc.name) - require.Equal(t, tc.parsedResolution, resolutions, tc.name) - }) - } -} - func TestDiff(t *testing.T) { tcs := []struct { name string @@ -95,32 +17,32 @@ func TestDiff(t *testing.T) { }{ { name: "some inclusive elements", - a: []DownsampleResolution{{label: "a"}, {label: "b"}, {label: "c"}, {label: "d"}}, - b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, - expected: []DownsampleResolution{{label: "a"}, {label: "b"}}, + a: []DownsampleResolution{{Label: "a"}, {Label: "b"}, {Label: "c"}, {Label: "d"}}, + b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, + expected: []DownsampleResolution{{Label: "a"}, {Label: "b"}}, }, { name: "b superset of a", - a: []DownsampleResolution{{label: "c"}, {label: 
"d"}}, - b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, + a: []DownsampleResolution{{Label: "c"}, {Label: "d"}}, + b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, expected: []DownsampleResolution(nil), }, { name: "a empty", a: []DownsampleResolution{}, - b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, + b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, expected: []DownsampleResolution(nil), }, { name: "all elements exclusive", - a: []DownsampleResolution{{label: "a"}}, - b: []DownsampleResolution{{label: "c"}, {label: "d"}, {label: "e"}}, - expected: []DownsampleResolution{{label: "a"}}, + a: []DownsampleResolution{{Label: "a"}}, + b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, + expected: []DownsampleResolution{{Label: "a"}}, }, { name: "same", - a: []DownsampleResolution{{label: "a"}, {label: "b"}, {label: "c"}, {label: "d"}}, - b: []DownsampleResolution{{label: "a"}, {label: "b"}, {label: "c"}, {label: "d"}}, + a: []DownsampleResolution{{Label: "a"}, {Label: "b"}, {Label: "c"}, {Label: "d"}}, + b: []DownsampleResolution{{Label: "a"}, {Label: "b"}, {Label: "c"}, {Label: "d"}}, expected: []DownsampleResolution(nil), }, { diff --git a/pkg/tests/end_to_end_tests/rollup_test.go b/pkg/tests/end_to_end_tests/rollup_test.go new file mode 100644 index 0000000000..6806d38ba6 --- /dev/null +++ b/pkg/tests/end_to_end_tests/rollup_test.go @@ -0,0 +1,71 @@ +package end_to_end_tests + +import ( + "context" + "testing" + "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "github.com/stretchr/testify/require" + + "github.com/timescale/promscale/pkg/rollup" + "github.com/timescale/promscale/pkg/util" +) + +func TestRollupCreationDeletion(t *testing.T) { + withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { + rollupResolutions := []rollup.DownsampleResolution{ + { + Label: "short", + Resolution: util.DayDuration(5 * time.Minute), + Retention: 
util.DayDuration(30 * 24 * time.Hour), + }, + } + + pgCon, err := db.Acquire(context.Background()) + require.NoError(t, err) + defer pgCon.Release() + + err = rollup.EnsureRollupWith(pgCon.Conn(), rollupResolutions) + require.NoError(t, err) + + verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[0].Label, time.Duration(rollupResolutions[0].Resolution), time.Duration(rollupResolutions[0].Retention), false) + + rollupResolutions = append(rollupResolutions, rollup.DownsampleResolution{ + Label: "long", + Resolution: util.DayDuration(time.Hour), + Retention: util.DayDuration(395 * 24 * time.Hour), + }) + + err = rollup.EnsureRollupWith(pgCon.Conn(), rollupResolutions) + require.NoError(t, err) + + verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[1].Label, time.Duration(rollupResolutions[1].Resolution), time.Duration(rollupResolutions[1].Retention), false) + + // Remove the first entry and see if the entry is removed or not. + newRes := rollupResolutions[1:] + err = rollup.EnsureRollupWith(pgCon.Conn(), newRes) + require.NoError(t, err) + // Check if long exists. + verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[1].Label, time.Duration(rollupResolutions[1].Resolution), time.Duration(rollupResolutions[1].Retention), false) + // Check if short does not exist. 
+ verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[0].Label, time.Duration(rollupResolutions[0].Resolution), time.Duration(rollupResolutions[0].Retention), true) + }) +} + +func verifyRollupExistence(t testing.TB, pgCon *pgx.Conn, name string, resolution, retention time.Duration, shouldError bool) { + var ( + rName string + rResolution time.Duration + rRetention time.Duration + ) + err := pgCon.QueryRow(context.Background(), "SELECT name, resolution, retention FROM _prom_catalog.rollup WHERE name = $1", name).Scan(&rName, &rResolution, &rRetention) + if shouldError { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, resolution, rResolution) + require.Equal(t, retention, rRetention) +} From 7125609208d9567dae08cc6e2a8942ffdf7129bd Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Wed, 2 Nov 2022 15:12:48 +0530 Subject: [PATCH 03/13] Refactor DayDuration -> day.Duation Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 22 ++++++------ pkg/dataset/config_test.go | 34 +++++++++---------- pkg/dataset/downsample.go | 10 +++--- .../day/duration.go} | 12 +++---- pkg/rollup/rollup.go | 10 +++--- .../end_to_end_tests/config_dataset_test.go | 12 +++---- pkg/tests/end_to_end_tests/rollup_test.go | 10 +++--- 7 files changed, 55 insertions(+), 55 deletions(-) rename pkg/{util/day_duration.go => internal/day/duration.go} (90%) diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index dcb44778a8..3f1a3cfad1 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -12,8 +12,8 @@ import ( "github.com/jackc/pgx/v4" "gopkg.in/yaml.v2" + "github.com/timescale/promscale/pkg/internal/day" "github.com/timescale/promscale/pkg/log" - "github.com/timescale/promscale/pkg/util" ) const ( @@ -44,17 +44,17 @@ type Config struct { // Metrics contains dataset configuration options for metrics data. 
type Metrics struct { - ChunkInterval util.DayDuration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` + ChunkInterval day.Duration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` Compression *bool `mapstructure:"compress_data" yaml:"compress_data"` // Using pointer to check if the the value was set. - HALeaseRefresh util.DayDuration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` - HALeaseTimeout util.DayDuration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` - RetentionPeriod util.DayDuration `mapstructure:"default_retention_period" yaml:"default_retention_period"` + HALeaseRefresh day.Duration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` + HALeaseTimeout day.Duration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` + RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` Downsample *Downsample `mapstructure:"downsample" yaml:"downsample,omitempty"` } // Traces contains dataset configuration options for traces data. type Traces struct { - RetentionPeriod DayDuration `mapstructure:"default_retention_period" yaml:"default_retention_period"` + RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` } // NewConfig creates a new dataset config based on the configuration YAML contents. 
@@ -100,21 +100,21 @@ func (c *Config) Apply(conn *pgx.Conn) error { func (c *Config) applyDefaults() { if c.Metrics.ChunkInterval <= 0 { - c.Metrics.ChunkInterval = util.DayDuration(defaultMetricChunkInterval) + c.Metrics.ChunkInterval = day.Duration(defaultMetricChunkInterval) } if c.Metrics.Compression == nil { c.Metrics.Compression = &defaultMetricCompressionVar } if c.Metrics.HALeaseRefresh <= 0 { - c.Metrics.HALeaseRefresh = util.DayDuration(defaultMetricHALeaseRefresh) + c.Metrics.HALeaseRefresh = day.Duration(defaultMetricHALeaseRefresh) } if c.Metrics.HALeaseTimeout <= 0 { - c.Metrics.HALeaseTimeout = util.DayDuration(defaultMetricHALeaseTimeout) + c.Metrics.HALeaseTimeout = day.Duration(defaultMetricHALeaseTimeout) } if c.Metrics.RetentionPeriod <= 0 { - c.Metrics.RetentionPeriod = util.DayDuration(defaultMetricRetentionPeriod) + c.Metrics.RetentionPeriod = day.Duration(defaultMetricRetentionPeriod) } if c.Traces.RetentionPeriod <= 0 { - c.Traces.RetentionPeriod = util.DayDuration(defaultTraceRetentionPeriod) + c.Traces.RetentionPeriod = day.Duration(defaultTraceRetentionPeriod) } } diff --git a/pkg/dataset/config_test.go b/pkg/dataset/config_test.go index 2cc217651f..9a0d0aefd5 100644 --- a/pkg/dataset/config_test.go +++ b/pkg/dataset/config_test.go @@ -4,11 +4,11 @@ package dataset import ( - "github.com/timescale/promscale/pkg/util" "testing" "time" "github.com/stretchr/testify/require" + "github.com/timescale/promscale/pkg/internal/day" ) var testCompressionSetting = true @@ -49,7 +49,7 @@ func TestNewConfig(t *testing.T) { default_retention_period: 3d2h`, cfg: Config{ Metrics: Metrics{ - RetentionPeriod: util.DayDuration(((3 * 24) + 2) * time.Hour), + RetentionPeriod: day.Duration(((3 * 24) + 2) * time.Hour), }, }, }, @@ -65,14 +65,14 @@ traces: default_retention_period: 15d`, cfg: Config{ Metrics: Metrics{ - ChunkInterval: util.DayDuration(3 * time.Hour), + ChunkInterval: day.Duration(3 * time.Hour), Compression: &testCompressionSetting, - 
HALeaseRefresh: util.DayDuration(2 * time.Minute), - HALeaseTimeout: util.DayDuration(5 * time.Second), - RetentionPeriod: util.DayDuration(30 * 24 * time.Hour), + HALeaseRefresh: day.Duration(2 * time.Minute), + HALeaseTimeout: day.Duration(5 * time.Second), + RetentionPeriod: day.Duration(30 * 24 * time.Hour), }, Traces: Traces{ - RetentionPeriod: util.DayDuration(15 * 24 * time.Hour), + RetentionPeriod: day.Duration(15 * 24 * time.Hour), }, }, }, @@ -101,14 +101,14 @@ func TestApplyDefaults(t *testing.T) { t, Config{ Metrics: Metrics{ - ChunkInterval: util.DayDuration(defaultMetricChunkInterval), + ChunkInterval: day.Duration(defaultMetricChunkInterval), Compression: &defaultMetricCompressionVar, - HALeaseRefresh: util.DayDuration(defaultMetricHALeaseRefresh), - HALeaseTimeout: util.DayDuration(defaultMetricHALeaseTimeout), - RetentionPeriod: util.DayDuration(defaultMetricRetentionPeriod), + HALeaseRefresh: day.Duration(defaultMetricHALeaseRefresh), + HALeaseTimeout: day.Duration(defaultMetricHALeaseTimeout), + RetentionPeriod: day.Duration(defaultMetricRetentionPeriod), }, Traces: Traces{ - RetentionPeriod: util.DayDuration(defaultTraceRetentionPeriod), + RetentionPeriod: day.Duration(defaultTraceRetentionPeriod), }, }, c, @@ -116,14 +116,14 @@ func TestApplyDefaults(t *testing.T) { untouched := Config{ Metrics: Metrics{ - ChunkInterval: util.DayDuration(3 * time.Hour), + ChunkInterval: day.Duration(3 * time.Hour), Compression: &testCompressionSetting, - HALeaseRefresh: util.DayDuration(2 * time.Minute), - HALeaseTimeout: util.DayDuration(5 * time.Second), - RetentionPeriod: util.DayDuration(30 * 24 * time.Hour), + HALeaseRefresh: day.Duration(2 * time.Minute), + HALeaseTimeout: day.Duration(5 * time.Second), + RetentionPeriod: day.Duration(30 * 24 * time.Hour), }, Traces: Traces{ - RetentionPeriod: util.DayDuration(15 * 24 * time.Hour), + RetentionPeriod: day.Duration(15 * 24 * time.Hour), }, } diff --git a/pkg/dataset/downsample.go b/pkg/dataset/downsample.go 
index cb4ab5b633..f65b0cff84 100644 --- a/pkg/dataset/downsample.go +++ b/pkg/dataset/downsample.go @@ -11,9 +11,9 @@ import ( "github.com/jackc/pgx/v4" + "github.com/timescale/promscale/pkg/internal/day" "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/rollup" - "github.com/timescale/promscale/pkg/util" ) const defaultDownsampleState = true @@ -25,13 +25,13 @@ var ( defaultDownsampleResolution = []rollup.DownsampleResolution{ { Label: "short", - Resolution: util.DayDuration(5 * time.Minute), - Retention: util.DayDuration(90 * 24 * time.Hour), + Resolution: day.Duration(5 * time.Minute), + Retention: day.Duration(90 * 24 * time.Hour), }, { Label: "long", - Resolution: util.DayDuration(time.Hour), - Retention: util.DayDuration(395 * 24 * time.Hour), + Resolution: day.Duration(time.Hour), + Retention: day.Duration(395 * 24 * time.Hour), }, } ) diff --git a/pkg/util/day_duration.go b/pkg/internal/day/duration.go similarity index 90% rename from pkg/util/day_duration.go rename to pkg/internal/day/duration.go index 9ae0dbef48..4ad324533b 100644 --- a/pkg/util/day_duration.go +++ b/pkg/internal/day/duration.go @@ -2,7 +2,7 @@ // Please see the included NOTICE for copyright information and // LICENSE for a copy of the license. -package util +package day import ( "fmt" @@ -18,13 +18,13 @@ const ( unknownUnitDErrorPrefix = `time: unknown unit "d"` ) -// DayDuration acts like a time.Duration with support for "d" unit +// Duration acts like a time.Duration with support for "d" unit // which is used for specifying number of days in duration. -type DayDuration time.Duration +type Duration time.Duration // UnmarshalText unmarshals strings into DayDuration values while // handling the day unit. It leans heavily into time.ParseDuration. 
-func (d *DayDuration) UnmarshalText(s []byte) error { +func (d *Duration) UnmarshalText(s []byte) error { val, err := time.ParseDuration(string(s)) if err != nil { // Check for specific error indicating we are using days unit. @@ -37,7 +37,7 @@ func (d *DayDuration) UnmarshalText(s []byte) error { return err } } - *d = DayDuration(val) + *d = Duration(val) return nil } @@ -68,7 +68,7 @@ func handleDays(s []byte) (time.Duration, error) { } // String returns a string value of DayDuration. -func (d DayDuration) String() string { +func (d Duration) String() string { return time.Duration(d).String() } diff --git a/pkg/rollup/rollup.go b/pkg/rollup/rollup.go index 55ede6ad67..663659d4e9 100644 --- a/pkg/rollup/rollup.go +++ b/pkg/rollup/rollup.go @@ -11,13 +11,13 @@ import ( "github.com/jackc/pgx/v4" - "github.com/timescale/promscale/pkg/util" + "github.com/timescale/promscale/pkg/internal/day" ) type DownsampleResolution struct { - Label string `yaml:"label"` - Resolution util.DayDuration `yaml:"resolution"` - Retention util.DayDuration `yaml:"retention"` + Label string `yaml:"label"` + Resolution day.Duration `yaml:"resolution"` + Retention day.Duration `yaml:"retention"` } // EnsureRollupWith ensures "strictly" that the given new resolutions are applied in the database. @@ -43,7 +43,7 @@ func EnsureRollupWith(conn *pgx.Conn, newResolutions []DownsampleResolution) err if err := rows.Scan(&lName, &resolution, &retention); err != nil { return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) } - existingResolutions = append(existingResolutions, DownsampleResolution{Label: lName, Resolution: util.DayDuration(resolution), Retention: util.DayDuration(retention)}) + existingResolutions = append(existingResolutions, DownsampleResolution{Label: lName, Resolution: day.Duration(resolution), Retention: day.Duration(retention)}) } // Determine which resolutions need to be created and deleted from the DB. 
diff --git a/pkg/tests/end_to_end_tests/config_dataset_test.go b/pkg/tests/end_to_end_tests/config_dataset_test.go index f07f169d23..409900e2ef 100644 --- a/pkg/tests/end_to_end_tests/config_dataset_test.go +++ b/pkg/tests/end_to_end_tests/config_dataset_test.go @@ -2,7 +2,6 @@ package end_to_end_tests import ( "context" - "github.com/timescale/promscale/pkg/util" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/dataset" + "github.com/timescale/promscale/pkg/internal/day" ) func TestDatasetConfigApply(t *testing.T) { @@ -29,14 +29,14 @@ func TestDatasetConfigApply(t *testing.T) { cfg := dataset.Config{ Metrics: dataset.Metrics{ - ChunkInterval: util.DayDuration(4 * time.Hour), + ChunkInterval: day.Duration(4 * time.Hour), Compression: &disableCompression, - HALeaseRefresh: util.DayDuration(15 * time.Second), - HALeaseTimeout: util.DayDuration(2 * time.Minute), - RetentionPeriod: util.DayDuration(15 * 24 * time.Hour), + HALeaseRefresh: day.Duration(15 * time.Second), + HALeaseTimeout: day.Duration(2 * time.Minute), + RetentionPeriod: day.Duration(15 * 24 * time.Hour), }, Traces: dataset.Traces{ - RetentionPeriod: util.DayDuration(10 * 24 * time.Hour), + RetentionPeriod: day.Duration(10 * 24 * time.Hour), }, } diff --git a/pkg/tests/end_to_end_tests/rollup_test.go b/pkg/tests/end_to_end_tests/rollup_test.go index 6806d38ba6..ce36dc51e9 100644 --- a/pkg/tests/end_to_end_tests/rollup_test.go +++ b/pkg/tests/end_to_end_tests/rollup_test.go @@ -9,8 +9,8 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/stretchr/testify/require" + "github.com/timescale/promscale/pkg/internal/day" "github.com/timescale/promscale/pkg/rollup" - "github.com/timescale/promscale/pkg/util" ) func TestRollupCreationDeletion(t *testing.T) { @@ -18,8 +18,8 @@ func TestRollupCreationDeletion(t *testing.T) { rollupResolutions := []rollup.DownsampleResolution{ { Label: "short", - Resolution: 
util.DayDuration(5 * time.Minute), - Retention: util.DayDuration(30 * 24 * time.Hour), + Resolution: day.Duration(5 * time.Minute), + Retention: day.Duration(30 * 24 * time.Hour), }, } @@ -34,8 +34,8 @@ func TestRollupCreationDeletion(t *testing.T) { rollupResolutions = append(rollupResolutions, rollup.DownsampleResolution{ Label: "long", - Resolution: util.DayDuration(time.Hour), - Retention: util.DayDuration(395 * 24 * time.Hour), + Resolution: day.Duration(time.Hour), + Retention: day.Duration(395 * 24 * time.Hour), }) err = rollup.EnsureRollupWith(pgCon.Conn(), rollupResolutions) From f2bdb387125e4f3a68b7e9ac1da0cdf5a6b61a48 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Fri, 11 Nov 2022 12:25:42 +0530 Subject: [PATCH 04/13] Update tests related to register_metric_view(). Signed-off-by: Harkishen-Singh --- .../end_to_end_tests/continuous_agg_test.go | 6 +++--- pkg/tests/end_to_end_tests/create_test.go | 18 +++++++++--------- .../end_to_end_tests/query_integration_test.go | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/tests/end_to_end_tests/continuous_agg_test.go b/pkg/tests/end_to_end_tests/continuous_agg_test.go index 850140e590..56883b6d29 100644 --- a/pkg/tests/end_to_end_tests/continuous_agg_test.go +++ b/pkg/tests/end_to_end_tests/continuous_agg_test.go @@ -190,7 +190,7 @@ WITH (timescaledb.continuous) AS t.Fatalf("unexpected error while creating metric view: %s", err) } - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('cagg_schema', 'cagg')"); err != nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('cagg_schema', 'cagg', NULL)"); err != nil { t.Fatalf("unexpected error while registering metric view: %s", err) } @@ -372,7 +372,7 @@ WITH (timescaledb.continuous) AS FROM prom_data.test GROUP BY public.time_bucket('1hour', time), series_id`) require.NoError(t, err) - _, err = db.Exec(context.Background(), "SELECT 
prom_api.register_metric_view('cagg_schema', 'cagg')") + _, err = db.Exec(context.Background(), "SELECT prom_api.register_metric_view('cagg_schema', 'cagg', NULL)") require.NoError(t, err) _, err = db.Exec(context.Background(), "SELECT prom_api.set_metric_retention_period('cagg_schema', 'cagg', INTERVAL '180 days')") @@ -450,7 +450,7 @@ WITH (timescaledb.continuous) AS t.Fatalf("unexpected error while creating metric view: %s", err) } - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('public', 'tw_1hour')"); err != nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('public', 'tw_1hour', NULL)"); err != nil { t.Fatalf("unexpected error while registering metric view: %s", err) } diff --git a/pkg/tests/end_to_end_tests/create_test.go b/pkg/tests/end_to_end_tests/create_test.go index 978da08863..0687949a0b 100644 --- a/pkg/tests/end_to_end_tests/create_test.go +++ b/pkg/tests/end_to_end_tests/create_test.go @@ -1351,7 +1351,7 @@ func TestRegisterMetricView(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { // Cannot register non-existant schema. - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('nonexistant', 'missing')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('nonexistant', 'missing', NULL)"); err == nil { t.Fatal("Should not be able to register a metric view from a non-existant schema") } @@ -1360,7 +1360,7 @@ func TestRegisterMetricView(t *testing.T) { } // Cannot register non-existant view. 
- if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'missing')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'missing', NULL)"); err == nil { t.Fatal("Should not be able to register a metric view from a non-existant metric view") } @@ -1394,7 +1394,7 @@ func TestRegisterMetricView(t *testing.T) { if _, err = db.Exec(context.Background(), `CREATE VIEW prom_view.metric_view_in_data_schema AS SELECT * FROM prom_data."rawMetric"`); err != nil { t.Fatalf("unexpected error while creating view in data schema: %s", err) } - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_data', 'metric_view_in_data_schema')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_data', 'metric_view_in_data_schema', NULL)"); err == nil { t.Fatal("Should not be able to register a metric view in data schema") } @@ -1402,7 +1402,7 @@ func TestRegisterMetricView(t *testing.T) { if _, err = db.Exec(context.Background(), `CREATE VIEW prom_view.metric_view_bad_columns AS SELECT time, series_id, true as bad_column FROM prom_data."rawMetric"`); err != nil { t.Fatalf("unexpected error while creating view: %s", err) } - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view_bad_columns')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view_bad_columns', NULL)"); err == nil { t.Fatal("Should not be able to register a metric view with different columns than raw metric") } @@ -1410,7 +1410,7 @@ func TestRegisterMetricView(t *testing.T) { if _, err = db.Exec(context.Background(), `CREATE VIEW prom_view.metric_view_bad_column_types AS SELECT time, series_id, true as value FROM prom_data."rawMetric"`); err != nil { t.Fatalf("unexpected error while creating view: %s", err) } - if _, err := 
db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view_bad_column_types')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view_bad_column_types', NULL)"); err == nil { t.Fatal("Should not be able to register a metric view with column types different than raw metric") } @@ -1418,7 +1418,7 @@ func TestRegisterMetricView(t *testing.T) { if _, err = db.Exec(context.Background(), `CREATE VIEW prom_view.metric_view_not_based AS SELECT time, series_id, 1.0 as value FROM prom_view."metric_view_bad_columns"`); err != nil { t.Fatalf("unexpected error while creating view: %s", err) } - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view_not_based')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view_not_based', NULL)"); err == nil { t.Fatal("Should not be able to register a metric view with column types different than raw metric") } @@ -1426,7 +1426,7 @@ func TestRegisterMetricView(t *testing.T) { if _, err = db.Exec(context.Background(), `CREATE VIEW prom_view.metric_view AS SELECT * FROM prom_data."rawMetric"`); err != nil { t.Fatalf("unexpected error while creating view: %s", err) } - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view')"); err != nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view', NULL)"); err != nil { t.Fatalf("Error creating valid metric view: %v", err) } @@ -1448,12 +1448,12 @@ func TestRegisterMetricView(t *testing.T) { } // Cannot register the same view twice. 
- if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view')"); err == nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view', NULL)"); err == nil { t.Fatal("Should not be able to register the same view twice") } // Should succeed if we register same view twice but also use `if_not_exists` - if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view', true)"); err != nil { + if _, err := db.Exec(context.Background(), "SELECT prom_api.register_metric_view('prom_view', 'metric_view', NULL, true)"); err != nil { t.Fatalf("Should be able to register the same view twice when using `if_not_exists`: %v", err) } diff --git a/pkg/tests/end_to_end_tests/query_integration_test.go b/pkg/tests/end_to_end_tests/query_integration_test.go index 2b25312305..0f75d24b40 100644 --- a/pkg/tests/end_to_end_tests/query_integration_test.go +++ b/pkg/tests/end_to_end_tests/query_integration_test.go @@ -766,7 +766,7 @@ func createMetricView(db *pgxpool.Pool, t testing.TB, schemaName, viewName, metr if _, err := db.Exec(context.Background(), fmt.Sprintf(`CREATE VIEW "%s"."%s" AS SELECT * FROM prom_data."%s"`, schemaName, viewName, metricName)); err != nil { t.Fatalf("unexpected error while creating metric view: %s", err) } - if _, err := db.Exec(context.Background(), fmt.Sprintf("SELECT prom_api.register_metric_view('%s', '%s')", schemaName, viewName)); err != nil { + if _, err := db.Exec(context.Background(), fmt.Sprintf("SELECT prom_api.register_metric_view('%s', '%s', NULL)", schemaName, viewName)); err != nil { t.Fatalf("unexpected error while registering metric view: %s", err) } } From e7e515c9d8edbd4be04c596308ac0c1f5cdb168d Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Tue, 22 Nov 2022 22:51:44 +0530 Subject: [PATCH 05/13] Adds support to instrument metric-rollups behaviour Signed-off-by: Harkishen-Singh --- 
pkg/pgmodel/metrics/database/database.go | 23 +++++-- pkg/pgmodel/metrics/database/metric_series.go | 66 +++++++++++++++++++ pkg/pgmodel/metrics/database/metrics.go | 42 ++++++++++++ 3 files changed, 124 insertions(+), 7 deletions(-) create mode 100644 pkg/pgmodel/metrics/database/metric_series.go diff --git a/pkg/pgmodel/metrics/database/database.go b/pkg/pgmodel/metrics/database/database.go index 9241a32294..8e877c2837 100644 --- a/pkg/pgmodel/metrics/database/database.go +++ b/pkg/pgmodel/metrics/database/database.go @@ -19,10 +19,11 @@ type Engine interface { } type metricsEngineImpl struct { - conn pgxconn.PgxConn - ctx context.Context - isRunning atomic.Value - metrics []metricQueryWrap + conn pgxconn.PgxConn + ctx context.Context + isRunning atomic.Value + metrics []metricQueryWrap + metricSeries []metricsWithSeries } // NewEngine creates an engine that performs database metrics evaluation every evalInterval. @@ -33,9 +34,10 @@ type metricsEngineImpl struct { // will cause evaluation errors. func NewEngine(ctx context.Context, conn pgxconn.PgxConn) *metricsEngineImpl { engine := &metricsEngineImpl{ - conn: conn, - ctx: ctx, - metrics: metrics, + conn: conn, + ctx: ctx, + metrics: metrics, + metricSeries: metricSeries, } engine.isRunning.Store(false) return engine @@ -142,6 +144,13 @@ func (e *metricsEngineImpl) Update() error { return err } handleResults(results, batchMetrics) + + for _, ms := range e.metricSeries { + if err = ms.update(e.conn); err != nil { + log.Warn("msg", "error evaluating metrics with series", "err", err.Error()) + continue // We shouldn't stop everything if this fails. 
+ } + } return results.Close() } diff --git a/pkg/pgmodel/metrics/database/metric_series.go b/pkg/pgmodel/metrics/database/metric_series.go new file mode 100644 index 0000000000..fd84fc63ee --- /dev/null +++ b/pkg/pgmodel/metrics/database/metric_series.go @@ -0,0 +1,66 @@ +package database + +import ( + "context" + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/timescale/promscale/pkg/pgxconn" + "github.com/timescale/promscale/pkg/util" +) + +var ( + caggsRefreshTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "caggs_refresh_total", + Help: "Total number of caggs policy executed.", + }, []string{"refresh_interval"}) + caggsRefreshSuccess = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "caggs_refresh_success", + Help: "Total number of caggs policy executed successfully.", + }, []string{"refresh_interval"}) +) + +func init() { + prometheus.MustRegister(caggsRefreshSuccess, caggsRefreshTotal) +} + +type metricsWithSeries struct { + update func(conn pgxconn.PgxConn) error +} + +var metricSeries = []metricsWithSeries{ + { + update: func(conn pgxconn.PgxConn) error { + rows, err := conn.Query(context.Background(), ` +SELECT + total_successes, + total_runs, + (config ->> 'refresh_interval')::INTERVAL +FROM timescaledb_information.jobs j +INNER JOIN timescaledb_information.job_stats js ON ( j.job_id = js.job_id AND j.proc_name = 'execute_caggs_refresh_policy') + `) + if err != nil { + return fmt.Errorf("error running instrumentation for execute_caggs_refresh_policy: %w", err) + } + defer rows.Close() + for rows.Next() { + var ( + success, total int64 + refreshInterval time.Duration + ) + err = rows.Scan(&success, &total, &refreshInterval) + if err != nil { + return fmt.Errorf("error scanning values for execute_caggs_refresh_policy: %w", err) + } + 
caggsRefreshSuccess.With(prometheus.Labels{"refresh_interval": refreshInterval.String()}).Set(float64(success)) + caggsRefreshTotal.With(prometheus.Labels{"refresh_interval": refreshInterval.String()}).Set(float64(total)) + } + return nil + }, + }, +} diff --git a/pkg/pgmodel/metrics/database/metrics.go b/pkg/pgmodel/metrics/database/metrics.go index d02728776e..28a66556b7 100644 --- a/pkg/pgmodel/metrics/database/metrics.go +++ b/pkg/pgmodel/metrics/database/metrics.go @@ -586,6 +586,48 @@ var metrics = []metricQueryWrap{ }, ), query: `select count(*)::bigint from _prom_catalog.metric`, + }, { + metrics: gauges( + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "caggs_compression_policy_total", + Help: "Total number of caggs compression policy executed.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "caggs_compression_policy_success", + Help: "Total number of caggs compression policy executed successfully.", + }, + ), + query: ` +SELECT + total_runs, + total_successes +FROM timescaledb_information.jobs j + INNER JOIN timescaledb_information.job_stats js ON ( j.job_id = js.job_id AND j.proc_name = 'execute_caggs_compression_policy')`, + }, { + metrics: gauges( + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "caggs_retention_policy_total", + Help: "Total number of caggs retention policy executed.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "caggs_retention_policy_success", + Help: "Total number of caggs retention policy executed successfully.", + }, + ), + query: ` +SELECT + total_runs, + total_successes +FROM timescaledb_information.jobs j + INNER JOIN timescaledb_information.job_stats js ON ( j.job_id = js.job_id AND j.proc_name = 'execute_caggs_retention_policy')`, }, { metrics: gauges( From 432d5fe8f8af9c8a96f462d2f8820604249cbd23 Mon Sep 17 00:00:00 2001 From: 
Harkishen-Singh Date: Thu, 1 Dec 2022 17:12:50 +0530 Subject: [PATCH 06/13] Update rollups creation/deletion/updation using dataset-config. Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 9 +- pkg/dataset/downsample.go | 68 ---------- pkg/rollup/config.go | 97 ++++++++++++++ pkg/rollup/rollup.go | 118 ++++++++++-------- pkg/rollup/rollup_test.go | 60 --------- pkg/runner/client.go | 4 +- .../end_to_end_tests/config_dataset_test.go | 4 +- pkg/tests/end_to_end_tests/rollup_test.go | 72 ++++++++--- 8 files changed, 229 insertions(+), 203 deletions(-) delete mode 100644 pkg/dataset/downsample.go create mode 100644 pkg/rollup/config.go delete mode 100644 pkg/rollup/rollup_test.go diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index 3f1a3cfad1..aebd841234 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -14,6 +14,7 @@ import ( "github.com/timescale/promscale/pkg/internal/day" "github.com/timescale/promscale/pkg/log" + "github.com/timescale/promscale/pkg/rollup" ) const ( @@ -49,7 +50,7 @@ type Metrics struct { HALeaseRefresh day.Duration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` HALeaseTimeout day.Duration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` - Downsample *Downsample `mapstructure:"downsample" yaml:"downsample,omitempty"` + Rollups *rollup.Config `mapstructure:"rollups" yaml:"rollups,omitempty"` } // Traces contains dataset configuration options for traces data. @@ -64,11 +65,11 @@ func NewConfig(contents string) (cfg Config, err error) { } // Apply applies the configuration to the database via the supplied DB connection. 
-func (c *Config) Apply(conn *pgx.Conn) error { +func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { c.applyDefaults() - if c.Metrics.Downsample != nil { - if err := c.Metrics.Downsample.Apply(conn); err != nil { + if c.Metrics.Rollups != nil { + if err := c.Metrics.Rollups.Apply(ctx, conn); err != nil { return fmt.Errorf("error applying configuration for downsampling: %w", err) } } diff --git a/pkg/dataset/downsample.go b/pkg/dataset/downsample.go deleted file mode 100644 index f65b0cff84..0000000000 --- a/pkg/dataset/downsample.go +++ /dev/null @@ -1,68 +0,0 @@ -// This file and its contents are licensed under the Apache License 2.0. -// Please see the included NOTICE for copyright information and -// LICENSE for a copy of the license. - -package dataset - -import ( - "context" - "fmt" - "time" - - "github.com/jackc/pgx/v4" - - "github.com/timescale/promscale/pkg/internal/day" - "github.com/timescale/promscale/pkg/log" - "github.com/timescale/promscale/pkg/rollup" -) - -const defaultDownsampleState = true - -var ( - setDefaultDownsampleStateSQL = "SELECT prom_api.set_automatic_downsample($1)" - - defaultDownsampleStateVar = defaultDownsampleState - defaultDownsampleResolution = []rollup.DownsampleResolution{ - { - Label: "short", - Resolution: day.Duration(5 * time.Minute), - Retention: day.Duration(90 * 24 * time.Hour), - }, - { - Label: "long", - Resolution: day.Duration(time.Hour), - Retention: day.Duration(395 * 24 * time.Hour), - }, - } -) - -type Downsample struct { - Automatic *bool `yaml:"automatic,omitempty"` - Resolution []rollup.DownsampleResolution `yaml:"resolutions,omitempty"` -} - -func (d *Downsample) Apply(conn *pgx.Conn) error { - d.applyDefaults() - - log.Info("msg", fmt.Sprintf("Setting metric automatic downsample to %t", *d.Automatic)) - if _, err := conn.Exec(context.Background(), setDefaultDownsampleStateSQL, d.Automatic); err != nil { - return err - } - - if *d.Automatic { - if err := rollup.EnsureRollupWith(conn, 
d.Resolution); err != nil { - return fmt.Errorf("ensure rollup with: %w", err) - } - } - return nil -} - -func (d *Downsample) applyDefaults() { - if d.Automatic == nil { - // In default case, we plan to downsample data. - d.Automatic = &defaultDownsampleStateVar - } - if d.Resolution == nil { - d.Resolution = defaultDownsampleResolution - } -} diff --git a/pkg/rollup/config.go b/pkg/rollup/config.go new file mode 100644 index 0000000000..d1794bf3b5 --- /dev/null +++ b/pkg/rollup/config.go @@ -0,0 +1,97 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. + +package rollup + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/jackc/pgx/v4" + + "github.com/timescale/promscale/pkg/internal/day" + "github.com/timescale/promscale/pkg/log" +) + +const ( + setDefaultDownsampleStateSQL = "SELECT prom_api.set_automatic_downsample($1)" + + // short and long represent system resolutions. + short = "short" + long = "long" +) + +var ( + defaultDownsampleState = false + useDefaultResolution = false + systemResolution = map[string]Definition{ + short: { + Resolution: day.Duration(5 * time.Minute), + Retention: day.Duration(90 * 24 * time.Hour), + }, + long: { + Resolution: day.Duration(time.Hour), + Retention: day.Duration(395 * 24 * time.Hour), + }, + } +) + +type Config struct { + Enabled *bool `yaml:"enabled,omitempty"` + UseDefaultResolution *bool `yaml:"use_default_resolution"` + Resolutions `yaml:"resolutions,omitempty"` +} + +type Definition struct { + Resolution day.Duration `yaml:"resolution"` + Retention day.Duration `yaml:"retention"` + Delete bool `yaml:"delete"` +} + +type Resolutions map[string]Definition + +func (d *Config) Apply(ctx context.Context, conn *pgx.Conn) error { + d.applyDefaults() + + if containsSystemResolutions(d.Resolutions) { + return fmt.Errorf("'short' and 'long' are system resolutions. 
These cannot be applied as rollup labels") + } + + log.Info("msg", fmt.Sprintf("Setting automatic metric downsample to %t", *d.Enabled)) + if _, err := conn.Exec(context.Background(), setDefaultDownsampleStateSQL, d.Enabled); err != nil { + return err + } + + if *d.Enabled { + if *d.UseDefaultResolution { + d.Resolutions["short"] = systemResolution["short"] + d.Resolutions["long"] = systemResolution["long"] + } + if err := Sync(ctx, conn, d.Resolutions); err != nil { + return fmt.Errorf("ensure rollup with: %w", err) + } + } + return nil +} + +func (d *Config) applyDefaults() { + if d.Enabled == nil { + d.Enabled = &defaultDownsampleState + } + if d.UseDefaultResolution == nil { + d.UseDefaultResolution = &useDefaultResolution + } +} + +func containsSystemResolutions(r Resolutions) bool { + for k := range r { + k = strings.ToLower(k) + if k == short || k == long { + return true + } + } + return false +} diff --git a/pkg/rollup/rollup.go b/pkg/rollup/rollup.go index 663659d4e9..fe6a9134aa 100644 --- a/pkg/rollup/rollup.go +++ b/pkg/rollup/rollup.go @@ -14,93 +14,109 @@ import ( "github.com/timescale/promscale/pkg/internal/day" ) -type DownsampleResolution struct { - Label string `yaml:"label"` - Resolution day.Duration `yaml:"resolution"` - Retention day.Duration `yaml:"retention"` -} - -// EnsureRollupWith ensures "strictly" that the given new resolutions are applied in the database. -// -// Note: It follows a "strict" behaviour, meaning any existing resolutions of downsampling in -// the database will be removed, so that the all downsampling data in the database strictly -// matches the provided newResolutions. -// -// Example: If the DB already contains metric rollups for `short` and `long`, and in dataset-config, -// connector sees `very_short` and `long`, then EnsureRollupWith will remove the `short` downsampled data -// and create `very_short`, while not touching `long`. 
-func EnsureRollupWith(conn *pgx.Conn, newResolutions []DownsampleResolution) error { +// Sync updates the rollups in the DB in accordance with the given resolutions. It handles: +// 1. Creating of new rollups +// 2. Deletion of rollups that have `delete: true` +// 3. Update retention duration of rollups that have same label name but different retention duration. If resolution of +// existing rollups are updated, an error is returned +func Sync(ctx context.Context, conn *pgx.Conn, r Resolutions) error { rows, err := conn.Query(context.Background(), "SELECT name, resolution, retention FROM _prom_catalog.rollup") if err != nil { return fmt.Errorf("querying existing resolutions: %w", err) } defer rows.Close() - var existingResolutions []DownsampleResolution + existingResolutions := make(Resolutions) for rows.Next() { var lName string var resolution, retention time.Duration if err := rows.Scan(&lName, &resolution, &retention); err != nil { return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) } - existingResolutions = append(existingResolutions, DownsampleResolution{Label: lName, Resolution: day.Duration(resolution), Retention: day.Duration(retention)}) + existingResolutions[lName] = Definition{Resolution: day.Duration(resolution), Retention: day.Duration(retention)} } - // Determine which resolutions need to be created and deleted from the DB. - pendingCreation := diff(newResolutions, existingResolutions) - pendingDeletion := diff(existingResolutions, newResolutions) + if err := errOnResolutionMismatch(existingResolutions, r); err != nil { + return fmt.Errorf("error on existing resolution mismatch: %w", err) + } + + if err := updateExistingRollups(ctx, conn, existingResolutions, r); err != nil { + return fmt.Errorf("update existing rollups: %w", err) + } // Delete rollups that are no longer required. 
- if err = deleteRollups(conn, pendingDeletion); err != nil { + if err = deleteRollups(ctx, conn, existingResolutions, r); err != nil { return fmt.Errorf("delete rollups: %w", err) } // Create new rollups. - if err = createRollups(conn, pendingCreation); err != nil { + if err = createRollups(ctx, conn, existingResolutions, r); err != nil { return fmt.Errorf("create rollups: %w", err) } return nil } -func createRollups(conn *pgx.Conn, res []DownsampleResolution) error { - for _, r := range res { - _, err := conn.Exec(context.Background(), "CALL _prom_catalog.create_rollup($1, $2, $3)", r.Label, time.Duration(r.Resolution), time.Duration(r.Retention)) - if err != nil { - return fmt.Errorf("error creating rollup for %s: %w", r.Label, err) +// errOnResolutionMismatch returns an error if a given resolution exists in the DB with a different resolution duration. +func errOnResolutionMismatch(existing, r Resolutions) error { + for labelName, res := range r { + if oldRes, exists := existing[labelName]; exists { + if oldRes.Resolution != res.Resolution { + return fmt.Errorf("existing rollup resolutions cannot be updated. Either keep the resolution of existing rollup labels same or remove them") + } } } return nil } -func deleteRollups(conn *pgx.Conn, res []DownsampleResolution) error { - for _, r := range res { - _, err := conn.Exec(context.Background(), "CALL _prom_catalog.delete_rollup($1)", r.Label) - if err != nil { - return fmt.Errorf("error deleting rollup for %s: %w", r.Label, err) +// updateExistingRollups updates the existing rollups retention if the new resolutions with a same name has +// different retention duration. 
+func updateExistingRollups(ctx context.Context, conn *pgx.Conn, existingRes, r Resolutions) error { + var batch pgx.Batch + for labelName, res := range r { + if oldRes, exists := existingRes[labelName]; exists && oldRes.Retention != res.Retention { + batch.Queue("UPDATE _prom_catalog.rollup SET retention = $1 WHERE name = $2", time.Duration(res.Retention), labelName) + } + } + if batch.Len() > 0 { + results := conn.SendBatch(ctx, &batch) + if err := results.Close(); err != nil { + return fmt.Errorf("error closing batch: %w", err) } } return nil } -// diff returns the elements of a that are not in b. -// -// We need this since we want to support a "strict" behaviour in downsampling. This basically means, to have the exact -// downsampling data in the DB based on what's mentioned in the dataset-config. -// -// See the comment for EnsureRollupWith for example. -func diff(a, b []DownsampleResolution) []DownsampleResolution { - var difference []DownsampleResolution - for i := range a { - found := false - for j := range b { - if a[i].Label == b[j].Label { - found = true - break - } +func createRollups(ctx context.Context, conn *pgx.Conn, existingRes, r Resolutions) error { + var batch pgx.Batch + for lName, res := range r { + _, exists := existingRes[lName] + if !exists && !res.Delete { + batch.Queue("CALL _prom_catalog.create_rollup($1, $2, $3)", lName, time.Duration(res.Resolution), time.Duration(res.Retention)) } - if !found { - difference = append(difference, a[i]) + } + if batch.Len() > 0 { + results := conn.SendBatch(ctx, &batch) + if err := results.Close(); err != nil { + return fmt.Errorf("error creating new rollups: %w", err) } } - return difference + return nil +} + +func deleteRollups(ctx context.Context, conn *pgx.Conn, existingRes, r Resolutions) error { + var batch pgx.Batch + for lName, res := range r { + _, exists := existingRes[lName] + if exists && res.Delete { + // Delete the rollup only if it exists in the DB. 
+ batch.Queue("CALL _prom_catalog.delete_rollup($1)", lName) + } + } + if batch.Len() > 0 { + results := conn.SendBatch(ctx, &batch) + if err := results.Close(); err != nil { + return fmt.Errorf("error deleting new rollups: %w", err) + } + } + return nil } diff --git a/pkg/rollup/rollup_test.go b/pkg/rollup/rollup_test.go deleted file mode 100644 index e9822541c8..0000000000 --- a/pkg/rollup/rollup_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// This file and its contents are licensed under the Apache License 2.0. -// Please see the included NOTICE for copyright information and -// LICENSE for a copy of the license. - -package rollup - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDiff(t *testing.T) { - tcs := []struct { - name string - a, b, expected []DownsampleResolution - }{ - { - name: "some inclusive elements", - a: []DownsampleResolution{{Label: "a"}, {Label: "b"}, {Label: "c"}, {Label: "d"}}, - b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, - expected: []DownsampleResolution{{Label: "a"}, {Label: "b"}}, - }, - { - name: "b superset of a", - a: []DownsampleResolution{{Label: "c"}, {Label: "d"}}, - b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, - expected: []DownsampleResolution(nil), - }, - { - name: "a empty", - a: []DownsampleResolution{}, - b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, - expected: []DownsampleResolution(nil), - }, - { - name: "all elements exclusive", - a: []DownsampleResolution{{Label: "a"}}, - b: []DownsampleResolution{{Label: "c"}, {Label: "d"}, {Label: "e"}}, - expected: []DownsampleResolution{{Label: "a"}}, - }, - { - name: "same", - a: []DownsampleResolution{{Label: "a"}, {Label: "b"}, {Label: "c"}, {Label: "d"}}, - b: []DownsampleResolution{{Label: "a"}, {Label: "b"}, {Label: "c"}, {Label: "d"}}, - expected: []DownsampleResolution(nil), - }, - { - name: "empty", - a: []DownsampleResolution{}, - b: []DownsampleResolution{}, - expected: 
[]DownsampleResolution(nil), - }, - } - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - require.Equal(t, tc.expected, diff(tc.a, tc.b), tc.name) - }) - } -} diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 81beb674a8..a2ff99d7a8 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -238,13 +238,13 @@ func isBGWLessThanDBs(conn *pgx.Conn) (bool, error) { return false, nil } -func applyDatasetConfig(conn *pgx.Conn, cfgFilename string) error { +func applyDatasetConfig(ctx context.Context, conn *pgx.Conn, cfgFilename string) error { cfg, err := dataset.NewConfig(cfgFilename) if err != nil { return err } - return cfg.Apply(conn) + return cfg.Apply(ctx, conn) } func compileAnchoredRegexString(s string) (*regexp.Regexp, error) { diff --git a/pkg/tests/end_to_end_tests/config_dataset_test.go b/pkg/tests/end_to_end_tests/config_dataset_test.go index 409900e2ef..86473e57b3 100644 --- a/pkg/tests/end_to_end_tests/config_dataset_test.go +++ b/pkg/tests/end_to_end_tests/config_dataset_test.go @@ -40,7 +40,7 @@ func TestDatasetConfigApply(t *testing.T) { }, } - err = cfg.Apply(pgxConn) + err = cfg.Apply(context.Background(), pgxConn) require.NoError(t, err) require.Equal(t, 4*time.Hour, getMetricsDefaultChunkInterval(t, pgxConn)) @@ -53,7 +53,7 @@ func TestDatasetConfigApply(t *testing.T) { // Set to default if chunk interval is not specified. cfg = dataset.Config{} - err = cfg.Apply(pgxConn) + err = cfg.Apply(context.Background(), pgxConn) require.NoError(t, err) require.Equal(t, 8*time.Hour, getMetricsDefaultChunkInterval(t, pgxConn)) diff --git a/pkg/tests/end_to_end_tests/rollup_test.go b/pkg/tests/end_to_end_tests/rollup_test.go index ce36dc51e9..7f38a9f32c 100644 --- a/pkg/tests/end_to_end_tests/rollup_test.go +++ b/pkg/tests/end_to_end_tests/rollup_test.go @@ -1,3 +1,7 @@ +// This file and its contents are licensed under the Apache License 2.0. 
+// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. + package end_to_end_tests import ( @@ -13,11 +17,10 @@ import ( "github.com/timescale/promscale/pkg/rollup" ) -func TestRollupCreationDeletion(t *testing.T) { +func TestRollupSync(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { - rollupResolutions := []rollup.DownsampleResolution{ - { - Label: "short", + rollupResolutions := rollup.Resolutions{ + "short": { Resolution: day.Duration(5 * time.Minute), Retention: day.Duration(30 * 24 * time.Hour), }, @@ -27,30 +30,67 @@ func TestRollupCreationDeletion(t *testing.T) { require.NoError(t, err) defer pgCon.Release() - err = rollup.EnsureRollupWith(pgCon.Conn(), rollupResolutions) + // Test 1: Check if 'short' rollup is created. + err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) require.NoError(t, err) - verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[0].Label, time.Duration(rollupResolutions[0].Resolution), time.Duration(rollupResolutions[0].Retention), false) + verifyRollupExistence(t, pgCon.Conn(), "short", + time.Duration(rollupResolutions["short"].Resolution), time.Duration(rollupResolutions["short"].Retention), false) - rollupResolutions = append(rollupResolutions, rollup.DownsampleResolution{ - Label: "long", + rollupResolutions["long"] = rollup.Definition{ Resolution: day.Duration(time.Hour), Retention: day.Duration(395 * 24 * time.Hour), - }) + } - err = rollup.EnsureRollupWith(pgCon.Conn(), rollupResolutions) + // Test 2: Check if 'long' rollup is created. 
+ err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) require.NoError(t, err) - verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[1].Label, time.Duration(rollupResolutions[1].Resolution), time.Duration(rollupResolutions[1].Retention), false) + verifyRollupExistence(t, pgCon.Conn(), "long", + time.Duration(rollupResolutions["long"].Resolution), time.Duration(rollupResolutions["long"].Retention), false) + + // Test 3: Update the resolution and check if error is returned. + rollupResolutions["short"] = rollup.Definition{ + Resolution: day.Duration(4 * time.Minute), + Retention: day.Duration(30 * 24 * time.Hour), + } + err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) + require.Equal(t, + "error on existing resolution mismatch: existing rollup resolutions cannot be updated. Either keep the resolution of existing rollup labels same or remove them", + err.Error()) + // Reset back to original resolution. + rollupResolutions["short"] = rollup.Definition{ + Resolution: day.Duration(5 * time.Minute), + Retention: day.Duration(30 * 24 * time.Hour), + } - // Remove the first entry and see if the entry is removed or not. - newRes := rollupResolutions[1:] - err = rollup.EnsureRollupWith(pgCon.Conn(), newRes) + // Test 4: Remove the first entry and see if the entry is removed or not. + rollupResolutions["short"] = rollup.Definition{ + Resolution: day.Duration(5 * time.Minute), + Retention: day.Duration(30 * 24 * time.Hour), + Delete: true, + } + err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) require.NoError(t, err) // Check if long exists. - verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[1].Label, time.Duration(rollupResolutions[1].Resolution), time.Duration(rollupResolutions[1].Retention), false) + verifyRollupExistence(t, pgCon.Conn(), "long", + time.Duration(rollupResolutions["long"].Resolution), time.Duration(rollupResolutions["long"].Retention), false) // Check if short does not exist. 
- verifyRollupExistence(t, pgCon.Conn(), rollupResolutions[0].Label, time.Duration(rollupResolutions[0].Resolution), time.Duration(rollupResolutions[0].Retention), true) + verifyRollupExistence(t, pgCon.Conn(), "short", + time.Duration(rollupResolutions["short"].Resolution), time.Duration(rollupResolutions["short"].Retention), true) + + // Test 5: Update retention of long and check if the same is reflected in the DB. + rollupResolutions["long"] = rollup.Definition{ + Resolution: day.Duration(time.Hour), + Retention: day.Duration(500 * 24 * time.Hour), // Updated retention duration. + } + err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) + require.NoError(t, err) + verifyRollupExistence(t, pgCon.Conn(), "long", + time.Duration(rollupResolutions["long"].Resolution), time.Duration(rollupResolutions["long"].Retention), false) + // Short should still not exists. + verifyRollupExistence(t, pgCon.Conn(), "short", + time.Duration(rollupResolutions["short"].Resolution), time.Duration(rollupResolutions["short"].Retention), true) }) } From 6ca60077ce63ee4a96867c5a4a0296aa3dff308c Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Thu, 15 Dec 2022 21:14:36 +0530 Subject: [PATCH 07/13] squashable: Resolve conflicts Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 12 ++++++------ pkg/internal/day/duration.go | 4 ++-- pkg/runner/client.go | 9 +++++---- pkg/runner/config_parser.go | 4 ++-- pkg/runner/flags_test.go | 12 ++++++------ 5 files changed, 21 insertions(+), 20 deletions(-) diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index aebd841234..ca0938c360 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -45,12 +45,12 @@ type Config struct { // Metrics contains dataset configuration options for metrics data. 
type Metrics struct { - ChunkInterval day.Duration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` - Compression *bool `mapstructure:"compress_data" yaml:"compress_data"` // Using pointer to check if the the value was set. - HALeaseRefresh day.Duration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` - HALeaseTimeout day.Duration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` - RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` - Rollups *rollup.Config `mapstructure:"rollups" yaml:"rollups,omitempty"` + ChunkInterval day.Duration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` + Compression *bool `mapstructure:"compress_data" yaml:"compress_data"` // Using pointer to check if the the value was set. + HALeaseRefresh day.Duration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` + HALeaseTimeout day.Duration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` + RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` + Rollups *rollup.Config `mapstructure:"rollup" yaml:"rollup,omitempty"` } // Traces contains dataset configuration options for traces data. 
diff --git a/pkg/internal/day/duration.go b/pkg/internal/day/duration.go index 4ad324533b..45a2606569 100644 --- a/pkg/internal/day/duration.go +++ b/pkg/internal/day/duration.go @@ -83,7 +83,7 @@ func StringToDayDurationHookFunc() mapstructure.DecodeHookFunc { return data, nil } - var d DayDuration + var d Duration if t != reflect.TypeOf(d) { return data, nil @@ -93,6 +93,6 @@ func StringToDayDurationHookFunc() mapstructure.DecodeHookFunc { if err != nil { return nil, err } - return DayDuration(d), nil + return Duration(d), nil } } diff --git a/pkg/runner/client.go b/pkg/runner/client.go index a2ff99d7a8..3f77769859 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -19,6 +19,7 @@ import ( "github.com/timescale/promscale/pkg/pgmodel" "github.com/timescale/promscale/pkg/pgmodel/common/extension" "github.com/timescale/promscale/pkg/pgmodel/common/schema" + "github.com/timescale/promscale/pkg/rollup" "github.com/timescale/promscale/pkg/tenancy" "github.com/timescale/promscale/pkg/util" "github.com/timescale/promscale/pkg/version" @@ -169,13 +170,13 @@ func CreateClient(r prometheus.Registerer, cfg *Config) (*pgclient.Client, error if cfg.DatasetConfig != "" { log.Warn("msg", "Ignoring `startup.dataset.config` in favor of the newer `startup.dataset` config option since both were set.") } - err = cfg.DatasetCfg.Apply(conn) + err = cfg.DatasetCfg.Apply(context.Background(), conn) } else if cfg.DatasetConfig != "" { - err = applyDatasetConfig(conn, cfg.DatasetConfig) + err = applyDatasetConfig(context.Background(), conn, cfg.DatasetConfig) } else { // We apply downsampling settings even when DatasetConfig is not given, which is the most common case. 
- downsampleCfg := &dataset.Downsample{} - err = downsampleCfg.Apply(conn) + rollupCfg := &rollup.Config{} + err = rollupCfg.Apply(context.Background(), conn) if err != nil { return nil, fmt.Errorf("error applying downsampling configuration: %w", err) } diff --git a/pkg/runner/config_parser.go b/pkg/runner/config_parser.go index 6e94ef4fbe..65e894fd28 100644 --- a/pkg/runner/config_parser.go +++ b/pkg/runner/config_parser.go @@ -12,7 +12,7 @@ import ( "github.com/mitchellh/mapstructure" "github.com/spf13/viper" - "github.com/timescale/promscale/pkg/dataset" + "github.com/timescale/promscale/pkg/internal/day" ) // unmarshalRule defines that the subtree located on the `key` of the Viper @@ -260,7 +260,7 @@ func applyUnmarshalRules(v *viper.Viper, unmarshalRules []unmarshalRule) error { rule.target, viper.DecodeHook( mapstructure.ComposeDecodeHookFunc( - dataset.StringToDayDurationHookFunc(), + day.StringToDayDurationHookFunc(), mapstructure.StringToTimeDurationHookFunc(), mapstructure.StringToSliceHookFunc(","), ), diff --git a/pkg/runner/flags_test.go b/pkg/runner/flags_test.go index 74d484bf99..f6eee4ba49 100644 --- a/pkg/runner/flags_test.go +++ b/pkg/runner/flags_test.go @@ -13,7 +13,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/timescale/promscale/pkg/dataset" + "github.com/timescale/promscale/pkg/internal/day" ) func TestParseFlags(t *testing.T) { @@ -281,12 +281,12 @@ startup: c.ListenAddr = "localhost:9201" c.AuthConfig.BasicAuthUsername = "promscale" c.AuthConfig.BasicAuthPassword = "my-password" - c.DatasetCfg.Metrics.ChunkInterval = dataset.DayDuration(24 * time.Hour) + c.DatasetCfg.Metrics.ChunkInterval = day.Duration(24 * time.Hour) c.DatasetCfg.Metrics.Compression = func(b bool) *bool { return &b }(false) - c.DatasetCfg.Metrics.HALeaseRefresh = dataset.DayDuration(24 * time.Hour * 2) - c.DatasetCfg.Metrics.HALeaseTimeout = dataset.DayDuration(24 * time.Hour * 3) - c.DatasetCfg.Metrics.RetentionPeriod = dataset.DayDuration(24 * 
time.Hour * 4) - c.DatasetCfg.Traces.RetentionPeriod = dataset.DayDuration(24 * time.Hour * 5) + c.DatasetCfg.Metrics.HALeaseRefresh = day.Duration(24 * time.Hour * 2) + c.DatasetCfg.Metrics.HALeaseTimeout = day.Duration(24 * time.Hour * 3) + c.DatasetCfg.Metrics.RetentionPeriod = day.Duration(24 * time.Hour * 4) + c.DatasetCfg.Traces.RetentionPeriod = day.Duration(24 * time.Hour * 5) c.DatasetConfig = "metrics:\n default_chunk_interval: 1h\n" return c }, From de545275743b4bec24d0033f45419f7c7cdf0c18 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Fri, 16 Dec 2022 13:19:47 +0530 Subject: [PATCH 08/13] Fix deepcopy-gen errors. Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 2 +- pkg/dataset/deepcopy_generated.go | 4 ++++ pkg/rollup/config.go | 29 ++++++++++++++--------------- pkg/runner/client.go | 2 +- 4 files changed, 20 insertions(+), 17 deletions(-) diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index ca0938c360..1079cb3d15 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -68,7 +68,7 @@ func NewConfig(contents string) (cfg Config, err error) { func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { c.applyDefaults() - if c.Metrics.Rollups != nil { + if c.Metrics.Rollups.Enabled { if err := c.Metrics.Rollups.Apply(ctx, conn); err != nil { return fmt.Errorf("error applying configuration for downsampling: %w", err) } diff --git a/pkg/dataset/deepcopy_generated.go b/pkg/dataset/deepcopy_generated.go index d018fcd394..c7b9fbb1af 100644 --- a/pkg/dataset/deepcopy_generated.go +++ b/pkg/dataset/deepcopy_generated.go @@ -35,6 +35,10 @@ func (in *Metrics) DeepCopyInto(out *Metrics) { *out = new(bool) **out = **in } + if in.Rollups != nil { + in, out := &in.Rollups, &out.Rollups + *out = (*in).DeepCopy() + } return } diff --git a/pkg/rollup/config.go b/pkg/rollup/config.go index d1794bf3b5..3c86a985aa 100644 --- a/pkg/rollup/config.go +++ b/pkg/rollup/config.go @@ -25,9 +25,7 @@ const ( ) var ( - 
defaultDownsampleState = false - useDefaultResolution = false - systemResolution = map[string]Definition{ + systemResolution = map[string]Definition{ short: { Resolution: day.Duration(5 * time.Minute), Retention: day.Duration(90 * 24 * time.Hour), @@ -40,8 +38,8 @@ var ( ) type Config struct { - Enabled *bool `yaml:"enabled,omitempty"` - UseDefaultResolution *bool `yaml:"use_default_resolution"` + Enabled bool `yaml:"enabled,omitempty"` + UseDefaultResolution bool `yaml:"use_default_resolution"` Resolutions `yaml:"resolutions,omitempty"` } @@ -54,19 +52,17 @@ type Definition struct { type Resolutions map[string]Definition func (d *Config) Apply(ctx context.Context, conn *pgx.Conn) error { - d.applyDefaults() - if containsSystemResolutions(d.Resolutions) { return fmt.Errorf("'short' and 'long' are system resolutions. These cannot be applied as rollup labels") } - log.Info("msg", fmt.Sprintf("Setting automatic metric downsample to %t", *d.Enabled)) + log.Info("msg", fmt.Sprintf("Setting automatic metric downsample to %t", d.Enabled)) if _, err := conn.Exec(context.Background(), setDefaultDownsampleStateSQL, d.Enabled); err != nil { return err } - if *d.Enabled { - if *d.UseDefaultResolution { + if d.Enabled { + if d.UseDefaultResolution { d.Resolutions["short"] = systemResolution["short"] d.Resolutions["long"] = systemResolution["long"] } @@ -77,13 +73,16 @@ func (d *Config) Apply(ctx context.Context, conn *pgx.Conn) error { return nil } -func (d *Config) applyDefaults() { - if d.Enabled == nil { - d.Enabled = &defaultDownsampleState +func (d *Config) DeepCopy() *Config { + out := Config{ + Enabled: d.Enabled, + UseDefaultResolution: d.UseDefaultResolution, + Resolutions: make(Resolutions, len(d.Resolutions)), } - if d.UseDefaultResolution == nil { - d.UseDefaultResolution = &useDefaultResolution + for k, v := range d.Resolutions { + out.Resolutions[k] = v } + return &out } func containsSystemResolutions(r Resolutions) bool { diff --git a/pkg/runner/client.go 
b/pkg/runner/client.go index 3f77769859..7f5cae1082 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -166,7 +166,7 @@ func CreateClient(r prometheus.Registerer, cfg *Config) (*pgclient.Client, error cfg.APICfg.MultiTenancy = multiTenancy } - if (cfg.DatasetCfg != dataset.Config{}) { + if cfg.DatasetCfg != (dataset.Config{}) { if cfg.DatasetConfig != "" { log.Warn("msg", "Ignoring `startup.dataset.config` in favor of the newer `startup.dataset` config option since both were set.") } From c3fb475b984ffd49cf7742d1ea7ba60f0622e3c1 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Fri, 16 Dec 2022 14:59:25 +0530 Subject: [PATCH 09/13] squashable: fix tests. Signed-off-by: Harkishen-Singh --- EXTENSION_VERSION | 2 +- pkg/dataset/config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/EXTENSION_VERSION b/EXTENSION_VERSION index 1f7391f92b..be59ceb929 100644 --- a/EXTENSION_VERSION +++ b/EXTENSION_VERSION @@ -1 +1 @@ -master +feature_metric_rollup diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index 1079cb3d15..4911f27a25 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -68,7 +68,7 @@ func NewConfig(contents string) (cfg Config, err error) { func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { c.applyDefaults() - if c.Metrics.Rollups.Enabled { + if c.Metrics.Rollups != nil && c.Metrics.Rollups.Enabled { if err := c.Metrics.Rollups.Apply(ctx, conn); err != nil { return fmt.Errorf("error applying configuration for downsampling: %w", err) } From 4c23c32fd9bc10712213b5c684feb9a8c0057a25 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Tue, 20 Dec 2022 21:24:46 +0530 Subject: [PATCH 10/13] Adds support for new downsampling UX. Signed-off-by: Harkishen-Singh This commit implements the update UX for creating downsampled data for Prometheus metrics. 
Now, downsampling can be done by ``` metrics: downsampling: 5m:30d,1h:365d ``` The above dataset-config setting will create 2 downsamplings, ds_5m (interval 5m & retention 30d) and ds_1h (interval 1h & retention 365d). If an entry is removed, like ``` metrics: downsampling: 1h:365d ``` Then we disable the ds_5m downsampling from refreshing. Deletion can only happen via SQL, `CALL _prom_catalog.delete_downsampling()`. This keeps the UX clean and simple. If the removed downsampling is applied again later on, we simply enable the refreshing. This also means that this commit adds the ability to enable or disable downsampling as well. --- EXTENSION_VERSION | 2 +- pkg/dataset/config.go | 61 +++--- pkg/dataset/deepcopy_generated.go | 20 +- pkg/downsample/downsample.go | 174 ++++++++++++++++++ pkg/internal/day/duration.go | 32 +++- pkg/rollup/config.go | 96 ---------- pkg/rollup/rollup.go | 122 ------------ pkg/runner/client.go | 8 - pkg/tests/constants.go | 1 + .../metric_downsampling_test.go | 87 +++++++++ pkg/tests/end_to_end_tests/rollup_test.go | 111 ----------- 11 files changed, 343 insertions(+), 371 deletions(-) create mode 100644 pkg/downsample/downsample.go delete mode 100644 pkg/rollup/config.go delete mode 100644 pkg/rollup/rollup.go create mode 100644 pkg/tests/end_to_end_tests/metric_downsampling_test.go delete mode 100644 pkg/tests/end_to_end_tests/rollup_test.go diff --git a/EXTENSION_VERSION b/EXTENSION_VERSION index be59ceb929..1e5c905f50 100644 --- a/EXTENSION_VERSION +++ b/EXTENSION_VERSION @@ -1 +1 @@ -feature_metric_rollup +downsampling_new_config diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index 4911f27a25..69ce1c196a 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -2,6 +2,7 @@ // Please see the included NOTICE for copyright information and // LICENSE for a copy of the license.
// +k8s:deepcopy-gen=package + package dataset import ( @@ -12,9 +13,9 @@ import ( "github.com/jackc/pgx/v4" "gopkg.in/yaml.v2" + "github.com/timescale/promscale/pkg/downsample" "github.com/timescale/promscale/pkg/internal/day" "github.com/timescale/promscale/pkg/log" - "github.com/timescale/promscale/pkg/rollup" ) const ( @@ -45,12 +46,12 @@ type Config struct { // Metrics contains dataset configuration options for metrics data. type Metrics struct { - ChunkInterval day.Duration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` - Compression *bool `mapstructure:"compress_data" yaml:"compress_data"` // Using pointer to check if the the value was set. - HALeaseRefresh day.Duration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` - HALeaseTimeout day.Duration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` - RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` - Rollups *rollup.Config `mapstructure:"rollup" yaml:"rollup,omitempty"` + ChunkInterval day.Duration `mapstructure:"default_chunk_interval" yaml:"default_chunk_interval"` + Compression *bool `mapstructure:"compress_data" yaml:"compress_data"` // Using pointer to check if the value was set. + HALeaseRefresh day.Duration `mapstructure:"ha_lease_refresh" yaml:"ha_lease_refresh"` + HALeaseTimeout day.Duration `mapstructure:"ha_lease_timeout" yaml:"ha_lease_timeout"` + RetentionPeriod day.Duration `mapstructure:"default_retention_period" yaml:"default_retention_period"` + Downsampling *[]downsample.Config `mapstructure:"downsampling" yaml:"downsampling,omitempty"` } // Traces contains dataset configuration options for traces data.
@@ -68,10 +69,20 @@ func NewConfig(contents string) (cfg Config, err error) { func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { c.applyDefaults() - if c.Metrics.Rollups != nil && c.Metrics.Rollups.Enabled { - if err := c.Metrics.Rollups.Apply(ctx, conn); err != nil { - return fmt.Errorf("error applying configuration for downsampling: %w", err) + if c.Metrics.Downsampling == nil { + if err := downsample.SetState(ctx, conn, false); err != nil { + return fmt.Errorf("error setting state for automatic-downsampling: %w", err) + } + log.Info("msg", "Metric downsampling is disabled") + } else { + if err := downsample.SetState(ctx, conn, true); err != nil { + return fmt.Errorf("error setting state for automatic-downsampling: %w", err) + } + log.Info("msg", "Metric downsampling is enabled") + if err := downsample.Sync(ctx, conn, *c.Metrics.Downsampling); err != nil { + return fmt.Errorf("error syncing downsampling configurations: %w", err) } + log.Info("msg", "Metric downsampling configurations synced", "configuration", fmt.Sprint(*c.Metrics.Downsampling)) } log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", c.Metrics.ChunkInterval)) @@ -82,12 +93,12 @@ func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { log.Info("msg", fmt.Sprintf("Setting trace dataset default retention period to %s", c.Traces.RetentionPeriod)) queries := map[string]interface{}{ - setDefaultMetricChunkIntervalSQL: time.Duration(c.Metrics.ChunkInterval), + setDefaultMetricChunkIntervalSQL: c.Metrics.ChunkInterval.Duration(), setDefaultMetricCompressionSQL: c.Metrics.Compression, - setDefaultMetricHAReleaseRefreshSQL: time.Duration(c.Metrics.HALeaseRefresh), - setDefaultMetricHAReleaseTimeoutSQL: time.Duration(c.Metrics.HALeaseTimeout), - setDefaultMetricRetentionPeriodSQL: time.Duration(c.Metrics.RetentionPeriod), - setDefaultTraceRetentionPeriodSQL: time.Duration(c.Traces.RetentionPeriod), + setDefaultMetricHAReleaseRefreshSQL: 
c.Metrics.HALeaseRefresh.Duration(), + setDefaultMetricHAReleaseTimeoutSQL: c.Metrics.HALeaseTimeout.Duration(), + setDefaultMetricRetentionPeriodSQL: c.Metrics.RetentionPeriod.Duration(), + setDefaultTraceRetentionPeriodSQL: c.Traces.RetentionPeriod.Duration(), } for sql, param := range queries { @@ -100,22 +111,22 @@ func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { } func (c *Config) applyDefaults() { - if c.Metrics.ChunkInterval <= 0 { - c.Metrics.ChunkInterval = day.Duration(defaultMetricChunkInterval) + if c.Metrics.ChunkInterval.Duration() <= 0 { + c.Metrics.ChunkInterval.SetDuration(defaultMetricChunkInterval) } if c.Metrics.Compression == nil { c.Metrics.Compression = &defaultMetricCompressionVar } - if c.Metrics.HALeaseRefresh <= 0 { - c.Metrics.HALeaseRefresh = day.Duration(defaultMetricHALeaseRefresh) + if c.Metrics.HALeaseRefresh.Duration() <= 0 { + c.Metrics.HALeaseRefresh.SetDuration(defaultMetricHALeaseRefresh) } - if c.Metrics.HALeaseTimeout <= 0 { - c.Metrics.HALeaseTimeout = day.Duration(defaultMetricHALeaseTimeout) + if c.Metrics.HALeaseTimeout.Duration() <= 0 { + c.Metrics.HALeaseTimeout.SetDuration(defaultMetricHALeaseTimeout) } - if c.Metrics.RetentionPeriod <= 0 { - c.Metrics.RetentionPeriod = day.Duration(defaultMetricRetentionPeriod) + if c.Metrics.RetentionPeriod.Duration() <= 0 { + c.Metrics.RetentionPeriod.SetDuration(defaultMetricRetentionPeriod) } - if c.Traces.RetentionPeriod <= 0 { - c.Traces.RetentionPeriod = day.Duration(defaultTraceRetentionPeriod) + if c.Traces.RetentionPeriod.Duration() <= 0 { + c.Traces.RetentionPeriod.SetDuration(defaultTraceRetentionPeriod) } } diff --git a/pkg/dataset/deepcopy_generated.go b/pkg/dataset/deepcopy_generated.go index c7b9fbb1af..8fde2e35a9 100644 --- a/pkg/dataset/deepcopy_generated.go +++ b/pkg/dataset/deepcopy_generated.go @@ -9,6 +9,10 @@ package dataset +import ( + downsample "github.com/timescale/promscale/pkg/downsample" +) + // DeepCopyInto is an autogenerated deepcopy 
function, copying the receiver, writing into out. in must be non-nil. func (in *Config) DeepCopyInto(out *Config) { *out = *in @@ -30,14 +34,23 @@ func (in *Config) DeepCopy() *Config { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metrics) DeepCopyInto(out *Metrics) { *out = *in + out.ChunkInterval = in.ChunkInterval if in.Compression != nil { in, out := &in.Compression, &out.Compression *out = new(bool) **out = **in } - if in.Rollups != nil { - in, out := &in.Rollups, &out.Rollups - *out = (*in).DeepCopy() + out.HALeaseRefresh = in.HALeaseRefresh + out.HALeaseTimeout = in.HALeaseTimeout + out.RetentionPeriod = in.RetentionPeriod + if in.Downsampling != nil { + in, out := &in.Downsampling, &out.Downsampling + *out = new([]downsample.Config) + if **in != nil { + in, out := *in, *out + *out = make([]downsample.Config, len(*in)) + copy(*out, *in) + } } return } @@ -55,6 +68,7 @@ func (in *Metrics) DeepCopy() *Metrics { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Traces) DeepCopyInto(out *Traces) { *out = *in + out.RetentionPeriod = in.RetentionPeriod return } diff --git a/pkg/downsample/downsample.go b/pkg/downsample/downsample.go new file mode 100644 index 0000000000..09c5606da3 --- /dev/null +++ b/pkg/downsample/downsample.go @@ -0,0 +1,174 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. 
+ +package downsample + +import ( + "context" + "fmt" + "time" + + "github.com/jackc/pgx/v4" + + "github.com/timescale/promscale/pkg/internal/day" +) + +const ( + setDownsamplingStateSQL = "SELECT prom_api.set_downsampling_state($1)" + createDownsamplingSQL = "CALL _prom_catalog.create_downsampling($1, $2, $3)" + updateDownsamplingStateForSQL = "SELECT _prom_catalog.update_downsampling_state($1, $2)" + downsamplePrefix = "ds_" // Stands for downsample_ +) + +type Config struct { + Interval day.Duration `yaml:"interval"` + Retention day.Duration `yaml:"retention"` + disabled bool +} + +func (c Config) Name() string { + return downsamplePrefix + c.Interval.Text() +} + +func SetState(ctx context.Context, conn *pgx.Conn, state bool) error { + _, err := conn.Exec(ctx, setDownsamplingStateSQL, state) + if err != nil { + return fmt.Errorf("error setting downsampling state: %w", err) + } + return nil +} + +// Sync updates the downsampling cfgs in the DB in accordance with the given new cfgs. It: +// 1. Creates new downsampling cfgs that are not in the database +// 2. Updates retention duration of downsampling cfgs that are present in the database but with a different retention duration +// 3. Disables refreshing of downsampling cfgs in the database that were not found in the new cfgs +// 4. Enables refreshing of downsampling cfgs in the database that are in the new cfgs but were previously disabled +func Sync(ctx context.Context, conn *pgx.Conn, cfgs []Config) error { + newCfgs := make(map[string]Config) // These are the new downsampling cfgs that the user provided.
Relation => schema_name: definition{} + for _, c := range cfgs { + newCfgs[c.Name()] = Config{Interval: c.Interval, Retention: c.Retention} + } + + rows, err := conn.Query(ctx, "SELECT schema_name, resolution, retention, should_refresh FROM _prom_catalog.downsample") + if err != nil { + return fmt.Errorf("querying existing resolutions: %w", err) + } + defer rows.Close() + + existingCfgs := make(map[string]Config) // These are the existing downsampling cfgs in the database. + for rows.Next() { + var ( + schemaName string + shouldRefresh bool + interval, retention time.Duration + ) + if err := rows.Scan(&schemaName, &interval, &retention, &shouldRefresh); err != nil { + return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) + } + existingCfgs[schemaName] = Config{Interval: day.Duration{T: interval}, Retention: day.Duration{T: retention}, disabled: !shouldRefresh} + } + + // Update cfgs that have a different retention duration than the new cfgs. + update := make(map[string]Config) + for name, newDef := range newCfgs { + existingDef, found := existingCfgs[name] + if found && newDef.Retention.Duration() != existingDef.Retention.Duration() { + update[name] = newDef + } + } + if len(update) > 0 { + if err = updateRetention(ctx, conn, update); err != nil { + return fmt.Errorf("updating retention of downsampling cfg: %w", err) + } + } + + // Enable downsampling cfgs that were previously disabled. 
+ disabled := []string{} + if err := conn.QueryRow(ctx, "SELECT array_agg(schema_name) FROM _prom_catalog.downsample WHERE NOT should_refresh").Scan(&disabled); err != nil { + return fmt.Errorf("error fetching downsampling configs that are disabled: %w", err) + } + disabledDownsampleConfig := map[string]struct{}{} + for _, n := range disabled { + disabledDownsampleConfig[n] = struct{}{} + } + enable := []string{} + for name := range newCfgs { + if _, found := disabledDownsampleConfig[name]; found { + enable = append(enable, name) + } + } + if len(enable) > 0 { + if err = updateState(ctx, conn, enable, true); err != nil { + return fmt.Errorf("error enabling downsampling cfgs: %w", err) + } + } + + // Disable downsampling cfgs that are applied in the database but are not present in the new downsampling cfgs. + disable := []string{} + for existingName := range existingCfgs { + if _, found := newCfgs[existingName]; !found { + disable = append(disable, existingName) + } + } + if len(disable) > 0 { + if err = updateState(ctx, conn, disable, false); err != nil { + return fmt.Errorf("error disabling downsampling cfgs: %w", err) + } + } + + // Create new downsampling cfgs that are not present in the database. + createDownsamplingCfgs := make(map[string]Config) + for newName, newDef := range newCfgs { + if _, found := existingCfgs[newName]; !found { + createDownsamplingCfgs[newName] = newDef + } + } + if len(createDownsamplingCfgs) > 0 { + if err = create(ctx, conn, createDownsamplingCfgs); err != nil { + return fmt.Errorf("creating new downsampling configurations: %w", err) + } + } + + return nil +} + +// updateRetention of existing downsampled cfgs. 
+func updateRetention(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { + var batch pgx.Batch + for schemaName, def := range cfgs { + batch.Queue("UPDATE _prom_catalog.downsample SET retention = $1 WHERE schema_name = $2", def.Retention.Duration(), schemaName) + } + if batch.Len() > 0 { + results := conn.SendBatch(ctx, &batch) + if err := results.Close(); err != nil { + return fmt.Errorf("error closing batch: %w", err) + } + } + return nil +} + +func create(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { + var batch pgx.Batch + for lName, def := range cfgs { + batch.Queue(createDownsamplingSQL, lName, def.Interval.Duration(), def.Retention.Duration()) + } + results := conn.SendBatch(ctx, &batch) + if err := results.Close(); err != nil { + return fmt.Errorf("error closing batch: %w", err) + } + return nil +} + +// updateState enables or disables a particular downsampling cfg based on new state. +func updateState(ctx context.Context, conn *pgx.Conn, name []string, newState bool) error { + var batch pgx.Batch + for _, n := range name { + batch.Queue(updateDownsamplingStateForSQL, n, newState) + } + results := conn.SendBatch(ctx, &batch) + if err := results.Close(); err != nil { + return fmt.Errorf("error closing batch: %w", err) + } + return nil +} diff --git a/pkg/internal/day/duration.go b/pkg/internal/day/duration.go index 45a2606569..3f9cebe928 100644 --- a/pkg/internal/day/duration.go +++ b/pkg/internal/day/duration.go @@ -20,7 +20,13 @@ const ( // Duration acts like a time.Duration with support for "d" unit // which is used for specifying number of days in duration. -type Duration time.Duration +// It stores the text of duration while parsing, which can be retrieved via Text(). +// This can be useful when we need to know the num of days user wanted, since +// this information is lost after parsing. +type Duration struct { + text string // Holds the original duration text. 
+ T time.Duration +} // UnmarshalText unmarshals strings into DayDuration values while // handling the day unit. It leans heavily into time.ParseDuration. @@ -37,7 +43,8 @@ func (d *Duration) UnmarshalText(s []byte) error { return err } } - *d = Duration(val) + d.T = val + d.text = string(s) return nil } @@ -68,8 +75,23 @@ func handleDays(s []byte) (time.Duration, error) { } // String returns a string value of DayDuration. -func (d Duration) String() string { - return time.Duration(d).String() +func (d *Duration) String() string { + return d.T.String() +} + +// Text returns the original text received while parsing. +func (d *Duration) Text() string { + return d.text +} + +// Duration returns the parsed duration. +func (d *Duration) Duration() time.Duration { + return d.T +} + +// SetDuration sets the parsed duration. +func (d *Duration) SetDuration(t time.Duration) { + d.T = t } // StringToDayDurationHookFunc returns a mapstructure.DecodeHookFunc that @@ -93,6 +115,6 @@ func StringToDayDurationHookFunc() mapstructure.DecodeHookFunc { if err != nil { return nil, err } - return Duration(d), nil + return d, nil } } diff --git a/pkg/rollup/config.go b/pkg/rollup/config.go deleted file mode 100644 index 3c86a985aa..0000000000 --- a/pkg/rollup/config.go +++ /dev/null @@ -1,96 +0,0 @@ -// This file and its contents are licensed under the Apache License 2.0. -// Please see the included NOTICE for copyright information and -// LICENSE for a copy of the license. - -package rollup - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/jackc/pgx/v4" - - "github.com/timescale/promscale/pkg/internal/day" - "github.com/timescale/promscale/pkg/log" -) - -const ( - setDefaultDownsampleStateSQL = "SELECT prom_api.set_automatic_downsample($1)" - - // short and long represent system resolutions.
- short = "short" - long = "long" -) - -var ( - systemResolution = map[string]Definition{ - short: { - Resolution: day.Duration(5 * time.Minute), - Retention: day.Duration(90 * 24 * time.Hour), - }, - long: { - Resolution: day.Duration(time.Hour), - Retention: day.Duration(395 * 24 * time.Hour), - }, - } -) - -type Config struct { - Enabled bool `yaml:"enabled,omitempty"` - UseDefaultResolution bool `yaml:"use_default_resolution"` - Resolutions `yaml:"resolutions,omitempty"` -} - -type Definition struct { - Resolution day.Duration `yaml:"resolution"` - Retention day.Duration `yaml:"retention"` - Delete bool `yaml:"delete"` -} - -type Resolutions map[string]Definition - -func (d *Config) Apply(ctx context.Context, conn *pgx.Conn) error { - if containsSystemResolutions(d.Resolutions) { - return fmt.Errorf("'short' and 'long' are system resolutions. These cannot be applied as rollup labels") - } - - log.Info("msg", fmt.Sprintf("Setting automatic metric downsample to %t", d.Enabled)) - if _, err := conn.Exec(context.Background(), setDefaultDownsampleStateSQL, d.Enabled); err != nil { - return err - } - - if d.Enabled { - if d.UseDefaultResolution { - d.Resolutions["short"] = systemResolution["short"] - d.Resolutions["long"] = systemResolution["long"] - } - if err := Sync(ctx, conn, d.Resolutions); err != nil { - return fmt.Errorf("ensure rollup with: %w", err) - } - } - return nil -} - -func (d *Config) DeepCopy() *Config { - out := Config{ - Enabled: d.Enabled, - UseDefaultResolution: d.UseDefaultResolution, - Resolutions: make(Resolutions, len(d.Resolutions)), - } - for k, v := range d.Resolutions { - out.Resolutions[k] = v - } - return &out -} - -func containsSystemResolutions(r Resolutions) bool { - for k := range r { - k = strings.ToLower(k) - if k == short || k == long { - return true - } - } - return false -} diff --git a/pkg/rollup/rollup.go b/pkg/rollup/rollup.go deleted file mode 100644 index fe6a9134aa..0000000000 --- a/pkg/rollup/rollup.go +++ /dev/null @@ 
-1,122 +0,0 @@ -// This file and its contents are licensed under the Apache License 2.0. -// Please see the included NOTICE for copyright information and -// LICENSE for a copy of the license. - -package rollup - -import ( - "context" - "fmt" - "time" - - "github.com/jackc/pgx/v4" - - "github.com/timescale/promscale/pkg/internal/day" -) - -// Sync updates the rollups in the DB in accordance with the given resolutions. It handles: -// 1. Creating of new rollups -// 2. Deletion of rollups that have `delete: true` -// 3. Update retention duration of rollups that have same label name but different retention duration. If resolution of -// existing rollups are updated, an error is returned -func Sync(ctx context.Context, conn *pgx.Conn, r Resolutions) error { - rows, err := conn.Query(context.Background(), "SELECT name, resolution, retention FROM _prom_catalog.rollup") - if err != nil { - return fmt.Errorf("querying existing resolutions: %w", err) - } - defer rows.Close() - - existingResolutions := make(Resolutions) - for rows.Next() { - var lName string - var resolution, retention time.Duration - if err := rows.Scan(&lName, &resolution, &retention); err != nil { - return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) - } - existingResolutions[lName] = Definition{Resolution: day.Duration(resolution), Retention: day.Duration(retention)} - } - - if err := errOnResolutionMismatch(existingResolutions, r); err != nil { - return fmt.Errorf("error on existing resolution mismatch: %w", err) - } - - if err := updateExistingRollups(ctx, conn, existingResolutions, r); err != nil { - return fmt.Errorf("update existing rollups: %w", err) - } - - // Delete rollups that are no longer required. - if err = deleteRollups(ctx, conn, existingResolutions, r); err != nil { - return fmt.Errorf("delete rollups: %w", err) - } - - // Create new rollups. 
- if err = createRollups(ctx, conn, existingResolutions, r); err != nil { - return fmt.Errorf("create rollups: %w", err) - } - return nil -} - -// errOnResolutionMismatch returns an error if a given resolution exists in the DB with a different resolution duration. -func errOnResolutionMismatch(existing, r Resolutions) error { - for labelName, res := range r { - if oldRes, exists := existing[labelName]; exists { - if oldRes.Resolution != res.Resolution { - return fmt.Errorf("existing rollup resolutions cannot be updated. Either keep the resolution of existing rollup labels same or remove them") - } - } - } - return nil -} - -// updateExistingRollups updates the existing rollups retention if the new resolutions with a same name has -// different retention duration. -func updateExistingRollups(ctx context.Context, conn *pgx.Conn, existingRes, r Resolutions) error { - var batch pgx.Batch - for labelName, res := range r { - if oldRes, exists := existingRes[labelName]; exists && oldRes.Retention != res.Retention { - batch.Queue("UPDATE _prom_catalog.rollup SET retention = $1 WHERE name = $2", time.Duration(res.Retention), labelName) - } - } - if batch.Len() > 0 { - results := conn.SendBatch(ctx, &batch) - if err := results.Close(); err != nil { - return fmt.Errorf("error closing batch: %w", err) - } - } - return nil -} - -func createRollups(ctx context.Context, conn *pgx.Conn, existingRes, r Resolutions) error { - var batch pgx.Batch - for lName, res := range r { - _, exists := existingRes[lName] - if !exists && !res.Delete { - batch.Queue("CALL _prom_catalog.create_rollup($1, $2, $3)", lName, time.Duration(res.Resolution), time.Duration(res.Retention)) - } - } - if batch.Len() > 0 { - results := conn.SendBatch(ctx, &batch) - if err := results.Close(); err != nil { - return fmt.Errorf("error creating new rollups: %w", err) - } - } - return nil -} - -func deleteRollups(ctx context.Context, conn *pgx.Conn, existingRes, r Resolutions) error { - var batch pgx.Batch - for 
lName, res := range r { - _, exists := existingRes[lName] - if exists && res.Delete { - // Delete the rollup only if it exists in the DB. - batch.Queue("CALL _prom_catalog.delete_rollup($1)", lName) - } - } - if batch.Len() > 0 { - results := conn.SendBatch(ctx, &batch) - if err := results.Close(); err != nil { - return fmt.Errorf("error deleting new rollups: %w", err) - } - } - return nil -} diff --git a/pkg/runner/client.go b/pkg/runner/client.go index 7f5cae1082..c265cc0caa 100644 --- a/pkg/runner/client.go +++ b/pkg/runner/client.go @@ -19,7 +19,6 @@ import ( "github.com/timescale/promscale/pkg/pgmodel" "github.com/timescale/promscale/pkg/pgmodel/common/extension" "github.com/timescale/promscale/pkg/pgmodel/common/schema" - "github.com/timescale/promscale/pkg/rollup" "github.com/timescale/promscale/pkg/tenancy" "github.com/timescale/promscale/pkg/util" "github.com/timescale/promscale/pkg/version" @@ -173,13 +172,6 @@ func CreateClient(r prometheus.Registerer, cfg *Config) (*pgclient.Client, error err = cfg.DatasetCfg.Apply(context.Background(), conn) } else if cfg.DatasetConfig != "" { err = applyDatasetConfig(context.Background(), conn, cfg.DatasetConfig) - } else { - // We apply downsampling settings even when DatasetConfig is not given, which is the most common case. 
- rollupCfg := &rollup.Config{} - err = rollupCfg.Apply(context.Background(), conn) - if err != nil { - return nil, fmt.Errorf("error applying downsampling configuration: %w", err) - } } if err != nil { return nil, fmt.Errorf("error applying dataset configuration: %w", err) diff --git a/pkg/tests/constants.go b/pkg/tests/constants.go index e88e8aa959..b8e5e49819 100644 --- a/pkg/tests/constants.go +++ b/pkg/tests/constants.go @@ -18,4 +18,5 @@ func init() { PromscaleExtensionVersion = strings.TrimSpace(string(content)) PromscaleExtensionContainer = "ghcr.io/timescale/dev_promscale_extension:" + PromscaleExtensionVersion + "-ts2-pg14" + //PromscaleExtensionContainer = "local/dev_promscale_extension:head-ts2-pg14" // This will be removed once the PR against master is made. } diff --git a/pkg/tests/end_to_end_tests/metric_downsampling_test.go b/pkg/tests/end_to_end_tests/metric_downsampling_test.go new file mode 100644 index 0000000000..307e439048 --- /dev/null +++ b/pkg/tests/end_to_end_tests/metric_downsampling_test.go @@ -0,0 +1,87 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. + +package end_to_end_tests + +import ( + "context" + "testing" + "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "github.com/stretchr/testify/require" + + "github.com/timescale/promscale/pkg/downsample" +) + +func TestMetricDownsampleSync(t *testing.T) { + withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { + downsamplingCfgs := []downsample.Config{ + {Name: "5m", Interval: time.Minute * 5, Retention: time.Hour * 24 * 30}, + } + + pgCon, err := db.Acquire(context.Background()) + require.NoError(t, err) + defer pgCon.Release() + + // Test 1: Check if 'ds_5m' downsampling is created. 
+ err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + require.NoError(t, err) + + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", + downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) + + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Name: "1h", Interval: time.Hour, Retention: time.Hour * 24 * 365}) + // Test 2: Check if 'ds_1h' downsampling is created. + err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + require.NoError(t, err) + + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_1h", + downsamplingCfgs[1].Interval, downsamplingCfgs[1].Retention, false) + + // Test 3: Remove the first entry and see if the entry is disabled or not. + downsamplingCfgs = downsamplingCfgs[1:] + err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + require.NoError(t, err) + // Check if ds_1h exists. + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_1h", + downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) + // Check if ds_5m is disabled. + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", + time.Minute*5, time.Hour*24*30, true) + + // Test 4: Update retention of ds_1h and check if the same is reflected in the DB. + downsamplingCfgs[0].Retention = time.Hour * 24 * 500 + err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + require.NoError(t, err) + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_1h", + downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) + // ds_5m should still be disabled. + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", + time.Minute*5, time.Hour*24*30, true) + + // Test 5: Enable the ds_5m downsampling that was already in the database. 
+ downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Name: "5m", Interval: time.Minute * 5, Retention: time.Hour * 24 * 30}) + err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + require.NoError(t, err) + verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", + downsamplingCfgs[1].Interval, downsamplingCfgs[1].Retention, false) + }) +} + +func verifyDownsamplingExistence(t testing.TB, pgCon *pgx.Conn, schemaName string, interval, retention time.Duration, shouldBeDisabled bool) { + var ( + dSchemaName string + dInterval time.Duration + dRetention time.Duration + dShouldRefresh bool + ) + err := pgCon.QueryRow(context.Background(), "SELECT schema_name, resolution, retention, should_refresh FROM _prom_catalog.downsample WHERE schema_name = $1", schemaName).Scan(&dSchemaName, &dInterval, &dRetention, &dShouldRefresh) + require.NoError(t, err) + require.Equal(t, schemaName, dSchemaName) + require.Equal(t, interval, dInterval) + require.Equal(t, retention, dRetention) + require.Equal(t, shouldBeDisabled, !dShouldRefresh) +} diff --git a/pkg/tests/end_to_end_tests/rollup_test.go b/pkg/tests/end_to_end_tests/rollup_test.go deleted file mode 100644 index 7f38a9f32c..0000000000 --- a/pkg/tests/end_to_end_tests/rollup_test.go +++ /dev/null @@ -1,111 +0,0 @@ -// This file and its contents are licensed under the Apache License 2.0. -// Please see the included NOTICE for copyright information and -// LICENSE for a copy of the license. 
- -package end_to_end_tests - -import ( - "context" - "testing" - "time" - - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" - "github.com/stretchr/testify/require" - - "github.com/timescale/promscale/pkg/internal/day" - "github.com/timescale/promscale/pkg/rollup" -) - -func TestRollupSync(t *testing.T) { - withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { - rollupResolutions := rollup.Resolutions{ - "short": { - Resolution: day.Duration(5 * time.Minute), - Retention: day.Duration(30 * 24 * time.Hour), - }, - } - - pgCon, err := db.Acquire(context.Background()) - require.NoError(t, err) - defer pgCon.Release() - - // Test 1: Check if 'short' rollup is created. - err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) - require.NoError(t, err) - - verifyRollupExistence(t, pgCon.Conn(), "short", - time.Duration(rollupResolutions["short"].Resolution), time.Duration(rollupResolutions["short"].Retention), false) - - rollupResolutions["long"] = rollup.Definition{ - Resolution: day.Duration(time.Hour), - Retention: day.Duration(395 * 24 * time.Hour), - } - - // Test 2: Check if 'long' rollup is created. - err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) - require.NoError(t, err) - - verifyRollupExistence(t, pgCon.Conn(), "long", - time.Duration(rollupResolutions["long"].Resolution), time.Duration(rollupResolutions["long"].Retention), false) - - // Test 3: Update the resolution and check if error is returned. - rollupResolutions["short"] = rollup.Definition{ - Resolution: day.Duration(4 * time.Minute), - Retention: day.Duration(30 * 24 * time.Hour), - } - err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) - require.Equal(t, - "error on existing resolution mismatch: existing rollup resolutions cannot be updated. Either keep the resolution of existing rollup labels same or remove them", - err.Error()) - // Reset back to original resolution. 
- rollupResolutions["short"] = rollup.Definition{ - Resolution: day.Duration(5 * time.Minute), - Retention: day.Duration(30 * 24 * time.Hour), - } - - // Test 4: Remove the first entry and see if the entry is removed or not. - rollupResolutions["short"] = rollup.Definition{ - Resolution: day.Duration(5 * time.Minute), - Retention: day.Duration(30 * 24 * time.Hour), - Delete: true, - } - err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) - require.NoError(t, err) - // Check if long exists. - verifyRollupExistence(t, pgCon.Conn(), "long", - time.Duration(rollupResolutions["long"].Resolution), time.Duration(rollupResolutions["long"].Retention), false) - // Check if short does not exist. - verifyRollupExistence(t, pgCon.Conn(), "short", - time.Duration(rollupResolutions["short"].Resolution), time.Duration(rollupResolutions["short"].Retention), true) - - // Test 5: Update retention of long and check if the same is reflected in the DB. - rollupResolutions["long"] = rollup.Definition{ - Resolution: day.Duration(time.Hour), - Retention: day.Duration(500 * 24 * time.Hour), // Updated retention duration. - } - err = rollup.Sync(context.Background(), pgCon.Conn(), rollupResolutions) - require.NoError(t, err) - verifyRollupExistence(t, pgCon.Conn(), "long", - time.Duration(rollupResolutions["long"].Resolution), time.Duration(rollupResolutions["long"].Retention), false) - // Short should still not exists. 
- verifyRollupExistence(t, pgCon.Conn(), "short", - time.Duration(rollupResolutions["short"].Resolution), time.Duration(rollupResolutions["short"].Retention), true) - }) -} - -func verifyRollupExistence(t testing.TB, pgCon *pgx.Conn, name string, resolution, retention time.Duration, shouldError bool) { - var ( - rName string - rResolution time.Duration - rRetention time.Duration - ) - err := pgCon.QueryRow(context.Background(), "SELECT name, resolution, retention FROM _prom_catalog.rollup WHERE name = $1", name).Scan(&rName, &rResolution, &rRetention) - if shouldError { - require.Error(t, err) - return - } - require.NoError(t, err) - require.Equal(t, resolution, rResolution) - require.Equal(t, retention, rRetention) -} From 4c4ca474cff0990202dd6c333a627d243c3550d7 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Tue, 3 Jan 2023 18:59:41 +0530 Subject: [PATCH 11/13] Adds support to query default downsampling views in absence of __column__. Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 10 +- pkg/dataset/config_test.go | 36 ++-- pkg/downsample/downsample.go | 182 +++++++++--------- pkg/internal/day/duration.go | 8 +- pkg/pgmodel/querier/clauses.go | 25 ++- pkg/pgmodel/querier/metadata.go | 24 +-- pkg/pgmodel/querier/query_builder.go | 29 ++- pkg/pgmodel/querier/query_builder_samples.go | 6 +- pkg/pgmodel/querier/query_sample.go | 15 +- pkg/runner/flags_test.go | 10 +- .../end_to_end_tests/config_dataset_test.go | 11 +- .../metric_downsampling_test.go | 55 ++++-- 12 files changed, 228 insertions(+), 183 deletions(-) diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index 69ce1c196a..4ec1b4f054 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -85,12 +85,12 @@ func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { log.Info("msg", "Metric downsampling configurations synced", "configuration", fmt.Sprint(*c.Metrics.Downsampling)) } - log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", 
c.Metrics.ChunkInterval)) + log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", c.Metrics.ChunkInterval.Duration())) log.Info("msg", fmt.Sprintf("Setting metric dataset default compression to %t", *c.Metrics.Compression)) - log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease refresh to %s", c.Metrics.HALeaseRefresh)) - log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease timeout to %s", c.Metrics.HALeaseTimeout)) - log.Info("msg", fmt.Sprintf("Setting metric dataset default retention period to %s", c.Metrics.RetentionPeriod)) - log.Info("msg", fmt.Sprintf("Setting trace dataset default retention period to %s", c.Traces.RetentionPeriod)) + log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease refresh to %s", c.Metrics.HALeaseRefresh.Duration())) + log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease timeout to %s", c.Metrics.HALeaseTimeout.Duration())) + log.Info("msg", fmt.Sprintf("Setting metric dataset default retention period to %s", c.Metrics.RetentionPeriod.Duration())) + log.Info("msg", fmt.Sprintf("Setting trace dataset default retention period to %s", c.Traces.RetentionPeriod.Duration())) queries := map[string]interface{}{ setDefaultMetricChunkIntervalSQL: c.Metrics.ChunkInterval.Duration(), diff --git a/pkg/dataset/config_test.go b/pkg/dataset/config_test.go index 9a0d0aefd5..c32c853a60 100644 --- a/pkg/dataset/config_test.go +++ b/pkg/dataset/config_test.go @@ -49,7 +49,7 @@ func TestNewConfig(t *testing.T) { default_retention_period: 3d2h`, cfg: Config{ Metrics: Metrics{ - RetentionPeriod: day.Duration(((3 * 24) + 2) * time.Hour), + RetentionPeriod: dayDuration(((3*24)+2)*time.Hour, "3d2h"), }, }, }, @@ -65,14 +65,14 @@ traces: default_retention_period: 15d`, cfg: Config{ Metrics: Metrics{ - ChunkInterval: day.Duration(3 * time.Hour), + ChunkInterval: dayDuration(3*time.Hour, "3h"), Compression: 
&testCompressionSetting, - HALeaseRefresh: day.Duration(2 * time.Minute), - HALeaseTimeout: day.Duration(5 * time.Second), - RetentionPeriod: day.Duration(30 * 24 * time.Hour), + HALeaseRefresh: dayDuration(2*time.Minute, "2m"), + HALeaseTimeout: dayDuration(5*time.Second, "5s"), + RetentionPeriod: dayDuration(30*24*time.Hour, "30d"), }, Traces: Traces{ - RetentionPeriod: day.Duration(15 * 24 * time.Hour), + RetentionPeriod: dayDuration(15*24*time.Hour, "15d"), }, }, }, @@ -101,14 +101,14 @@ func TestApplyDefaults(t *testing.T) { t, Config{ Metrics: Metrics{ - ChunkInterval: day.Duration(defaultMetricChunkInterval), + ChunkInterval: dayDuration(defaultMetricChunkInterval, ""), Compression: &defaultMetricCompressionVar, - HALeaseRefresh: day.Duration(defaultMetricHALeaseRefresh), - HALeaseTimeout: day.Duration(defaultMetricHALeaseTimeout), - RetentionPeriod: day.Duration(defaultMetricRetentionPeriod), + HALeaseRefresh: dayDuration(defaultMetricHALeaseRefresh, ""), + HALeaseTimeout: dayDuration(defaultMetricHALeaseTimeout, ""), + RetentionPeriod: dayDuration(defaultMetricRetentionPeriod, ""), }, Traces: Traces{ - RetentionPeriod: day.Duration(defaultTraceRetentionPeriod), + RetentionPeriod: dayDuration(defaultTraceRetentionPeriod, ""), }, }, c, @@ -116,14 +116,14 @@ func TestApplyDefaults(t *testing.T) { untouched := Config{ Metrics: Metrics{ - ChunkInterval: day.Duration(3 * time.Hour), + ChunkInterval: dayDuration(3*time.Hour, "3h"), Compression: &testCompressionSetting, - HALeaseRefresh: day.Duration(2 * time.Minute), - HALeaseTimeout: day.Duration(5 * time.Second), - RetentionPeriod: day.Duration(30 * 24 * time.Hour), + HALeaseRefresh: dayDuration(2*time.Minute, "2m"), + HALeaseTimeout: dayDuration(5*time.Second, "5s"), + RetentionPeriod: dayDuration(30*24*time.Hour, "30d"), }, Traces: Traces{ - RetentionPeriod: day.Duration(15 * 24 * time.Hour), + RetentionPeriod: dayDuration(15*24*time.Hour, "15d"), }, } @@ -132,3 +132,7 @@ func TestApplyDefaults(t *testing.T) 
{ require.Equal(t, untouched, copyConfig) } + +func dayDuration(d time.Duration, txt string) day.Duration { + return day.Duration{T: d, Txt: txt} +} diff --git a/pkg/downsample/downsample.go b/pkg/downsample/downsample.go index 09c5606da3..cdbbdb9dff 100644 --- a/pkg/downsample/downsample.go +++ b/pkg/downsample/downsample.go @@ -12,19 +12,22 @@ import ( "github.com/jackc/pgx/v4" "github.com/timescale/promscale/pkg/internal/day" + "github.com/timescale/promscale/pkg/log" + "github.com/timescale/promscale/pkg/util" ) const ( setDownsamplingStateSQL = "SELECT prom_api.set_downsampling_state($1)" - createDownsamplingSQL = "CALL _prom_catalog.create_downsampling($1, $2, $3)" + createOrUpdateDownsamplingSQL = "CALL _prom_catalog.create_or_update_downsampling($1, $2, $3)" updateDownsamplingStateForSQL = "SELECT _prom_catalog.update_downsampling_state($1, $2)" - downsamplePrefix = "ds_" // Stands of downsample_ + downsamplePrefix = "ds_" // Stands of downsample_ + lockID = 55851985173278 // Choosen randomly ) type Config struct { - Interval day.Duration `yaml:"interval"` - Retention day.Duration `yaml:"retention"` - disabled bool + Interval day.Duration `yaml:"interval"` + Retention day.Duration `yaml:"retention"` + shouldRefresh bool } func (c Config) Name() string { @@ -42,116 +45,97 @@ func SetState(ctx context.Context, conn *pgx.Conn, state bool) error { // Sync updates the downsampling cfgs in the DB in accordance with the given new cfgs. It: // 1. Creates of new downsampling cfgs that are not in the database // 2. Updates retention duration of downsampling cfgs that are present in the database but with a different retention duration -// 3. Disables refreshing of downsampling cfgs in the database that were not found in the new cfgs -// 4. Enables refreshing of downsampling cfgs in the database that are in the new cfgs but were previously disabled +// 3. 
Enables refreshing of downsampling cfgs in the database that are in the new cfgs but were previously disabled +// 4. Disables refreshing of downsampling cfgs in the database that were not found in the new cfgs func Sync(ctx context.Context, conn *pgx.Conn, cfgs []Config) error { - newCfgs := make(map[string]Config) // These are the new downsampling cfgs that the user provided. Relation => schema_name: definition{} - for _, c := range cfgs { - newCfgs[c.Name()] = Config{Interval: c.Interval, Retention: c.Retention} - } - - rows, err := conn.Query(ctx, "SELECT schema_name, resolution, retention, should_refresh FROM _prom_catalog.downsample") + pgLock, err := util.NewPgAdvisoryLock(lockID, conn.Config().ConnString()) if err != nil { - return fmt.Errorf("querying existing resolutions: %w", err) + return fmt.Errorf("error getting lock for syncing downsampling config") } - defer rows.Close() - - existingCfgs := make(map[string]Config) // These are the existing downsampling cfgs in the database. - for rows.Next() { - var ( - schemaName string - shouldRefresh bool - interval, retention time.Duration - ) - if err := rows.Scan(&schemaName, &interval, &retention, &shouldRefresh); err != nil { - return fmt.Errorf("error scanning output rows for existing resolutions: %w", err) - } - existingCfgs[schemaName] = Config{Interval: day.Duration{T: interval}, Retention: day.Duration{T: retention}, disabled: !shouldRefresh} + defer pgLock.Close() + got, err := pgLock.GetAdvisoryLock() // To prevent failure when multiple Promscale start at the same time. + if err != nil { + return fmt.Errorf("error trying pg advisory_lock") } - - // Update cfgs that have a different retention duration than the new cfgs. 
- update := make(map[string]Config) - for name, newDef := range newCfgs { - existingDef, found := existingCfgs[name] - if found && newDef.Retention.Duration() != existingDef.Retention.Duration() { - update[name] = newDef - } + if !got { + // Some other Promscale instance is already working on the downsampling.Sync() + // Hence, we should skip. + return nil } - if len(update) > 0 { - if err = updateRetention(ctx, conn, update); err != nil { - return fmt.Errorf("updating retention of downsampling cfg: %w", err) + defer func() { + if _, err = pgLock.Unlock(); err != nil { + log.Error("msg", "error unlocking downsampling.Sync advisory_lock", "err", err.Error()) } - } + }() - // Enable downsampling cfgs that were previously disabled. - disabled := []string{} - if err := conn.QueryRow(ctx, "SELECT array_agg(schema_name) FROM _prom_catalog.downsample WHERE NOT should_refresh").Scan(&disabled); err != nil { - return fmt.Errorf("error fetching downsampling configs that are disabled: %w", err) - } - disabledDownsampleConfig := map[string]struct{}{} - for _, n := range disabled { - disabledDownsampleConfig[n] = struct{}{} - } - enable := []string{} - for name := range newCfgs { - if _, found := disabledDownsampleConfig[name]; found { - enable = append(enable, name) - } - } - if len(enable) > 0 { - if err = updateState(ctx, conn, enable, true); err != nil { - return fmt.Errorf("error enabling downsampling cfgs: %w", err) - } + newCfgs := make(map[string]Config) // These are the new downsampling cfgs that the user provided. Relation => schema_name: definition{} + for _, c := range cfgs { + newCfgs[c.Name()] = Config{Interval: c.Interval, Retention: c.Retention} } - // Disable downsampling cfgs that are applied in the database but are not present in the new downsampling cfgs. 
- disable := []string{} - for existingName := range existingCfgs { - if _, found := newCfgs[existingName]; !found { - disable = append(disable, existingName) - } - } - if len(disable) > 0 { - if err = updateState(ctx, conn, disable, false); err != nil { - return fmt.Errorf("error disabling downsampling cfgs: %w", err) + existingCfgs, err := getExistingCfgs(ctx, conn) + if err != nil { + return fmt.Errorf("error fetching existing downsampling cfgs: %w", err) + } + + createOrUpdate := make(map[string]Config) + for newLabel, newCfg := range newCfgs { + if existingCfg, found := existingCfgs[newLabel]; found { + if !existingCfg.shouldRefresh || existingCfg.Retention.Duration() != newCfg.Retention.Duration() { + createOrUpdate[newLabel] = newCfg + } + if existingCfg.Interval.Duration() != newCfg.Interval.Duration() { + // This should never be the case since newlabel is schema specific. But we still check for safety purpose. + return fmt.Errorf("interval mismatch: existing interval %v, new interval %v", existingCfg.Interval.Duration(), newCfg.Interval.Duration()) + } + } else { + createOrUpdate[newLabel] = newCfg } } - // Create new downsampling cfgs that are not present in the database. - createDownsamplingCfgs := make(map[string]Config) - for newName, newDef := range newCfgs { - if _, found := existingCfgs[newName]; !found { - createDownsamplingCfgs[newName] = newDef - } - } - if len(createDownsamplingCfgs) > 0 { - if err = create(ctx, conn, createDownsamplingCfgs); err != nil { - return fmt.Errorf("creating new downsampling configurations: %w", err) + if len(createOrUpdate) > 0 { + if err = createOrUpdateDownsampling(ctx, conn, createOrUpdate); err != nil { + return fmt.Errorf("error creating or updating given downsampling configs: %w", err) } } + if err = disable(ctx, conn, newCfgs, existingCfgs); err != nil { + return fmt.Errorf("error disabling downsampling configs: %w", err) + } return nil } -// updateRetention of existing downsampled cfgs. 
-func updateRetention(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { - var batch pgx.Batch - for schemaName, def := range cfgs { - batch.Queue("UPDATE _prom_catalog.downsample SET retention = $1 WHERE schema_name = $2", def.Retention.Duration(), schemaName) +func getExistingCfgs(ctx context.Context, conn *pgx.Conn) (map[string]Config, error) { + rows, err := conn.Query(ctx, "SELECT schema_name, resolution, retention, should_refresh FROM _prom_catalog.downsample") + if err != nil { + return nil, fmt.Errorf("querying existing resolutions: %w", err) } - if batch.Len() > 0 { - results := conn.SendBatch(ctx, &batch) - if err := results.Close(); err != nil { - return fmt.Errorf("error closing batch: %w", err) + defer rows.Close() + + existingCfgs := make(map[string]Config) // These are the existing downsampling cfgs in the database. + for rows.Next() { + var ( + schemaName string + shouldRefresh bool + interval, retention time.Duration + ) + if err := rows.Scan(&schemaName, &interval, &retention, &shouldRefresh); err != nil { + return nil, fmt.Errorf("error scanning output rows for existing resolutions: %w", err) } + existingCfgs[schemaName] = Config{Interval: day.Duration{T: interval}, Retention: day.Duration{T: retention}, shouldRefresh: shouldRefresh} } - return nil + return existingCfgs, nil } -func create(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { +// createOrUpdateDownsampling does 3 things: +// 1. It creates new downsampling configurations that are given in 'cfgs' +// 2. It updates the retention of a downsampling configuration if it is present in the database with the same lName +// 3. It enables a downsampling if it was previously disabled +// Refer to _prom_catalog.create_or_update_downsampling($1, $2, $3) to learn more. 
+func createOrUpdateDownsampling(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { var batch pgx.Batch for lName, def := range cfgs { - batch.Queue(createDownsamplingSQL, lName, def.Interval.Duration(), def.Retention.Duration()) + batch.Queue(createOrUpdateDownsamplingSQL, lName, def.Interval.Duration(), def.Retention.Duration()) } results := conn.SendBatch(ctx, &batch) if err := results.Close(); err != nil { @@ -160,11 +144,21 @@ func create(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { return nil } -// updateState enables or disables a particular downsampling cfg based on new state. -func updateState(ctx context.Context, conn *pgx.Conn, name []string, newState bool) error { +// disable downsampling cfgs. +func disable(ctx context.Context, conn *pgx.Conn, newCfgs, existingCfgs map[string]Config) error { + disable := []string{} + for existingName := range existingCfgs { + if _, found := newCfgs[existingName]; !found { + disable = append(disable, existingName) + } + } + if len(disable) == 0 { + return nil + } + var batch pgx.Batch - for _, n := range name { - batch.Queue(updateDownsamplingStateForSQL, n, newState) + for _, n := range disable { + batch.Queue(updateDownsamplingStateForSQL, n, false) } results := conn.SendBatch(ctx, &batch) if err := results.Close(); err != nil { diff --git a/pkg/internal/day/duration.go b/pkg/internal/day/duration.go index 3f9cebe928..6b7c6b18e1 100644 --- a/pkg/internal/day/duration.go +++ b/pkg/internal/day/duration.go @@ -24,8 +24,8 @@ const ( // This can be useful when we need to know the num of days user wanted, since // this information is lost after parsing. type Duration struct { - text string // Holds the original duration text. - T time.Duration + Txt string // Holds the original duration text. 
+ T time.Duration } // UnmarshalText unmarshals strings into DayDuration values while @@ -44,7 +44,7 @@ func (d *Duration) UnmarshalText(s []byte) error { } } d.T = val - d.text = string(s) + d.Txt = string(s) return nil } @@ -81,7 +81,7 @@ func (d *Duration) String() string { // Text returns the original text received while parsing. func (d *Duration) Text() string { - return d.text + return d.Txt } // Duration returns the parsed duration. diff --git a/pkg/pgmodel/querier/clauses.go b/pkg/pgmodel/querier/clauses.go index 02fe6cdb0b..9470b0ae8c 100644 --- a/pkg/pgmodel/querier/clauses.go +++ b/pkg/pgmodel/querier/clauses.go @@ -33,12 +33,13 @@ func setParameterNumbers(clause string, existingArgs []interface{}, newArgs ...i } type clauseBuilder struct { - schemaName string - metricName string - columnName string - contradiction bool - clauses []string - args []interface{} + schemaName string + metricName string + columnName string + contradiction bool + downsamplingView bool + clauses []string + args []interface{} } func (c *clauseBuilder) SetMetricName(name string) { @@ -92,6 +93,18 @@ func (c *clauseBuilder) GetColumnName() string { return c.columnName } +// UseDefaultDownsamplingView is set to true when the user applies __schema__ only (and not __column__). In this, we +// query from q_ views since it contains the 'value' column that the connector's SQL query needs. +// Raw downsampled data does not contain a 'value' column, hence we create these default downsampling views in the database +// for querying. 
+func (c *clauseBuilder) UseDefaultDownsamplingView(b bool) { + c.downsamplingView = b +} + +func (c *clauseBuilder) DefaultDownsamplingView() bool { + return c.downsamplingView +} + func (c *clauseBuilder) addClause(clause string, args ...interface{}) error { if len(args) > 0 { switch args[0] { diff --git a/pkg/pgmodel/querier/metadata.go b/pkg/pgmodel/querier/metadata.go index e98b1daa3a..a63faa3e88 100644 --- a/pkg/pgmodel/querier/metadata.go +++ b/pkg/pgmodel/querier/metadata.go @@ -24,12 +24,13 @@ func GetPromQLMetadata(matchers []*labels.Matcher, hints *storage.SelectHints, q } type timeFilter struct { - metric string - schema string - column string - seriesTable string - start string - end string + metric string + schema string + column string + seriesTable string + start string + end string + useDownsamplingViews bool } type evalMetadata struct { @@ -60,11 +61,12 @@ func getEvaluationMetadata(tools *queryTools, start, end int64, promMetadata *pr metric := builder.GetMetricName() timeFilter := timeFilter{ - metric: metric, - schema: builder.GetSchemaName(), - column: builder.GetColumnName(), - start: toRFC3339Nano(start), - end: toRFC3339Nano(end), + metric: metric, + schema: builder.GetSchemaName(), + column: builder.GetColumnName(), + start: toRFC3339Nano(start), + end: toRFC3339Nano(end), + useDownsamplingViews: builder.DefaultDownsamplingView(), } // If all metric matchers match on a single metric (common case), diff --git a/pkg/pgmodel/querier/query_builder.go b/pkg/pgmodel/querier/query_builder.go index cedf1f9376..cf163f6d4c 100644 --- a/pkg/pgmodel/querier/query_builder.go +++ b/pkg/pgmodel/querier/query_builder.go @@ -55,6 +55,7 @@ var ( func BuildSubQueries(matchers []*labels.Matcher) (*clauseBuilder, error) { var err error cb := &clauseBuilder{} + var hasColumnLabel, hasSchemaLabel bool for _, m := range matchers { // From the PromQL docs: "Label matchers that match @@ -68,8 +69,10 @@ func BuildSubQueries(matchers []*labels.Matcher) 
(*clauseBuilder, error) { case pgmodel.MetricNameLabelName: cb.SetMetricName(m.Value) case pgmodel.SchemaNameLabelName: + hasSchemaLabel = true cb.SetSchemaName(m.Value) case pgmodel.ColumnNameLabelName: + hasColumnLabel = true cb.SetColumnName(m.Value) default: sq := subQueryEQ @@ -118,6 +121,15 @@ func BuildSubQueries(matchers []*labels.Matcher) (*clauseBuilder, error) { return nil, err } } + if hasSchemaLabel && !hasColumnLabel { + // There is no __column__ label in the given PromQL query. Hence, we need to use a default column to respond. + // We create 'q_' views in the database in the downsampling schema. The aim of these views is to provide 'value' column + // that the connector needs while querying. + cb.UseDefaultDownsamplingView(true) + } else if hasColumnLabel && !hasSchemaLabel { + // We cannot decide which schema to query from if the __schema__ is not provided. Hence, we should error here. + return nil, fmt.Errorf("'__schema__' label not found") + } return cb, err } @@ -247,13 +259,16 @@ type aggregators struct { // getAggregators returns the aggregator which should be used to fetch data for // a single metric. It may apply pushdowns to functions. -func getAggregators(metadata *promqlMetadata) (*aggregators, parser.Node) { - - agg, node, err := tryPushDown(metadata) - if err != nil { - log.Info("msg", "error while trying to push down, will skip pushdown optimization", "error", err) - } else if agg != nil { - return agg, node +func getAggregators(metadata *evalMetadata) (*aggregators, parser.Node) { + if !metadata.timeFilter.useDownsamplingViews { + // Pushdown functions do not behave properly with downsampling views. + // Hence, try a pushdown only if we do not aim to use a downsampling views. 
+ agg, node, err := tryPushDown(metadata.promqlMetadata) + if err != nil { + log.Info("msg", "error while trying to push down, will skip pushdown optimization", "error", err) + } else if agg != nil { + return agg, node + } } defaultAggregators := &aggregators{ diff --git a/pkg/pgmodel/querier/query_builder_samples.go b/pkg/pgmodel/querier/query_builder_samples.go index 8250cd78d6..e11f811cd2 100644 --- a/pkg/pgmodel/querier/query_builder_samples.go +++ b/pkg/pgmodel/querier/query_builder_samples.go @@ -127,7 +127,7 @@ func buildSingleMetricSamplesQuery(metadata *evalMetadata) (string, []interface{ // When pushdowns are available, the is a pushdown // function which the promscale extension provides. - qf, node := getAggregators(metadata.promqlMetadata) + qf, node := getAggregators(metadata) var selectors, selectorClauses []string values := metadata.values @@ -213,6 +213,10 @@ func buildSingleMetricSamplesQuery(metadata *evalMetadata) (string, []interface{ start, end = metadata.timeFilter.start, metadata.timeFilter.end } + if filter.useDownsamplingViews { + filter.metric = "q_" + filter.metric + } + finalSQL := fmt.Sprintf(template, pgx.Identifier{filter.schema, filter.metric}.Sanitize(), pgx.Identifier{schema.PromDataSeries, filter.seriesTable}.Sanitize(), diff --git a/pkg/pgmodel/querier/query_sample.go b/pkg/pgmodel/querier/query_sample.go index afd4ce0bf0..59d59f2ba7 100644 --- a/pkg/pgmodel/querier/query_sample.go +++ b/pkg/pgmodel/querier/query_sample.go @@ -122,6 +122,14 @@ func fetchMultipleMetricsSamples(ctx context.Context, tools *queryTools, metadat return nil, err } + // We only support default data schema for multi-metric queries + // NOTE: this needs to be updated once we add support for storing + // non-view metrics into multiple schemas. This also applies to + // fetching downsampling data too. 
+ if metadata.timeFilter.schema != schema.PromData { + return nil, fmt.Errorf("__schema__ not allowed when fetching multiple metrics in single PromQL expression") + } + // TODO this assume on average on row per-metric. Is this right? results := make([]sampleRow, 0, len(metrics)) numQueries := 0 @@ -139,13 +147,6 @@ func fetchMultipleMetricsSamples(ctx context.Context, tools *queryTools, metadat return nil, err } - // We only support default data schema for multi-metric queries - // NOTE: this needs to be updated once we add support for storing - // non-view metrics into multiple schemas - if metricInfo.TableSchema != schema.PromData { - return nil, fmt.Errorf("found unsupported metric schema in multi-metric matching query") - } - filter := timeFilter{ metric: metricInfo.TableName, schema: metricInfo.TableSchema, diff --git a/pkg/runner/flags_test.go b/pkg/runner/flags_test.go index f6eee4ba49..19317ea070 100644 --- a/pkg/runner/flags_test.go +++ b/pkg/runner/flags_test.go @@ -281,12 +281,12 @@ startup: c.ListenAddr = "localhost:9201" c.AuthConfig.BasicAuthUsername = "promscale" c.AuthConfig.BasicAuthPassword = "my-password" - c.DatasetCfg.Metrics.ChunkInterval = day.Duration(24 * time.Hour) + c.DatasetCfg.Metrics.ChunkInterval = day.Duration{T: 24 * time.Hour, Txt: "1d"} c.DatasetCfg.Metrics.Compression = func(b bool) *bool { return &b }(false) - c.DatasetCfg.Metrics.HALeaseRefresh = day.Duration(24 * time.Hour * 2) - c.DatasetCfg.Metrics.HALeaseTimeout = day.Duration(24 * time.Hour * 3) - c.DatasetCfg.Metrics.RetentionPeriod = day.Duration(24 * time.Hour * 4) - c.DatasetCfg.Traces.RetentionPeriod = day.Duration(24 * time.Hour * 5) + c.DatasetCfg.Metrics.HALeaseRefresh = day.Duration{T: 24 * time.Hour * 2, Txt: "2d"} + c.DatasetCfg.Metrics.HALeaseTimeout = day.Duration{T: 24 * time.Hour * 3, Txt: "3d"} + c.DatasetCfg.Metrics.RetentionPeriod = day.Duration{T: 24 * time.Hour * 4, Txt: "4d"} + c.DatasetCfg.Traces.RetentionPeriod = day.Duration{T: 24 * time.Hour * 5, 
Txt: "5d"} c.DatasetConfig = "metrics:\n default_chunk_interval: 1h\n" return c }, diff --git a/pkg/tests/end_to_end_tests/config_dataset_test.go b/pkg/tests/end_to_end_tests/config_dataset_test.go index 86473e57b3..112659ec20 100644 --- a/pkg/tests/end_to_end_tests/config_dataset_test.go +++ b/pkg/tests/end_to_end_tests/config_dataset_test.go @@ -9,7 +9,6 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/dataset" - "github.com/timescale/promscale/pkg/internal/day" ) func TestDatasetConfigApply(t *testing.T) { @@ -29,14 +28,14 @@ func TestDatasetConfigApply(t *testing.T) { cfg := dataset.Config{ Metrics: dataset.Metrics{ - ChunkInterval: day.Duration(4 * time.Hour), + ChunkInterval: dayDuration(4*time.Hour, "4h"), Compression: &disableCompression, - HALeaseRefresh: day.Duration(15 * time.Second), - HALeaseTimeout: day.Duration(2 * time.Minute), - RetentionPeriod: day.Duration(15 * 24 * time.Hour), + HALeaseRefresh: dayDuration(15*time.Second, "15s"), + HALeaseTimeout: dayDuration(2*time.Minute, "2m"), + RetentionPeriod: dayDuration(15*24*time.Hour, "15d"), }, Traces: dataset.Traces{ - RetentionPeriod: day.Duration(10 * 24 * time.Hour), + RetentionPeriod: dayDuration(10*24*time.Hour, "10d"), }, } diff --git a/pkg/tests/end_to_end_tests/metric_downsampling_test.go b/pkg/tests/end_to_end_tests/metric_downsampling_test.go index 307e439048..b5e69b4f45 100644 --- a/pkg/tests/end_to_end_tests/metric_downsampling_test.go +++ b/pkg/tests/end_to_end_tests/metric_downsampling_test.go @@ -14,63 +14,76 @@ import ( "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/downsample" + "github.com/timescale/promscale/pkg/internal/day" ) func TestMetricDownsampleSync(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { downsamplingCfgs := []downsample.Config{ - {Name: "5m", Interval: time.Minute * 5, Retention: time.Hour * 24 * 30}, + {Interval: 
dayDuration(time.Minute*5, "5m"), Retention: dayDuration(time.Hour*24*30, "30d")}, } pgCon, err := db.Acquire(context.Background()) require.NoError(t, err) defer pgCon.Release() + ctx := context.Background() + pc := pgCon.Conn() + // Test 1: Check if 'ds_5m' downsampling is created. - err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) - verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", - downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) + verifyDownsamplingExistence(t, pc, "ds_5m", + downsamplingCfgs[0].Interval.Duration(), downsamplingCfgs[0].Retention.Duration(), false) - downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Name: "1h", Interval: time.Hour, Retention: time.Hour * 24 * 365}) + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: dayDuration(time.Hour, "1h"), Retention: dayDuration(time.Hour*24*365, "365d")}) // Test 2: Check if 'ds_1h' downsampling is created. - err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) - verifyDownsamplingExistence(t, pgCon.Conn(), "ds_1h", - downsamplingCfgs[1].Interval, downsamplingCfgs[1].Retention, false) + verifyDownsamplingExistence(t, pc, "ds_1h", + downsamplingCfgs[1].Interval.Duration(), downsamplingCfgs[1].Retention.Duration(), false) // Test 3: Remove the first entry and see if the entry is disabled or not. downsamplingCfgs = downsamplingCfgs[1:] - err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) // Check if ds_1h exists. 
- verifyDownsamplingExistence(t, pgCon.Conn(), "ds_1h", - downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) + verifyDownsamplingExistence(t, pc, "ds_1h", + downsamplingCfgs[0].Interval.Duration(), downsamplingCfgs[0].Retention.Duration(), false) // Check if ds_5m is disabled. - verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", + verifyDownsamplingExistence(t, pc, "ds_5m", time.Minute*5, time.Hour*24*30, true) // Test 4: Update retention of ds_1h and check if the same is reflected in the DB. - downsamplingCfgs[0].Retention = time.Hour * 24 * 500 - err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + downsamplingCfgs[0].Retention = dayDuration(time.Hour*24*500, "500d") + err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) - verifyDownsamplingExistence(t, pgCon.Conn(), "ds_1h", - downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) + verifyDownsamplingExistence(t, pc, "ds_1h", + downsamplingCfgs[0].Interval.Duration(), downsamplingCfgs[0].Retention.Duration(), false) // ds_5m should still be disabled. - verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", + verifyDownsamplingExistence(t, pc, "ds_5m", time.Minute*5, time.Hour*24*30, true) // Test 5: Enable the ds_5m downsampling that was already in the database. 
- downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Name: "5m", Interval: time.Minute * 5, Retention: time.Hour * 24 * 30}) - err = downsample.Sync(context.Background(), pgCon.Conn(), downsamplingCfgs) + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: dayDuration(time.Minute*5, "5m"), Retention: dayDuration(time.Hour*24*30, "30d")}) + err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) - verifyDownsamplingExistence(t, pgCon.Conn(), "ds_5m", - downsamplingCfgs[1].Interval, downsamplingCfgs[1].Retention, false) + verifyDownsamplingExistence(t, pc, "ds_5m", + downsamplingCfgs[1].Interval.Duration(), downsamplingCfgs[1].Retention.Duration(), false) + + // Test 6: Add a resolution similar to 5m, but with different unit. This should error. + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: dayDuration(time.Second*300, "300s"), Retention: dayDuration(time.Hour*24*30, "30d")}) + err = downsample.Sync(ctx, pc, downsamplingCfgs) + require.Error(t, err) }) } +func dayDuration(d time.Duration, text string) day.Duration { + return day.Duration{T: d, Txt: text} +} + func verifyDownsamplingExistence(t testing.TB, pgCon *pgx.Conn, schemaName string, interval, retention time.Duration, shouldBeDisabled bool) { var ( dSchemaName string From e8c4cdd00d4e1be400ed32bb9ad292ff77a2e800 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Mon, 9 Jan 2023 15:57:04 +0530 Subject: [PATCH 12/13] Support for apply_downsample_config() Signed-off-by: Harkishen-Singh --- pkg/dataset/config.go | 40 ++--- pkg/dataset/config_test.go | 36 ++--- pkg/downsample/downsample.go | 139 ++++-------------- pkg/internal/day/duration.go | 59 ++++---- pkg/internal/day/duration_test.go | 51 +++++++ pkg/pgmodel/querier/querier_sql_test.go | 3 +- pkg/pgmodel/querier/query_sample.go | 2 +- pkg/runner/flags_test.go | 10 +- pkg/tests/constants.go | 2 +- .../end_to_end_tests/config_dataset_test.go | 11 +- .../metric_downsampling_test.go | 36 
++--- 11 files changed, 179 insertions(+), 210 deletions(-) create mode 100644 pkg/internal/day/duration_test.go diff --git a/pkg/dataset/config.go b/pkg/dataset/config.go index 4ec1b4f054..354693f1ca 100644 --- a/pkg/dataset/config.go +++ b/pkg/dataset/config.go @@ -85,20 +85,20 @@ func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { log.Info("msg", "Metric downsampling configurations synced", "configuration", fmt.Sprint(*c.Metrics.Downsampling)) } - log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", c.Metrics.ChunkInterval.Duration())) + log.Info("msg", fmt.Sprintf("Setting metric dataset default chunk interval to %s", c.Metrics.ChunkInterval)) log.Info("msg", fmt.Sprintf("Setting metric dataset default compression to %t", *c.Metrics.Compression)) - log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease refresh to %s", c.Metrics.HALeaseRefresh.Duration())) - log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease timeout to %s", c.Metrics.HALeaseTimeout.Duration())) - log.Info("msg", fmt.Sprintf("Setting metric dataset default retention period to %s", c.Metrics.RetentionPeriod.Duration())) - log.Info("msg", fmt.Sprintf("Setting trace dataset default retention period to %s", c.Traces.RetentionPeriod.Duration())) + log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease refresh to %s", c.Metrics.HALeaseRefresh)) + log.Info("msg", fmt.Sprintf("Setting metric dataset default high availability lease timeout to %s", c.Metrics.HALeaseTimeout)) + log.Info("msg", fmt.Sprintf("Setting metric dataset default retention period to %s", c.Metrics.RetentionPeriod)) + log.Info("msg", fmt.Sprintf("Setting trace dataset default retention period to %s", c.Traces.RetentionPeriod)) queries := map[string]interface{}{ - setDefaultMetricChunkIntervalSQL: c.Metrics.ChunkInterval.Duration(), + setDefaultMetricChunkIntervalSQL: 
time.Duration(c.Metrics.ChunkInterval), setDefaultMetricCompressionSQL: c.Metrics.Compression, - setDefaultMetricHAReleaseRefreshSQL: c.Metrics.HALeaseRefresh.Duration(), - setDefaultMetricHAReleaseTimeoutSQL: c.Metrics.HALeaseTimeout.Duration(), - setDefaultMetricRetentionPeriodSQL: c.Metrics.RetentionPeriod.Duration(), - setDefaultTraceRetentionPeriodSQL: c.Traces.RetentionPeriod.Duration(), + setDefaultMetricHAReleaseRefreshSQL: time.Duration(c.Metrics.HALeaseRefresh), + setDefaultMetricHAReleaseTimeoutSQL: time.Duration(c.Metrics.HALeaseTimeout), + setDefaultMetricRetentionPeriodSQL: time.Duration(c.Metrics.RetentionPeriod), + setDefaultTraceRetentionPeriodSQL: time.Duration(c.Traces.RetentionPeriod), } for sql, param := range queries { @@ -111,22 +111,22 @@ func (c *Config) Apply(ctx context.Context, conn *pgx.Conn) error { } func (c *Config) applyDefaults() { - if c.Metrics.ChunkInterval.Duration() <= 0 { - c.Metrics.ChunkInterval.SetDuration(defaultMetricChunkInterval) + if c.Metrics.ChunkInterval <= 0 { + c.Metrics.ChunkInterval = day.Duration(defaultMetricChunkInterval) } if c.Metrics.Compression == nil { c.Metrics.Compression = &defaultMetricCompressionVar } - if c.Metrics.HALeaseRefresh.Duration() <= 0 { - c.Metrics.HALeaseRefresh.SetDuration(defaultMetricHALeaseRefresh) + if c.Metrics.HALeaseRefresh <= 0 { + c.Metrics.HALeaseRefresh = day.Duration(defaultMetricHALeaseRefresh) } - if c.Metrics.HALeaseTimeout.Duration() <= 0 { - c.Metrics.HALeaseTimeout.SetDuration(defaultMetricHALeaseTimeout) + if c.Metrics.HALeaseTimeout <= 0 { + c.Metrics.HALeaseTimeout = day.Duration(defaultMetricHALeaseTimeout) } - if c.Metrics.RetentionPeriod.Duration() <= 0 { - c.Metrics.RetentionPeriod.SetDuration(defaultMetricRetentionPeriod) + if c.Metrics.RetentionPeriod <= 0 { + c.Metrics.RetentionPeriod = day.Duration(defaultMetricRetentionPeriod) } - if c.Traces.RetentionPeriod.Duration() <= 0 { - c.Traces.RetentionPeriod.SetDuration(defaultTraceRetentionPeriod) + if 
c.Traces.RetentionPeriod <= 0 { + c.Traces.RetentionPeriod = day.Duration(defaultTraceRetentionPeriod) } } diff --git a/pkg/dataset/config_test.go b/pkg/dataset/config_test.go index c32c853a60..9a0d0aefd5 100644 --- a/pkg/dataset/config_test.go +++ b/pkg/dataset/config_test.go @@ -49,7 +49,7 @@ func TestNewConfig(t *testing.T) { default_retention_period: 3d2h`, cfg: Config{ Metrics: Metrics{ - RetentionPeriod: dayDuration(((3*24)+2)*time.Hour, "3d2h"), + RetentionPeriod: day.Duration(((3 * 24) + 2) * time.Hour), }, }, }, @@ -65,14 +65,14 @@ traces: default_retention_period: 15d`, cfg: Config{ Metrics: Metrics{ - ChunkInterval: dayDuration(3*time.Hour, "3h"), + ChunkInterval: day.Duration(3 * time.Hour), Compression: &testCompressionSetting, - HALeaseRefresh: dayDuration(2*time.Minute, "2m"), - HALeaseTimeout: dayDuration(5*time.Second, "5s"), - RetentionPeriod: dayDuration(30*24*time.Hour, "30d"), + HALeaseRefresh: day.Duration(2 * time.Minute), + HALeaseTimeout: day.Duration(5 * time.Second), + RetentionPeriod: day.Duration(30 * 24 * time.Hour), }, Traces: Traces{ - RetentionPeriod: dayDuration(15*24*time.Hour, "15d"), + RetentionPeriod: day.Duration(15 * 24 * time.Hour), }, }, }, @@ -101,14 +101,14 @@ func TestApplyDefaults(t *testing.T) { t, Config{ Metrics: Metrics{ - ChunkInterval: dayDuration(defaultMetricChunkInterval, ""), + ChunkInterval: day.Duration(defaultMetricChunkInterval), Compression: &defaultMetricCompressionVar, - HALeaseRefresh: dayDuration(defaultMetricHALeaseRefresh, ""), - HALeaseTimeout: dayDuration(defaultMetricHALeaseTimeout, ""), - RetentionPeriod: dayDuration(defaultMetricRetentionPeriod, ""), + HALeaseRefresh: day.Duration(defaultMetricHALeaseRefresh), + HALeaseTimeout: day.Duration(defaultMetricHALeaseTimeout), + RetentionPeriod: day.Duration(defaultMetricRetentionPeriod), }, Traces: Traces{ - RetentionPeriod: dayDuration(defaultTraceRetentionPeriod, ""), + RetentionPeriod: day.Duration(defaultTraceRetentionPeriod), }, }, c, @@ -116,14 
+116,14 @@ func TestApplyDefaults(t *testing.T) { untouched := Config{ Metrics: Metrics{ - ChunkInterval: dayDuration(3*time.Hour, "3h"), + ChunkInterval: day.Duration(3 * time.Hour), Compression: &testCompressionSetting, - HALeaseRefresh: dayDuration(2*time.Minute, "2m"), - HALeaseTimeout: dayDuration(5*time.Second, "5s"), - RetentionPeriod: dayDuration(30*24*time.Hour, "30d"), + HALeaseRefresh: day.Duration(2 * time.Minute), + HALeaseTimeout: day.Duration(5 * time.Second), + RetentionPeriod: day.Duration(30 * 24 * time.Hour), }, Traces: Traces{ - RetentionPeriod: dayDuration(15*24*time.Hour, "15d"), + RetentionPeriod: day.Duration(15 * 24 * time.Hour), }, } @@ -132,7 +132,3 @@ func TestApplyDefaults(t *testing.T) { require.Equal(t, untouched, copyConfig) } - -func dayDuration(d time.Duration, txt string) day.Duration { - return day.Duration{T: d, Txt: txt} -} diff --git a/pkg/downsample/downsample.go b/pkg/downsample/downsample.go index cdbbdb9dff..3ea640e72e 100644 --- a/pkg/downsample/downsample.go +++ b/pkg/downsample/downsample.go @@ -6,9 +6,8 @@ package downsample import ( "context" + "encoding/json" "fmt" - "time" - "github.com/jackc/pgx/v4" "github.com/timescale/promscale/pkg/internal/day" @@ -17,36 +16,36 @@ import ( ) const ( - setDownsamplingStateSQL = "SELECT prom_api.set_downsampling_state($1)" - createOrUpdateDownsamplingSQL = "CALL _prom_catalog.create_or_update_downsampling($1, $2, $3)" - updateDownsamplingStateForSQL = "SELECT _prom_catalog.update_downsampling_state($1, $2)" - downsamplePrefix = "ds_" // Stands of downsample_ - lockID = 55851985173278 // Choosen randomly + setGlobalDownsamplingStateSQL = "SELECT prom_api.set_global_downsampling_state($1)" + applyDownsampleConfigSQL = "SELECT _prom_catalog.apply_downsample_config($1::jsonb)" + downsamplePrefix = "ds_" // Stands for downsample_ + lockID = 55985173312278 // Choosen randomly ) type Config struct { - Interval day.Duration `yaml:"interval"` - Retention day.Duration `yaml:"retention"` - 
shouldRefresh bool + Interval day.Duration `yaml:"interval"` + Retention day.Duration `yaml:"retention"` } func (c Config) Name() string { - return downsamplePrefix + c.Interval.Text() + return downsamplePrefix + day.String(c.Interval) } func SetState(ctx context.Context, conn *pgx.Conn, state bool) error { - _, err := conn.Exec(ctx, setDownsamplingStateSQL, state) + _, err := conn.Exec(ctx, setGlobalDownsamplingStateSQL, state) if err != nil { return fmt.Errorf("error setting downsampling state: %w", err) } return nil } -// Sync updates the downsampling cfgs in the DB in accordance with the given new cfgs. It: -// 1. Creates of new downsampling cfgs that are not in the database -// 2. Updates retention duration of downsampling cfgs that are present in the database but with a different retention duration -// 3. Enables refreshing of downsampling cfgs in the database that are in the new cfgs but were previously disabled -// 4. Disables refreshing of downsampling cfgs in the database that were not found in the new cfgs +type cfgWithName struct { + Name string `json:"schema_name"` + Interval string `json:"ds_interval"` + Retention string `json:"retention"` +} + +// Sync the given downsampling configurations with the database. func Sync(ctx context.Context, conn *pgx.Conn, cfgs []Config) error { pgLock, err := util.NewPgAdvisoryLock(lockID, conn.Config().ConnString()) if err != nil { @@ -67,102 +66,20 @@ func Sync(ctx context.Context, conn *pgx.Conn, cfgs []Config) error { log.Error("msg", "error unlocking downsampling.Sync advisory_lock", "err", err.Error()) } }() - - newCfgs := make(map[string]Config) // These are the new downsampling cfgs that the user provided. 
Relation => schema_name: definition{} - for _, c := range cfgs { - newCfgs[c.Name()] = Config{Interval: c.Interval, Retention: c.Retention} - } - - existingCfgs, err := getExistingCfgs(ctx, conn) - if err != nil { - return fmt.Errorf("error fetching existing downsampling cfgs: %w", err) - } - - createOrUpdate := make(map[string]Config) - for newLabel, newCfg := range newCfgs { - if existingCfg, found := existingCfgs[newLabel]; found { - if !existingCfg.shouldRefresh || existingCfg.Retention.Duration() != newCfg.Retention.Duration() { - createOrUpdate[newLabel] = newCfg - } - if existingCfg.Interval.Duration() != newCfg.Interval.Duration() { - // This should never be the case since newlabel is schema specific. But we still check for safety purpose. - return fmt.Errorf("interval mismatch: existing interval %v, new interval %v", existingCfg.Interval.Duration(), newCfg.Interval.Duration()) - } - } else { - createOrUpdate[newLabel] = newCfg + var applyCfgs []cfgWithName + for i := range cfgs { + c := cfgs[i] + applyCfgs = append(applyCfgs, cfgWithName{Name: c.Name(), Interval: day.String(c.Interval), Retention: day.String(c.Retention)}) + } + if len(applyCfgs) > 0 { + str, err := json.Marshal(applyCfgs) + if err != nil { + return fmt.Errorf("error marshalling configs: %w", err) } - } - - if len(createOrUpdate) > 0 { - if err = createOrUpdateDownsampling(ctx, conn, createOrUpdate); err != nil { - return fmt.Errorf("error creating or updating given downsampling configs: %w", err) + fmt.Println("str", string(str)) + if _, err = conn.Exec(ctx, applyDownsampleConfigSQL, str); err != nil { + return fmt.Errorf("error applying downsample config: %w", err) } } - - if err = disable(ctx, conn, newCfgs, existingCfgs); err != nil { - return fmt.Errorf("error disabling downsampling configs: %w", err) - } - return nil -} - -func getExistingCfgs(ctx context.Context, conn *pgx.Conn) (map[string]Config, error) { - rows, err := conn.Query(ctx, "SELECT schema_name, resolution, retention, 
should_refresh FROM _prom_catalog.downsample") - if err != nil { - return nil, fmt.Errorf("querying existing resolutions: %w", err) - } - defer rows.Close() - - existingCfgs := make(map[string]Config) // These are the existing downsampling cfgs in the database. - for rows.Next() { - var ( - schemaName string - shouldRefresh bool - interval, retention time.Duration - ) - if err := rows.Scan(&schemaName, &interval, &retention, &shouldRefresh); err != nil { - return nil, fmt.Errorf("error scanning output rows for existing resolutions: %w", err) - } - existingCfgs[schemaName] = Config{Interval: day.Duration{T: interval}, Retention: day.Duration{T: retention}, shouldRefresh: shouldRefresh} - } - return existingCfgs, nil -} - -// createOrUpdateDownsampling does 3 things: -// 1. It creates new downsampling configurations that are given in 'cfgs' -// 2. It updates the retention of a downsampling configuration if it is present in the database with the same lName -// 3. It enables a downsampling if it was previously disabled -// Refer to _prom_catalog.create_or_update_downsampling($1, $2, $3) to learn more. -func createOrUpdateDownsampling(ctx context.Context, conn *pgx.Conn, cfgs map[string]Config) error { - var batch pgx.Batch - for lName, def := range cfgs { - batch.Queue(createOrUpdateDownsamplingSQL, lName, def.Interval.Duration(), def.Retention.Duration()) - } - results := conn.SendBatch(ctx, &batch) - if err := results.Close(); err != nil { - return fmt.Errorf("error closing batch: %w", err) - } - return nil -} - -// disable downsampling cfgs. 
-func disable(ctx context.Context, conn *pgx.Conn, newCfgs, existingCfgs map[string]Config) error { - disable := []string{} - for existingName := range existingCfgs { - if _, found := newCfgs[existingName]; !found { - disable = append(disable, existingName) - } - } - if len(disable) == 0 { - return nil - } - - var batch pgx.Batch - for _, n := range disable { - batch.Queue(updateDownsamplingStateForSQL, n, false) - } - results := conn.SendBatch(ctx, &batch) - if err := results.Close(); err != nil { - return fmt.Errorf("error closing batch: %w", err) - } return nil } diff --git a/pkg/internal/day/duration.go b/pkg/internal/day/duration.go index 6b7c6b18e1..0611d18939 100644 --- a/pkg/internal/day/duration.go +++ b/pkg/internal/day/duration.go @@ -20,13 +20,7 @@ const ( // Duration acts like a time.Duration with support for "d" unit // which is used for specifying number of days in duration. -// It stores the text of duration while parsing, which can be retrieved via Text(). -// This can be useful when we need to know the num of days user wanted, since -// this information is lost after parsing. -type Duration struct { - Txt string // Holds the original duration text. - T time.Duration -} +type Duration time.Duration // UnmarshalText unmarshals strings into DayDuration values while // handling the day unit. It leans heavily into time.ParseDuration. @@ -43,8 +37,7 @@ func (d *Duration) UnmarshalText(s []byte) error { return err } } - d.T = val - d.Txt = string(s) + *d = Duration(val) return nil } @@ -75,23 +68,8 @@ func handleDays(s []byte) (time.Duration, error) { } // String returns a string value of DayDuration. -func (d *Duration) String() string { - return d.T.String() -} - -// Text returns the original text received while parsing. -func (d *Duration) Text() string { - return d.Txt -} - -// Duration returns the parsed duration. -func (d *Duration) Duration() time.Duration { - return d.T -} - -// SetDuration returns the parsed duration. 
-func (d *Duration) SetDuration(t time.Duration) { - d.T = t +func (d Duration) String() string { + return time.Duration(d).String() } // StringToDayDurationHookFunc returns a mapstructure.DecodeHookFunc that @@ -118,3 +96,32 @@ func StringToDayDurationHookFunc() mapstructure.DecodeHookFunc { return d, nil } } + +// String returns the output in form of days:hours:mins:secs +func String(d Duration) string { + const day = int64(time.Hour * 24) + + remainder := int64(d) + days := remainder / day + remainder = remainder % day + hours := remainder / int64(time.Hour) + remainder = remainder % int64(time.Hour) + mins := remainder / int64(time.Minute) + remainder = remainder % int64(time.Minute) + secs := remainder / int64(time.Second) + + display := "" + if days != 0 { + display = fmt.Sprintf("%dd", days) + } + if hours != 0 { + display = fmt.Sprintf("%s%dh", display, hours) + } + if mins != 0 { + display = fmt.Sprintf("%s%dm", display, mins) + } + if secs != 0 { + display = fmt.Sprintf("%s%ds", display, secs) + } + return display +} diff --git a/pkg/internal/day/duration_test.go b/pkg/internal/day/duration_test.go new file mode 100644 index 0000000000..b307fc39c1 --- /dev/null +++ b/pkg/internal/day/duration_test.go @@ -0,0 +1,51 @@ +// This file and its contents are licensed under the Apache License 2.0. +// Please see the included NOTICE for copyright information and +// LICENSE for a copy of the license. 
+ +package day + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestString(t *testing.T) { + tcs := []struct { + in string + out string + }{ + { + in: "5m", + out: "5m", + }, { + in: "2.5m", + out: "2m30s", + }, { + in: "1.1d1s", + out: "1d2h24m1s", + }, { + in: "24d", + out: "24d", + }, { + in: "1.5d", + out: "1d12h", + }, { + in: "4d1h5m", + out: "4d1h5m", + }, { + in: "1000h", + out: "41d16h", + }, { + in: "1000h1m", + out: "41d16h1m", + }, + } + for _, tc := range tcs { + t.Run(tc.in, func(t *testing.T) { + var d Duration + require.NoError(t, d.UnmarshalText([]byte(tc.in))) + require.Equal(t, tc.out, String(d)) + }) + } +} diff --git a/pkg/pgmodel/querier/querier_sql_test.go b/pkg/pgmodel/querier/querier_sql_test.go index ce43cddc1f..cfaa072bab 100644 --- a/pkg/pgmodel/querier/querier_sql_test.go +++ b/pkg/pgmodel/querier/querier_sql_test.go @@ -370,7 +370,7 @@ func TestPGXQuerierQuery(t *testing.T) { }, }, { - name: "Simple query, metric name matcher, custom column", + name: "Should error for simple query, metric name matcher, custom column but no schema name", query: &prompb.Query{ StartTimestampMs: 1000, EndTimestampMs: 2000, @@ -379,6 +379,7 @@ func TestPGXQuerierQuery(t *testing.T) { {Type: prompb.LabelMatcher_EQ, Name: model.MetricNameLabelName, Value: "bar"}, }, }, + err: fmt.Errorf("get evaluation metadata: build subQueries: '__schema__' label not found"), result: []*prompb.TimeSeries{ { Labels: []prompb.Label{ diff --git a/pkg/pgmodel/querier/query_sample.go b/pkg/pgmodel/querier/query_sample.go index 59d59f2ba7..9aff1d0fe3 100644 --- a/pkg/pgmodel/querier/query_sample.go +++ b/pkg/pgmodel/querier/query_sample.go @@ -126,7 +126,7 @@ func fetchMultipleMetricsSamples(ctx context.Context, tools *queryTools, metadat // NOTE: this needs to be updated once we add support for storing // non-view metrics into multiple schemas. This also applies to // fetching downsampling data too. 
- if metadata.timeFilter.schema != schema.PromData { + if metadata.timeFilter.schema != schema.PromData && metadata.timeFilter.schema != "" { return nil, fmt.Errorf("__schema__ not allowed when fetching multiple metrics in single PromQL expression") } diff --git a/pkg/runner/flags_test.go b/pkg/runner/flags_test.go index 19317ea070..f6eee4ba49 100644 --- a/pkg/runner/flags_test.go +++ b/pkg/runner/flags_test.go @@ -281,12 +281,12 @@ startup: c.ListenAddr = "localhost:9201" c.AuthConfig.BasicAuthUsername = "promscale" c.AuthConfig.BasicAuthPassword = "my-password" - c.DatasetCfg.Metrics.ChunkInterval = day.Duration{T: 24 * time.Hour, Txt: "1d"} + c.DatasetCfg.Metrics.ChunkInterval = day.Duration(24 * time.Hour) c.DatasetCfg.Metrics.Compression = func(b bool) *bool { return &b }(false) - c.DatasetCfg.Metrics.HALeaseRefresh = day.Duration{T: 24 * time.Hour * 2, Txt: "2d"} - c.DatasetCfg.Metrics.HALeaseTimeout = day.Duration{T: 24 * time.Hour * 3, Txt: "3d"} - c.DatasetCfg.Metrics.RetentionPeriod = day.Duration{T: 24 * time.Hour * 4, Txt: "4d"} - c.DatasetCfg.Traces.RetentionPeriod = day.Duration{T: 24 * time.Hour * 5, Txt: "5d"} + c.DatasetCfg.Metrics.HALeaseRefresh = day.Duration(24 * time.Hour * 2) + c.DatasetCfg.Metrics.HALeaseTimeout = day.Duration(24 * time.Hour * 3) + c.DatasetCfg.Metrics.RetentionPeriod = day.Duration(24 * time.Hour * 4) + c.DatasetCfg.Traces.RetentionPeriod = day.Duration(24 * time.Hour * 5) c.DatasetConfig = "metrics:\n default_chunk_interval: 1h\n" return c }, diff --git a/pkg/tests/constants.go b/pkg/tests/constants.go index b8e5e49819..2de9dfdc02 100644 --- a/pkg/tests/constants.go +++ b/pkg/tests/constants.go @@ -18,5 +18,5 @@ func init() { PromscaleExtensionVersion = strings.TrimSpace(string(content)) PromscaleExtensionContainer = "ghcr.io/timescale/dev_promscale_extension:" + PromscaleExtensionVersion + "-ts2-pg14" - //PromscaleExtensionContainer = "local/dev_promscale_extension:head-ts2-pg14" // This will be removed once the PR against 
master is made. + PromscaleExtensionContainer = "local/dev_promscale_extension:head-ts2-pg14" // This will be removed once the PR against master is made. } diff --git a/pkg/tests/end_to_end_tests/config_dataset_test.go b/pkg/tests/end_to_end_tests/config_dataset_test.go index 112659ec20..86473e57b3 100644 --- a/pkg/tests/end_to_end_tests/config_dataset_test.go +++ b/pkg/tests/end_to_end_tests/config_dataset_test.go @@ -9,6 +9,7 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/stretchr/testify/require" "github.com/timescale/promscale/pkg/dataset" + "github.com/timescale/promscale/pkg/internal/day" ) func TestDatasetConfigApply(t *testing.T) { @@ -28,14 +29,14 @@ func TestDatasetConfigApply(t *testing.T) { cfg := dataset.Config{ Metrics: dataset.Metrics{ - ChunkInterval: dayDuration(4*time.Hour, "4h"), + ChunkInterval: day.Duration(4 * time.Hour), Compression: &disableCompression, - HALeaseRefresh: dayDuration(15*time.Second, "15s"), - HALeaseTimeout: dayDuration(2*time.Minute, "2m"), - RetentionPeriod: dayDuration(15*24*time.Hour, "15d"), + HALeaseRefresh: day.Duration(15 * time.Second), + HALeaseTimeout: day.Duration(2 * time.Minute), + RetentionPeriod: day.Duration(15 * 24 * time.Hour), }, Traces: dataset.Traces{ - RetentionPeriod: dayDuration(10*24*time.Hour, "10d"), + RetentionPeriod: day.Duration(10 * 24 * time.Hour), }, } diff --git a/pkg/tests/end_to_end_tests/metric_downsampling_test.go b/pkg/tests/end_to_end_tests/metric_downsampling_test.go index b5e69b4f45..46acd91145 100644 --- a/pkg/tests/end_to_end_tests/metric_downsampling_test.go +++ b/pkg/tests/end_to_end_tests/metric_downsampling_test.go @@ -20,7 +20,7 @@ import ( func TestMetricDownsampleSync(t *testing.T) { withDB(t, *testDatabase, func(db *pgxpool.Pool, t testing.TB) { downsamplingCfgs := []downsample.Config{ - {Interval: dayDuration(time.Minute*5, "5m"), Retention: dayDuration(time.Hour*24*30, "30d")}, + {Interval: day.Duration(time.Minute * 5), Retention: day.Duration(time.Hour * 24 * 
30)}, } pgCon, err := db.Acquire(context.Background()) @@ -35,15 +35,15 @@ func TestMetricDownsampleSync(t *testing.T) { require.NoError(t, err) verifyDownsamplingExistence(t, pc, "ds_5m", - downsamplingCfgs[0].Interval.Duration(), downsamplingCfgs[0].Retention.Duration(), false) + downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) - downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: dayDuration(time.Hour, "1h"), Retention: dayDuration(time.Hour*24*365, "365d")}) + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: day.Duration(time.Hour), Retention: day.Duration(time.Hour * 24 * 365)}) // Test 2: Check if 'ds_1h' downsampling is created. err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) verifyDownsamplingExistence(t, pc, "ds_1h", - downsamplingCfgs[1].Interval.Duration(), downsamplingCfgs[1].Retention.Duration(), false) + downsamplingCfgs[1].Interval, downsamplingCfgs[1].Retention, false) // Test 3: Remove the first entry and see if the entry is disabled or not. downsamplingCfgs = downsamplingCfgs[1:] @@ -51,50 +51,46 @@ func TestMetricDownsampleSync(t *testing.T) { require.NoError(t, err) // Check if ds_1h exists. verifyDownsamplingExistence(t, pc, "ds_1h", - downsamplingCfgs[0].Interval.Duration(), downsamplingCfgs[0].Retention.Duration(), false) + downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) // Check if ds_5m is disabled. verifyDownsamplingExistence(t, pc, "ds_5m", - time.Minute*5, time.Hour*24*30, true) + day.Duration(time.Minute*5), day.Duration(time.Hour*24*30), true) // Test 4: Update retention of ds_1h and check if the same is reflected in the DB. 
- downsamplingCfgs[0].Retention = dayDuration(time.Hour*24*500, "500d") + downsamplingCfgs[0].Retention = day.Duration(time.Hour * 24 * 500) err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) verifyDownsamplingExistence(t, pc, "ds_1h", - downsamplingCfgs[0].Interval.Duration(), downsamplingCfgs[0].Retention.Duration(), false) + downsamplingCfgs[0].Interval, downsamplingCfgs[0].Retention, false) // ds_5m should still be disabled. verifyDownsamplingExistence(t, pc, "ds_5m", - time.Minute*5, time.Hour*24*30, true) + day.Duration(time.Minute*5), day.Duration(time.Hour*24*30), true) // Test 5: Enable the ds_5m downsampling that was already in the database. - downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: dayDuration(time.Minute*5, "5m"), Retention: dayDuration(time.Hour*24*30, "30d")}) + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: day.Duration(time.Minute * 5), Retention: day.Duration(time.Hour * 24 * 30)}) err = downsample.Sync(ctx, pc, downsamplingCfgs) require.NoError(t, err) verifyDownsamplingExistence(t, pc, "ds_5m", - downsamplingCfgs[1].Interval.Duration(), downsamplingCfgs[1].Retention.Duration(), false) + downsamplingCfgs[1].Interval, downsamplingCfgs[1].Retention, false) // Test 6: Add a resolution similar to 5m, but with different unit. This should error. 
- downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: dayDuration(time.Second*300, "300s"), Retention: dayDuration(time.Hour*24*30, "30d")}) + downsamplingCfgs = append(downsamplingCfgs, downsample.Config{Interval: day.Duration(time.Second * 300), Retention: day.Duration(time.Hour * 24 * 30)}) err = downsample.Sync(ctx, pc, downsamplingCfgs) require.Error(t, err) }) } -func dayDuration(d time.Duration, text string) day.Duration { - return day.Duration{T: d, Txt: text} -} - -func verifyDownsamplingExistence(t testing.TB, pgCon *pgx.Conn, schemaName string, interval, retention time.Duration, shouldBeDisabled bool) { +func verifyDownsamplingExistence(t testing.TB, pgCon *pgx.Conn, schemaName string, interval, retention day.Duration, shouldBeDisabled bool) { var ( dSchemaName string dInterval time.Duration dRetention time.Duration dShouldRefresh bool ) - err := pgCon.QueryRow(context.Background(), "SELECT schema_name, resolution, retention, should_refresh FROM _prom_catalog.downsample WHERE schema_name = $1", schemaName).Scan(&dSchemaName, &dInterval, &dRetention, &dShouldRefresh) + err := pgCon.QueryRow(context.Background(), "SELECT schema_name, ds_interval, retention, should_refresh FROM _prom_catalog.downsample WHERE schema_name = $1", schemaName).Scan(&dSchemaName, &dInterval, &dRetention, &dShouldRefresh) require.NoError(t, err) require.Equal(t, schemaName, dSchemaName) require.Equal(t, schemaName, dSchemaName) - require.Equal(t, interval, dInterval) - require.Equal(t, retention, dRetention) + require.Equal(t, time.Duration(interval), dInterval) + require.Equal(t, time.Duration(retention), dRetention) require.Equal(t, shouldBeDisabled, !dShouldRefresh) } From c334d345d1b711bc713c4c5821a129c8755905c0 Mon Sep 17 00:00:00 2001 From: Harkishen-Singh Date: Mon, 9 Jan 2023 16:41:56 +0530 Subject: [PATCH 13/13] Add timeout for trying apply_downsample_config lock.
Signed-off-by: Harkishen-Singh --- EXTENSION_VERSION | 2 +- pkg/dataset/deepcopy_generated.go | 5 -- pkg/downsample/downsample.go | 42 ++++++++------- pkg/internal/day/duration.go | 54 +++++++++---------- pkg/internal/day/duration_test.go | 2 +- pkg/pgmodel/metrics/database/metric_series.go | 7 ++- pkg/tests/constants.go | 2 +- .../end_to_end_tests/continuous_agg_test.go | 8 +-- pkg/tests/end_to_end_tests/telemetry_test.go | 4 +- 9 files changed, 62 insertions(+), 64 deletions(-) diff --git a/EXTENSION_VERSION b/EXTENSION_VERSION index 1e5c905f50..318b82ecbb 100644 --- a/EXTENSION_VERSION +++ b/EXTENSION_VERSION @@ -1 +1 @@ -downsampling_new_config +feature_metric_rollup \ No newline at end of file diff --git a/pkg/dataset/deepcopy_generated.go b/pkg/dataset/deepcopy_generated.go index 8fde2e35a9..c9041f6857 100644 --- a/pkg/dataset/deepcopy_generated.go +++ b/pkg/dataset/deepcopy_generated.go @@ -34,15 +34,11 @@ func (in *Config) DeepCopy() *Config { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metrics) DeepCopyInto(out *Metrics) { *out = *in - out.ChunkInterval = in.ChunkInterval if in.Compression != nil { in, out := &in.Compression, &out.Compression *out = new(bool) **out = **in } - out.HALeaseRefresh = in.HALeaseRefresh - out.HALeaseTimeout = in.HALeaseTimeout - out.RetentionPeriod = in.RetentionPeriod if in.Downsampling != nil { in, out := &in.Downsampling, &out.Downsampling *out = new([]downsample.Config) @@ -68,7 +64,6 @@ func (in *Metrics) DeepCopy() *Metrics { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Traces) DeepCopyInto(out *Traces) { *out = *in - out.RetentionPeriod = in.RetentionPeriod return } diff --git a/pkg/downsample/downsample.go b/pkg/downsample/downsample.go index 3ea640e72e..cf213e2063 100644 --- a/pkg/downsample/downsample.go +++ b/pkg/downsample/downsample.go @@ -8,10 +8,12 @@ import ( "context" "encoding/json" "fmt" + "time" + "github.com/jackc/pgx/v4" + "github.com/pkg/errors" "github.com/timescale/promscale/pkg/internal/day" - "github.com/timescale/promscale/pkg/log" "github.com/timescale/promscale/pkg/util" ) @@ -28,15 +30,12 @@ type Config struct { } func (c Config) Name() string { - return downsamplePrefix + day.String(c.Interval) + return downsamplePrefix + c.Interval.String() } func SetState(ctx context.Context, conn *pgx.Conn, state bool) error { _, err := conn.Exec(ctx, setGlobalDownsamplingStateSQL, state) - if err != nil { - return fmt.Errorf("error setting downsampling state: %w", err) - } - return nil + return errors.WithMessage(err, "error setting downsampling state") } type cfgWithName struct { @@ -52,31 +51,38 @@ func Sync(ctx context.Context, conn *pgx.Conn, cfgs []Config) error { return fmt.Errorf("error getting lock for syncing downsampling config") } defer pgLock.Close() - got, err := pgLock.GetAdvisoryLock() // To prevent failure when multiple Promscale start at the same time. + + try := func() (bool, error) { + got, err := pgLock.GetAdvisoryLock() // To prevent failure when multiple Promscale start at the same time. + return got, errors.WithMessage(err, "error trying pg advisory_lock") + } + + got, err := try() if err != nil { - return fmt.Errorf("error trying pg advisory_lock") + return err } if !got { - // Some other Promscale instance is already working on the downsampling.Sync() - // Hence, we should skip. - return nil - } - defer func() { - if _, err = pgLock.Unlock(); err != nil { - log.Error("msg", "error unlocking downsampling.Sync advisory_lock", "err", err.Error()) + // Wait for sometime and try again. 
If we still did not get the lock, throw an error. + time.Sleep(time.Second * 5) + got, err = try() + if err != nil { + return err } - }() + if !got { + return fmt.Errorf("timeout: unable to take the advisory lock for syncing downsampling state") + } + } + var applyCfgs []cfgWithName for i := range cfgs { c := cfgs[i] - applyCfgs = append(applyCfgs, cfgWithName{Name: c.Name(), Interval: day.String(c.Interval), Retention: day.String(c.Retention)}) + applyCfgs = append(applyCfgs, cfgWithName{Name: c.Name(), Interval: c.Interval.String(), Retention: c.Retention.String()}) } if len(applyCfgs) > 0 { str, err := json.Marshal(applyCfgs) if err != nil { return fmt.Errorf("error marshalling configs: %w", err) } - fmt.Println("str", string(str)) if _, err = conn.Exec(ctx, applyDownsampleConfigSQL, str); err != nil { return fmt.Errorf("error applying downsample config: %w", err) } diff --git a/pkg/internal/day/duration.go b/pkg/internal/day/duration.go index 0611d18939..88d043da29 100644 --- a/pkg/internal/day/duration.go +++ b/pkg/internal/day/duration.go @@ -16,6 +16,7 @@ import ( const ( dayUnit = 'd' unknownUnitDErrorPrefix = `time: unknown unit "d"` + day = int64(time.Hour * 24) ) // Duration acts like a time.Duration with support for "d" unit @@ -69,7 +70,29 @@ func handleDays(s []byte) (time.Duration, error) { // String returns a string value of DayDuration. 
func (d Duration) String() string { - return time.Duration(d).String() + remainder := int64(d) + days := remainder / day + remainder = remainder % day + hours := remainder / int64(time.Hour) + remainder = remainder % int64(time.Hour) + mins := remainder / int64(time.Minute) + remainder = remainder % int64(time.Minute) + secs := remainder / int64(time.Second) + + display := "" + if days != 0 { + display = fmt.Sprintf("%dd", days) + } + if hours != 0 { + display = fmt.Sprintf("%s%dh", display, hours) + } + if mins != 0 { + display = fmt.Sprintf("%s%dm", display, mins) + } + if secs != 0 { + display = fmt.Sprintf("%s%ds", display, secs) + } + return display } // StringToDayDurationHookFunc returns a mapstructure.DecodeHookFunc that @@ -96,32 +119,3 @@ func StringToDayDurationHookFunc() mapstructure.DecodeHookFunc { return d, nil } } - -// String returns the output in form of days:hours:mins:secs -func String(d Duration) string { - const day = int64(time.Hour * 24) - - remainder := int64(d) - days := remainder / day - remainder = remainder % day - hours := remainder / int64(time.Hour) - remainder = remainder % int64(time.Hour) - mins := remainder / int64(time.Minute) - remainder = remainder % int64(time.Minute) - secs := remainder / int64(time.Second) - - display := "" - if days != 0 { - display = fmt.Sprintf("%dd", days) - } - if hours != 0 { - display = fmt.Sprintf("%s%dh", display, hours) - } - if mins != 0 { - display = fmt.Sprintf("%s%dm", display, mins) - } - if secs != 0 { - display = fmt.Sprintf("%s%ds", display, secs) - } - return display -} diff --git a/pkg/internal/day/duration_test.go b/pkg/internal/day/duration_test.go index b307fc39c1..c19b95602b 100644 --- a/pkg/internal/day/duration_test.go +++ b/pkg/internal/day/duration_test.go @@ -45,7 +45,7 @@ func TestString(t *testing.T) { t.Run(tc.in, func(t *testing.T) { var d Duration require.NoError(t, d.UnmarshalText([]byte(tc.in))) - require.Equal(t, tc.out, String(d)) + require.Equal(t, tc.out, d.String()) 
}) } } diff --git a/pkg/pgmodel/metrics/database/metric_series.go b/pkg/pgmodel/metrics/database/metric_series.go index fd84fc63ee..bc23b5e812 100644 --- a/pkg/pgmodel/metrics/database/metric_series.go +++ b/pkg/pgmodel/metrics/database/metric_series.go @@ -6,6 +6,8 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + + "github.com/timescale/promscale/pkg/internal/day" "github.com/timescale/promscale/pkg/pgxconn" "github.com/timescale/promscale/pkg/util" ) @@ -57,8 +59,9 @@ INNER JOIN timescaledb_information.job_stats js ON ( j.job_id = js.job_id AND j. if err != nil { return fmt.Errorf("error scanning values for execute_caggs_refresh_policy: %w", err) } - caggsRefreshSuccess.With(prometheus.Labels{"refresh_interval": refreshInterval.String()}).Set(float64(success)) - caggsRefreshTotal.With(prometheus.Labels{"refresh_interval": refreshInterval.String()}).Set(float64(total)) + tmp := day.Duration(refreshInterval) // This allows label values to have 25h -> 1d1h, which is easier to understand and matches more to the user's original input. + caggsRefreshSuccess.With(prometheus.Labels{"refresh_interval": tmp.String()}).Set(float64(success)) + caggsRefreshTotal.With(prometheus.Labels{"refresh_interval": tmp.String()}).Set(float64(total)) } return nil }, diff --git a/pkg/tests/constants.go b/pkg/tests/constants.go index 2de9dfdc02..b8e5e49819 100644 --- a/pkg/tests/constants.go +++ b/pkg/tests/constants.go @@ -18,5 +18,5 @@ func init() { PromscaleExtensionVersion = strings.TrimSpace(string(content)) PromscaleExtensionContainer = "ghcr.io/timescale/dev_promscale_extension:" + PromscaleExtensionVersion + "-ts2-pg14" - PromscaleExtensionContainer = "local/dev_promscale_extension:head-ts2-pg14" // This will be removed once the PR against master is made. + //PromscaleExtensionContainer = "local/dev_promscale_extension:head-ts2-pg14" // This will be removed once the PR against master is made. 
} diff --git a/pkg/tests/end_to_end_tests/continuous_agg_test.go b/pkg/tests/end_to_end_tests/continuous_agg_test.go index 56883b6d29..e82204f69c 100644 --- a/pkg/tests/end_to_end_tests/continuous_agg_test.go +++ b/pkg/tests/end_to_end_tests/continuous_agg_test.go @@ -43,7 +43,7 @@ func TestContinuousAggDownsampling(t *testing.T) { }{ { name: "Query non-existant column, empty result", - query: `cagg{__column__="nonexistant"}`, + query: `cagg{__schema__="cagg_schema",__column__="nonexistant"}`, startMs: startTime, endMs: endTime, stepMs: 360 * 1000, @@ -78,7 +78,7 @@ func TestContinuousAggDownsampling(t *testing.T) { }, { name: "Query max column", - query: `cagg{__column__="max",instance="1"}`, + query: `cagg{__schema__="cagg_schema",__column__="max",instance="1"}`, startMs: startTime, endMs: startTime + 4*3600*1000 - 1, // -1ms to exclude fifth value stepMs: 3600 * 1000, @@ -104,7 +104,7 @@ func TestContinuousAggDownsampling(t *testing.T) { }, { name: "Query min column", - query: `cagg{__column__="min",instance="1"}`, + query: `cagg{__schema__="cagg_schema",__column__="min",instance="1"}`, startMs: startTime, endMs: startTime + 4*3600*1000 - 1, // -1ms to exclude fifth value stepMs: 3600 * 1000, @@ -130,7 +130,7 @@ func TestContinuousAggDownsampling(t *testing.T) { }, { name: "Query avg column", - query: `cagg{__column__="avg",instance="1"}`, + query: `cagg{__schema__="cagg_schema",__column__="avg",instance="1"}`, startMs: startTime, endMs: startTime + 4*3600*1000 - 1, // -1ms to exclude fifth value stepMs: 3600 * 1000, diff --git a/pkg/tests/end_to_end_tests/telemetry_test.go b/pkg/tests/end_to_end_tests/telemetry_test.go index f4c531b01a..141a88d28e 100644 --- a/pkg/tests/end_to_end_tests/telemetry_test.go +++ b/pkg/tests/end_to_end_tests/telemetry_test.go @@ -288,8 +288,8 @@ func TestTelemetrySQLStats(t *testing.T) { require.NoError(t, engine.Sync()) err = conn.QueryRow(context.Background(), "SELECT value FROM _timescaledb_catalog.metadata WHERE key = 
'promscale_metrics_total' AND value IS NOT NULL").Scan(&metrics) - require.NoError(t, err) - require.Equal(t, "0", metrics) // Without promscale_extension, this will give error saying "no rows in result set". + require.NoError(t, err) // Without promscale_extension, this will give error saying "no rows in result set". + require.Equal(t, "0", metrics) // Add dummy metric. _, err = conn.Exec(context.Background(), "SELECT _prom_catalog.create_metric_table('test_metric')")