From 99c8361d093ad6bc71c06af2a90e73dc79752dc0 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 31 Jul 2023 21:27:07 +0400 Subject: [PATCH] restore RBAC / configs, refactor restarting clickhouse-server via SYSTEM SHUTDOWN, add `--rbac-only` and `--configs-only` options to the `create`, `upload`, `download`, `restore` commands. fix https://github.com/Altinity/clickhouse-backup/issues/706 refactor integration_test.go, split containers into clickhouse-backup + clickhouse, fix TestSkipNotExistsTable for 19.1+ versions --- ChangeLog.md | 1 + Dockerfile | 1 + ReadMe.md | 7 +- cmd/clickhouse-backup/main.go | 52 ++- pkg/backup/create.go | 75 ++-- pkg/backup/create_remote.go | 4 +- pkg/backup/restore.go | 84 +++- pkg/backup/restore_remote.go | 4 +- pkg/backup/upload.go | 4 +- pkg/backup/watch.go | 6 +- pkg/clickhouse/clickhouse.go | 2 +- pkg/config/config.go | 2 +- pkg/server/server.go | 28 +- pkg/storage/gcs.go | 36 +- pkg/storage/s3.go | 2 +- test/integration/clickhouse-keeper.xml | 2 +- test/integration/config-azblob-embedded.yml | 4 +- test/integration/config-azblob.yml | 2 +- test/integration/config-custom-kopia.yml | 8 +- test/integration/config-custom-restic.yml | 8 +- test/integration/config-custom-rsync.yml | 8 +- test/integration/config-database-mapping.yml | 2 +- test/integration/config-ftp.yaml | 2 +- test/integration/config-gcs.yml | 2 +- test/integration/config-s3-embedded.yml | 4 +- test/integration/config-s3-fips.yml | 2 +- test/integration/config-s3-nodelete.yml | 2 +- test/integration/config-s3-plain-embedded.yml | 4 +- test/integration/config-s3.yml | 6 +- test/integration/config-sftp-auth-key.yaml | 2 +- .../config-sftp-auth-password.yaml | 2 +- test/integration/docker-compose.yml | 83 +++- test/integration/docker-compose_advanced.yml | 95 ++++- test/integration/integration_test.go | 383 +++++++++--------- test/integration/kopia/init.sh | 1 + test/integration/kopia/upload.sh | 2 +- test/integration/restic/init.sh | 1 + test/integration/restic/upload.sh | 2 +- test/integration/rsync/delete.sh | 2 +- test/integration/rsync/download.sh | 4 +- test/integration/rsync/init.sh | 6 + test/integration/rsync/list.sh | 2 +- test/integration/rsync/settings.sh | 5 - test/integration/rsync/upload.sh | 4 +- test/integration/ssl.xml | 2 +- .../docker-compose/docker-compose.yml | 2 - .../tests/snapshots/cli.py.cli.snapshot | 2 +- 47 files changed, 582 insertions(+), 382 deletions(-) create mode 100644 test/integration/rsync/init.sh delete mode 100755 test/integration/rsync/settings.sh diff --git a/ChangeLog.md b/ChangeLog.md index 03dab293..b9fce036 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -2,6 +2,7 @@ IMPROVEMENTS - first implementation for properly backup S3/GCS/Azure disks, support server-side copy to back up bucket during `clickhouse-backup` create and during `clickhouse-backup restore`, requires add `object_disk_path` to `s3`,`gcs`,`azblob` section, fix [447](https://github.com/Altinity/clickhouse-backup/issues/447) - Implementation blacklist for table engines during backup / download / upload / restore [537](https://github.com/Altinity/clickhouse-backup/issues/537) +- restore RBAC / configs, refactor restarting clickhouse-server via `sql:SYSTEM SHUTDOWN` or `exec:systemctl restart clickhouse-server`, add `--rbac-only` and `--configs-only` options to the `create`, `upload`, `download`, `restore` commands, fix [706](https://github.com/Altinity/clickhouse-backup/issues/706)
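For reviewers, a minimal sketch of how the new flags compose (a hedged illustration: the backup names are arbitrary, the flags are the ones this patch adds to the CLI below):

```sh
# create a backup containing only RBAC objects, then restore it;
# data is skipped, schema is included only when --schema is also passed
clickhouse-backup create --rbac-only my_rbac_backup
clickhouse-backup restore --rbac-only my_rbac_backup

# the same pattern applies to 'clickhouse-server' configuration files
clickhouse-backup create --configs-only my_configs_backup
clickhouse-backup restore --configs-only my_configs_backup
```

Per the commit subject the options also apply to the remote variants (`create_remote`, `restore_remote`), whose flag definitions appear in the `cmd/clickhouse-backup/main.go` changes below.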
BUG FIXES - fix possible create backup failures during UNFREEZE not exists tables, affected 2.2.7+ version, fix [704](https://github.com/Altinity/clickhouse-backup/issues/704) diff --git a/Dockerfile b/Dockerfile index 4987e823..bd1381e8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -104,6 +104,7 @@ MAINTAINER Eugene Klimov RUN apt-get update && apt-get install -y gpg && wget -qO- https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg && \ echo "deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] http://packages.kopia.io/apt/ stable main" > /etc/apt/sources.list.d/kopia.list && \ + wget -c "https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)" -O /usr/bin/yq && chmod +x /usr/bin/yq && \ apt-get update -y && \ apt-get install -y ca-certificates tzdata bash curl restic rsync rclone jq gpg kopia && \ update-ca-certificates && \ diff --git a/ReadMe.md b/ReadMe.md index 811e1fa6..c6f63256 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -409,7 +409,12 @@ clickhouse: log_sql_queries: true # CLICKHOUSE_LOG_SQL_QUERIES, enable logging `clickhouse-backup` SQL queries on `system.query_log` table inside clickhouse-server debug: false # CLICKHOUSE_DEBUG config_dir: "/etc/clickhouse-server" # CLICKHOUSE_CONFIG_DIR - restart_command: "systemctl restart clickhouse-server" # CLICKHOUSE_RESTART_COMMAND, use this command when restoring with --rbac or --configs options + # CLICKHOUSE_RESTART_COMMAND, use this command when restoring with --rbac, --rbac-only or --configs, --configs-only options + # the command will be split by `;` and executed one by one, all errors will be logged and ignored + # available prefixes: + # - sql: will execute an SQL query + # - exec: will execute a command via the shell + restart_command: "sql:SYSTEM SHUTDOWN" ignore_not_exists_error_during_freeze: true # CLICKHOUSE_IGNORE_NOT_EXISTS_ERROR_DURING_FREEZE, helps to avoid backup failures when running frequent CREATE / DROP tables and databases during backup, `clickhouse-backup` will ignore `code: 60` and `code: 81` errors during execution of `ALTER TABLE ... FREEZE`
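To make the `sql:` / `exec:` prefix convention above concrete, a minimal `restart_command` sketch (the SQL chain mirrors the one this patch puts into `test/integration/config-s3.yml`; adjust the statements to your environment):

```yaml
clickhouse:
  # entries are split on `;` and executed in order; errors are logged and ignored
  restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN"
  # or shell out instead of running SQL (the new default in pkg/config/config.go):
  # restart_command: "exec:systemctl restart clickhouse-server"
```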
check_replicas_before_attach: true # CLICKHOUSE_CHECK_REPLICAS_BEFORE_ATTACH, helps avoiding concurrent ATTACH PART execution when restoring ReplicatedMergeTree tables use_embedded_backup_restore: false # CLICKHOUSE_USE_EMBEDDED_BACKUP_RESTORE, use BACKUP / RESTORE SQL statements instead of regular SQL queries to use features of modern ClickHouse server versions diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 8521fed8..fcaac668 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -90,7 +90,7 @@ func main() { Description: "Create new backup", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateBackup(c.Args().First(), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("configs"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateBackup(c.Args().First(), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -111,7 +111,7 @@ cli.BoolFlag{ Name: "schema, s", Hidden: false, - Usage: "Backup schemas only", + Usage: "Backup schemas only, will skip data", }, cli.BoolFlag{ Name: "rbac, backup-rbac, do-backup-rbac", @@ -123,6 +123,16 @@ Hidden: false, Usage: "Backup 'clickhouse-server' configuration files", }, + cli.BoolFlag{ + Name: "rbac-only", + Hidden: false, + Usage: "Backup RBAC related objects only, will skip data, will backup schema only if --schema added", + }, + cli.BoolFlag{ + Name: "configs-only", + Hidden: false, + Usage: "Backup 'clickhouse-server' configuration files only, will skip data, will backup schema only if --schema added", + }, cli.BoolFlag{ Name: "skip-check-parts-columns", Hidden: false, @@ -137,7 +147,7 @@ Description: "Create and upload", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.CreateToRemote(c.Args().First(), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("configs"), c.Bool("resume"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) + return b.CreateToRemote(c.Args().First(), c.String("diff-from"), c.String("diff-from-remote"), c.String("t"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), c.Bool("skip-check-parts-columns"), version, c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -168,7 +178,7 @@ cli.BoolFlag{ Name: "schema, s", Hidden: false, - Usage: "Backup and upload metadata schema only", + Usage: "Backup and upload metadata schema only, will skip data backup", }, cli.BoolFlag{ Name: "rbac, backup-rbac, do-backup-rbac", @@ -180,6 +190,16 @@ Hidden: false, Usage: "Backup and upload 'clickhouse-server' configuration files", }, + cli.BoolFlag{ + Name: "rbac-only", + Hidden: false, + Usage: "Backup RBAC related objects only, will skip data, will backup schema only if --schema added", + }, + cli.BoolFlag{ + Name: "configs-only", + Hidden: false, + Usage: "Backup 'clickhouse-server' configuration files only, will skip data, will backup schema only if --schema added", + }, cli.BoolFlag{ Name: "resume, resumable", Hidden: false,
@@ -290,7 +310,7 @@ func main() { UsageText: "clickhouse-backup restore [-t, --tables=<db>.<table>] [-m, --restore-database-mapping=<originDB>:<targetDB>[,<...>]] [--partitions=<partitions_names>] [-s, --schema] [-d, --data] [--rm, --drop] [-i, --ignore-dependencies] [--rbac] [--configs] <backup_name>", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("d"), c.Bool("rm"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("configs"), c.Int("command-id")) + return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("d"), c.Bool("rm"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -343,6 +363,16 @@ func main() { Hidden: false, Usage: "Restore 'clickhouse-server' CONFIG related files", }, + cli.BoolFlag{ + Name: "rbac-only", + Hidden: false, + Usage: "Restore RBAC related objects only, will skip data, will restore schema only if --schema added", + }, + cli.BoolFlag{ + Name: "configs-only", + Hidden: false, + Usage: "Restore 'clickhouse-server' configuration files only, will skip data, will restore schema only if --schema added", + }, ), }, { @@ -351,7 +381,7 @@ UsageText: "clickhouse-backup restore_remote [--schema] [--data] [-t, --tables=<db>.<table>] [-m, --restore-database-mapping=<originDB>:<targetDB>[,<...>]] [--partitions=<partitions_names>] [--rm, --drop] [-i, --ignore-dependencies] [--rbac] [--configs] [--skip-rbac] [--skip-configs] [--resumable] <backup_name>",
Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.RestoreFromRemote(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("d"), c.Bool("rm"), c.Bool("i"), c.Bool("rbac"), c.Bool("configs"), c.Bool("resume"), c.Int("command-id")) + return b.RestoreFromRemote(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("partitions"), c.Bool("s"), c.Bool("d"), c.Bool("rm"), c.Bool("i"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), c.Int("command-id")) }, Flags: append(cliapp.Flags, cli.StringFlag{ @@ -404,6 +434,16 @@ func main() { Hidden: false, Usage: "Download and Restore 'clickhouse-server' CONFIG related files", }, + cli.BoolFlag{ + Name: "rbac-only", + Hidden: false, + Usage: "Restore RBAC related objects only, will skip data, will restore schema only if --schema added", + }, + cli.BoolFlag{ + Name: "configs-only", + Hidden: false, + Usage: "Restore 'clickhouse-server' configuration files only, will skip data, will restore schema only if --schema added", + }, cli.BoolFlag{ Name: "resume, resumable", Hidden: false, diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 54c23466..62085029 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -50,7 +50,7 @@ func NewBackupName() string { // CreateBackup - create new backup of all tables matched by tablePattern // If backupName is empty string will use default backup name -func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []string, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { +func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []string, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, skipCheckPartsColumns bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err } @@ -59,7 +59,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st defer cancel() startBackup := time.Now() - doBackupData := !schemaOnly + doBackupData := !schemaOnly && !rbacOnly && !configsOnly if backupName == "" { backupName = NewBackupName() } @@ -115,9 +115,9 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st partitionsIdMap, partitionsNameList := partition.ConvertPartitionsToIdsMapAndNamesList(ctx, b.ch, tables, nil, partitions) // create if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitionsNameList, partitionsIdMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, log, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitionsNameList, partitionsIdMap, schemaOnly, createRBAC, createConfigs, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, log, startBackup, version) } else { - err = b.createBackupLocal(ctx, backupName, partitionsIdMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, diskTypes, allDatabases, allFunctions, log, startBackup) + err = b.createBackupLocal(ctx, backupName, partitionsIdMap, tables,
doBackupData, schemaOnly, createRBAC, rbacOnly, createConfigs, configsOnly, version, disks, diskMap, diskTypes, allDatabases, allFunctions, log, startBackup) } if err != nil { return err @@ -130,7 +130,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st return nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry, startBackup time.Time) error { +func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, createRBAC, rbacOnly bool, createConfigs, configsOnly bool, version string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry, startBackup time.Time) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -199,45 +199,46 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } } log.Debug("create metadata") - - metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ - Table: table.Name, - Database: table.Database, - Query: table.CreateTableQuery, - TotalBytes: table.TotalBytes, - Size: realSize, - Parts: disksToPartsMap, - Mutations: inProgressMutations, - MetadataOnly: schemaOnly, - }, disks) - if err != nil { - if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Error(removeBackupErr.Error()) + if schemaOnly || doBackupData { + metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ + Table: table.Name, + Database: table.Database, + Query: table.CreateTableQuery, + TotalBytes: table.TotalBytes, + Size: realSize, + Parts: disksToPartsMap, + Mutations: inProgressMutations, + MetadataOnly: schemaOnly, + }, disks) + if err != nil { + if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { + log.Error(removeBackupErr.Error()) + } + return err } - return err + backupMetadataSize += metadataSize + tableMetas = append(tableMetas, metadata.TableTitle{ + Database: table.Database, + Table: table.Name, + }) } - backupMetadataSize += metadataSize - tableMetas = append(tableMetas, metadata.TableTitle{ - Database: table.Database, - Table: table.Name, - }) log.Infof("done") } } backupRBACSize, backupConfigSize := uint64(0), uint64(0) - if rbacOnly { - if backupRBACSize, err = b.createRBACBackup(ctx, backupPath, disks); err != nil { + if createRBAC || rbacOnly { + if backupRBACSize, err = b.createBackupRBAC(ctx, backupPath, disks); err != nil { log.Errorf("error during do RBAC backup: %v", err) } else { - log.WithField("size", utils.FormatBytes(backupRBACSize)).Info("done createRBACBackup") + log.WithField("size", utils.FormatBytes(backupRBACSize)).Info("done createBackupRBAC") } } - if configsOnly { - if backupConfigSize, err = b.createConfigBackup(ctx, backupPath); err != nil { + if createConfigs || configsOnly { + if backupConfigSize, err = b.createBackupConfigs(ctx, 
backupPath); err != nil { log.Errorf("error during do CONFIG backup: %v", err) } else { - log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createConfigBackup") + log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createBackupConfigs") } } @@ -249,11 +250,11 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, log *apexLog.Entry, startBackup time.Time, backupVersion string) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, schemaOnly, createRBAC, createConfigs bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, log *apexLog.Entry, startBackup time.Time, backupVersion string) error { if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists { return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) } - if rbacOnly || configsOnly { + if createRBAC || createConfigs { return fmt.Errorf("`use_embedded_backup_restore: true` doesn't support --rbac, --configs parameters") } l := 0 @@ -412,8 +413,8 @@ func (b *Backuper) getPartsFromBackupDisk(backupPath string, table clickhouse.Ta return parts, nil } -func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (uint64, error) { - log := b.log.WithField("logger", "createConfigBackup") +func (b *Backuper) createBackupConfigs(ctx context.Context, backupPath string) (uint64, error) { + log := b.log.WithField("logger", "createBackupConfigs") select { case <-ctx.Done(): return 0, ctx.Err() @@ -431,8 +432,8 @@ func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (u } } -func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disks []clickhouse.Disk) (uint64, error) { - log := b.log.WithField("logger", "createRBACBackup") +func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disks []clickhouse.Disk) (uint64, error) { + log := b.log.WithField("logger", "createBackupRBAC") select { case <-ctx.Done(): return 0, ctx.Err() diff --git a/pkg/backup/create_remote.go b/pkg/backup/create_remote.go index 1d246ccb..863a05e4 100644 --- a/pkg/backup/create_remote.go +++ b/pkg/backup/create_remote.go @@ -6,7 +6,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/status" ) -func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, resume bool, version string, commandId int) error { +func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePattern string, partitions []string, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, resume bool, version string, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -16,7 +16,7 @@ 
func (b *Backuper) CreateToRemote(backupName, diffFrom, diffFromRemote, tablePat if backupName == "" { backupName = NewBackupName() } - if err := b.CreateBackup(backupName, tablePattern, partitions, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, version, commandId); err != nil { + if err := b.CreateBackup(backupName, tablePattern, partitions, schemaOnly, backupRBAC, rbacOnly, backupConfigs, configsOnly, skipCheckPartsColumns, version, commandId); err != nil { return err } if err := b.Upload(backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 35f6e00e..21df7c9d 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -34,7 +34,7 @@ import ( var CreateDatabaseRE = regexp.MustCompile(`(?m)^CREATE DATABASE (\s*)(\S+)(\s*)`) // Restore - restore tables matched by tablePattern from backupName -func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropTable, ignoreDependencies, rbacOnly, configsOnly bool, commandId int) error { +func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly bool, commandId int) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -116,7 +116,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if len(backupMetadata.Tables) == 0 { log.Warnf("'%s' doesn't contains tables for restore", backupName) - if (!rbacOnly) && (!configsOnly) { + if (!restoreRBAC) && (!restoreConfigs) { return nil } } @@ -124,13 +124,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } needRestart := false - if rbacOnly && !b.isEmbedded { + if (rbacOnly || restoreRBAC) && !b.isEmbedded { if err := b.restoreRBAC(ctx, backupName, disks); err != nil { return err } needRestart = true } - if configsOnly && !b.isEmbedded { + if (configsOnly || restoreConfigs) && !b.isEmbedded { if err := b.restoreConfigs(backupName, disks); err != nil { return err } @@ -138,23 +138,11 @@ } if needRestart { - log.Warnf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) - cmd, err := shellwords.Parse(b.ch.Config.RestartCommand) - if err != nil { + if err := b.restartClickHouse(ctx, backupName, log); err != nil { return err } - ctx, cancel := context.WithTimeout(ctx, 180*time.Second) - defer cancel() - log.Infof("run %s", b.ch.Config.RestartCommand) - var out []byte - if len(cmd) > 1 { - out, err = exec.CommandContext(ctx, cmd[0], cmd[1:]...).CombinedOutput() - } else { - out, err = exec.CommandContext(ctx, cmd[0]).CombinedOutput() - } - if err != nil { - log.Debug(string(out)) - return err + if rbacOnly || configsOnly { + return nil } } @@ -172,6 +160,64 @@ return nil } +func (b *Backuper) restartClickHouse(ctx context.Context, backupName string, log *apexLog.Entry) error { + log.Warnf("%s contains `access` or `configs` directory, so we need to execute %s", backupName, b.ch.Config.RestartCommand) + for _, cmd := range strings.Split(b.ch.Config.RestartCommand, ";") { + cmd = strings.Trim(cmd, " \t\r\n") + if strings.HasPrefix(cmd, "sql:") { +
cmd = strings.TrimPrefix(cmd, "sql:") + if err := b.ch.QueryContext(ctx, cmd); err != nil { + log.Warnf("restart sql: %s, error: %v", cmd, err) + } + } + if strings.HasPrefix(cmd, "exec:") { + cmd = strings.TrimPrefix(cmd, "exec:") + if err := b.executeShellCommandWithTimeout(ctx, cmd, log); err != nil { + return err + } + } + } + b.ch.Close() + closeCtx, cancel := context.WithTimeout(ctx, 180*time.Second) + defer cancel() + +breakByReconnect: + for i := 1; i <= 60; i++ { + select { + case <-closeCtx.Done(): + return fmt.Errorf("reconnect after '%s' timeout exceeded", b.ch.Config.RestartCommand) + default: + if err := b.ch.Connect(); err == nil { + break breakByReconnect + } + log.Infof("wait 3 seconds") + time.Sleep(3 * time.Second) + } + } + return nil +} + +func (b *Backuper) executeShellCommandWithTimeout(ctx context.Context, cmd string, log *apexLog.Entry) error { + shellCmd, err := shellwords.Parse(cmd) + if err != nil { + return err + } + shellCtx, shellCancel := context.WithTimeout(ctx, 180*time.Second) + defer shellCancel() + log.Infof("run %s", cmd) + var out []byte + if len(shellCmd) > 1 { + out, err = exec.CommandContext(shellCtx, shellCmd[0], shellCmd[1:]...).CombinedOutput() + } else { + out, err = exec.CommandContext(shellCtx, shellCmd[0]).CombinedOutput() + } + if err != nil { + log.Debug(string(out)) + log.Warnf("restart exec: %s, error: %v", cmd, err) + } + return nil +} + func (b *Backuper) restoreEmptyDatabase(ctx context.Context, targetDB, tablePattern string, database metadata.DatabasesMeta, dropTable, schemaOnly, ignoreDependencies bool) error { isMapped := false if targetDB, isMapped = b.cfg.General.RestoreDatabaseMapping[database.Name]; !isMapped { diff --git a/pkg/backup/restore_remote.go b/pkg/backup/restore_remote.go index a89c4cb8..b7781094 100644 --- a/pkg/backup/restore_remote.go +++ b/pkg/backup/restore_remote.go @@ -1,11 +1,11 @@ package backup -func (b *Backuper) RestoreFromRemote(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropTable, ignoreDependencies, rbacOnly, configsOnly, resume bool, commandId int) error { +func (b *Backuper) RestoreFromRemote(backupName, tablePattern string, databaseMapping, partitions []string, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, resume bool, commandId int) error { if err := b.Download(backupName, tablePattern, partitions, schemaOnly, resume, commandId); err != nil { // https://github.com/Altinity/clickhouse-backup/issues/625 if err != ErrBackupIsAlreadyExists { return err } } - return b.Restore(backupName, tablePattern, databaseMapping, partitions, schemaOnly, dataOnly, dropTable, ignoreDependencies, rbacOnly, configsOnly, commandId) + return b.Restore(backupName, tablePattern, databaseMapping, partitions, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, rbacOnly, restoreConfigs, configsOnly, commandId) } diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 59199244..b8a9c58b 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -459,7 +459,7 @@ func (b *Backuper) uploadAndArchiveBackupRelatedDir(ctx context.Context, localBa } remoteUploaded, err := b.dst.StatFile(ctx, remoteFile) if err != nil { - return 0, fmt.Errorf("can't check uploaded %s file: %v", remoteFile, err) + return 0, fmt.Errorf("can't check uploaded remoteFile: %s, error: %v", remoteFile, err) } if b.resume { b.resumableState.AppendToState(remoteFile, remoteUploaded.Size()) @@ -557,7 +557,7 @@ breakByError: } 
remoteFile, err := b.dst.StatFile(ctx, remoteDataFile) if err != nil { - return fmt.Errorf("can't check uploaded file: %v", err) + return fmt.Errorf("can't check uploaded remoteDataFile: %s, error: %v", remoteDataFile, err) } atomic.AddInt64(&uploadedBytes, remoteFile.Size()) if b.resume { diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index ecc1ad14..d482e5af 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -66,7 +66,7 @@ func (b *Backuper) ValidateWatchParams(watchInterval, fullInterval, watchBackupN // // - each watch-interval, run create_remote increment --diff-from=prev-name + delete local increment, even when upload failed // - save previous backup type incremental, next try will also incremental, until reach full interval -func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern string, partitions []string, schemaOnly, rbac, backupConfig, skipCheckPartsColumns bool, version string, commandId int, metrics metrics.APIMetricsInterface, cliCtx *cli.Context) error { +func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern string, partitions []string, schemaOnly, backupRBAC, backupConfigs, skipCheckPartsColumns bool, version string, commandId int, metrics metrics.APIMetricsInterface, cliCtx *cli.Context) error { ctx, cancel, err := status.Current.GetContextWithCancel(commandId) if err != nil { return err @@ -120,14 +120,14 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } if metrics != nil { createRemoteErr, createRemoteErrCount = metrics.ExecuteWithMetrics("create_remote", createRemoteErrCount, func() error { - return b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, false, version, commandId) + return b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) }) deleteLocalErr, deleteLocalErrCount = metrics.ExecuteWithMetrics("delete", deleteLocalErrCount, func() error { return b.RemoveBackupLocal(ctx, backupName, nil) }) } else { - createRemoteErr = b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, false, version, commandId) + createRemoteErr = b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, backupRBAC, false, backupConfigs, false, skipCheckPartsColumns, false, version, commandId) if createRemoteErr != nil { log.Errorf("create_remote %s return error: %v", backupName, createRemoteErr) createRemoteErrCount += 1 diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 814575db..8e86280d 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -633,7 +633,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name PartitionID string `ch:"partition_id"` } q := fmt.Sprintf("SELECT DISTINCT partition_id FROM `system`.`parts` WHERE database='%s' AND table='%s' %s", table.Database, table.Name, ch.Config.FreezeByPartWhere) - if err := ch.conn.Select(ctx, &partitions, q); err != nil { + if err := ch.SelectContext(ctx, &partitions, q); err != nil { return fmt.Errorf("can't get partitions for '%s.%s': %w", table.Database, table.Name, err) } withNameQuery := "" diff --git a/pkg/config/config.go b/pkg/config/config.go index 68398373..13a04763 100644 --- a/pkg/config/config.go +++ 
b/pkg/config/config.go @@ -507,7 +507,7 @@ func DefaultConfig() *Config { SyncReplicatedTables: false, LogSQLQueries: true, ConfigDir: "/etc/clickhouse-server/", - RestartCommand: "systemctl restart clickhouse-server", + RestartCommand: "exec:systemctl restart clickhouse-server", IgnoreNotExistsErrorDuringFreeze: true, CheckReplicasBeforeAttach: true, UseEmbeddedBackupRestore: false, diff --git a/pkg/server/server.go b/pkg/server/server.go index b0e0917e..895f91ca 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -184,7 +184,7 @@ func (api *APIServer) Restart() error { go func() { err = api.server.ListenAndServeTLS(api.config.API.CertificateFile, api.config.API.PrivateKeyFile) if err != nil { - if err == http.ErrServerClosed { + if errors.Is(err, http.ErrServerClosed) { log.Warnf("ListenAndServeTLS get signal: %s", err.Error()) } else { log.Fatalf("ListenAndServeTLS error: %s", err.Error()) @@ -195,7 +195,7 @@ func (api *APIServer) Restart() error { } else { go func() { if err = api.server.ListenAndServe(); err != nil { - if err == http.ErrServerClosed { + if errors.Is(err, http.ErrServerClosed) { log.Warnf("ListenAndServe get signal: %s", err.Error()) } else { log.Fatalf("ListenAndServe error: %s", err.Error()) @@ -808,8 +808,8 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) partitionsToBackup := make([]string, 0) backupName := backup.NewBackupName() schemaOnly := false - rbacOnly := false - configsOnly := false + createRBAC := false + createConfigs := false checkPartsColumns := true fullCommand := "create" query := r.URL.Query() @@ -828,14 +828,14 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) } } if rbac, exist := query["rbac"]; exist { - rbacOnly, _ = strconv.ParseBool(rbac[0]) - if rbacOnly { + createRBAC, _ = strconv.ParseBool(rbac[0]) + if createRBAC { fullCommand = fmt.Sprintf("%s --rbac", fullCommand) } } if configs, exist := query["configs"]; exist { - configsOnly, _ = strconv.ParseBool(configs[0]) - if configsOnly { + createConfigs, _ = strconv.ParseBool(configs[0]) + if createConfigs { fullCommand = fmt.Sprintf("%s --configs", fullCommand) } } @@ -861,7 +861,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) go func() { err, _ := api.metrics.ExecuteWithMetrics("create", 0, func() error { b := backup.NewBackuper(cfg) - return b.CreateBackup(backupName, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, checkPartsColumns, api.clickhouseBackupVersion, commandId) + return b.CreateBackup(backupName, tablePattern, partitionsToBackup, schemaOnly, createRBAC, false, createConfigs, false, checkPartsColumns, api.clickhouseBackupVersion, commandId) }) if err != nil { api.log.Errorf("API /backup/create error: %v", err) @@ -1151,8 +1151,8 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) dataOnly := false dropTable := false ignoreDependencies := false - rbacOnly := false - configsOnly := false + restoreRBAC := false + restoreConfigs := false fullCommand := "restore" query := r.URL.Query() @@ -1200,11 +1200,11 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) fullCommand += " --ignore-dependencies" } if _, exist := query["rbac"]; exist { - rbacOnly = true + restoreRBAC = true fullCommand += " --rbac" } if _, exist := query["configs"]; exist { - configsOnly = true + restoreConfigs = true fullCommand += " --configs" } @@ -1222,7 +1222,7 @@ func (api *APIServer) httpRestoreHandler(w 
http.ResponseWriter, r *http.Request) go func() { err, _ := api.metrics.ExecuteWithMetrics("restore", 0, func() error { b := backup.NewBackuper(api.config) - return b.Restore(name, tablePattern, databaseMappingToRestore, partitionsToBackup, schemaOnly, dataOnly, dropTable, ignoreDependencies, rbacOnly, configsOnly, commandId) + return b.Restore(name, tablePattern, databaseMappingToRestore, partitionsToBackup, schemaOnly, dataOnly, dropTable, ignoreDependencies, restoreRBAC, false, restoreConfigs, false, commandId) }) status.Current.Stop(commandId, err) if err != nil { diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 7a01000f..8801742c 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -3,7 +3,9 @@ package storage import ( "context" "encoding/base64" + "errors" "fmt" + "google.golang.org/api/iterator" "io" "net/http" "path" @@ -15,7 +17,6 @@ import ( "cloud.google.com/go/storage" "github.com/apex/log" - "google.golang.org/api/iterator" "google.golang.org/api/option" googleHTTPTransport "google.golang.org/api/transport/http" ) @@ -119,26 +120,25 @@ func (gcs *GCS) Walk(ctx context.Context, gcsPath string, recursive bool, proces }) for { object, err := it.Next() - switch err { - case nil: - if object.Prefix != "" { - if err := process(ctx, &gcsFile{ - name: strings.TrimPrefix(object.Prefix, rootPath), - }); err != nil { - return err - } - continue - } + if errors.Is(err, iterator.Done) { + return nil + } + if err != nil { + return err + } + if object.Prefix != "" { if err := process(ctx, &gcsFile{ - size: object.Size, - lastModified: object.Updated, - name: strings.TrimPrefix(object.Name, rootPath), + name: strings.TrimPrefix(object.Prefix, rootPath), }); err != nil { return err } - case iterator.Done: - return nil - default: + continue + } + if err := process(ctx, &gcsFile{ + size: object.Size, + lastModified: object.Updated, + name: strings.TrimPrefix(object.Name, rootPath), + }); err != nil { return err } } @@ -178,7 +178,7 @@ func (gcs *GCS) PutFile(ctx context.Context, key string, r io.ReadCloser) error func (gcs *GCS) StatFile(ctx context.Context, key string) (RemoteFile, error) { objAttr, err := gcs.client.Bucket(gcs.Config.Bucket).Object(path.Join(gcs.Config.Path, key)).Attrs(ctx) if err != nil { - if err == storage.ErrObjectNotExist { + if errors.Is(err, storage.ErrObjectNotExist) { return nil, ErrNotFound } return nil, err diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index cb373710..6945cbf3 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -144,7 +144,7 @@ func (s *S3) Connect(ctx context.Context) error { if s.Config.Debug { awsConfig.Logger = newS3Logger(s.Log) - awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequestWithBody | aws.LogResponseWithBody + awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponse } httpTransport := http.DefaultTransport diff --git a/test/integration/clickhouse-keeper.xml b/test/integration/clickhouse-keeper.xml index 5e83a464..9d819956 100644 --- a/test/integration/clickhouse-keeper.xml +++ b/test/integration/clickhouse-keeper.xml @@ -1,5 +1,5 @@ - 0.0.0.0 + 0.0.0.0 1 diff --git a/test/integration/config-azblob-embedded.yml b/test/integration/config-azblob-embedded.yml index 7e46d38e..6d440981 100644 --- a/test/integration/config-azblob-embedded.yml +++ b/test/integration/config-azblob-embedded.yml @@ -10,7 +10,7 @@ general: - "_temporary_and_external_tables.*" restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 
123?*%# МЯУ @@ -32,5 +32,5 @@ azblob: api: listen: :7171 create_integration_tables: true - integration_tables_host: "localhost" + integration_tables_host: "clickhouse-backup" allow_parallel: true diff --git a/test/integration/config-azblob.yml b/test/integration/config-azblob.yml index ce9b8043..29ef5be1 100644 --- a/test/integration/config-azblob.yml +++ b/test/integration/config-azblob.yml @@ -5,7 +5,7 @@ general: download_concurrency: 4 restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9000 restart_command: bash -c 'echo "FAKE RESTART"' azblob: diff --git a/test/integration/config-custom-kopia.yml b/test/integration/config-custom-kopia.yml index b6f65472..f2742a0a 100644 --- a/test/integration/config-custom-kopia.yml +++ b/test/integration/config-custom-kopia.yml @@ -10,15 +10,13 @@ general: restore_schema_on_cluster: "{cluster}" use_resumable_state: false clickhouse: - host: 127.0.0.1 - port: 9440 + host: clickhouse + port: 9000 username: backup password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true sync_replicated_tables: true timeout: 2s - restart_command: bash -c 'echo "FAKE RESTART"' + restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" custom: # all `kopia` uploads are incremental upload_command: /custom/kopia/upload.sh {{ .backupName }} diff --git a/test/integration/config-custom-restic.yml b/test/integration/config-custom-restic.yml index 8e422d98..053cf073 100644 --- a/test/integration/config-custom-restic.yml +++ b/test/integration/config-custom-restic.yml @@ -10,15 +10,13 @@ general: restore_schema_on_cluster: "{cluster}" use_resumable_state: false clickhouse: - host: 127.0.0.1 - port: 9440 + host: clickhouse + port: 9000 username: backup password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true sync_replicated_tables: true timeout: 2s - restart_command: bash -c 'echo "FAKE RESTART"' + restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" custom: upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }} download_command: /custom/restic/download.sh {{ .backupName }} diff --git a/test/integration/config-custom-rsync.yml b/test/integration/config-custom-rsync.yml index a58fc743..93813de5 100644 --- a/test/integration/config-custom-rsync.yml +++ b/test/integration/config-custom-rsync.yml @@ -10,15 +10,13 @@ general: restore_schema_on_cluster: "{cluster}" use_resumable_state: false clickhouse: - host: 127.0.0.1 - port: 9440 + host: clickhouse + port: 9000 username: backup password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true sync_replicated_tables: true timeout: 2s - restart_command: bash -c 'echo "FAKE RESTART"' + restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" custom: upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }} download_command: /custom/rsync/download.sh {{ .backupName }} diff --git a/test/integration/config-database-mapping.yml b/test/integration/config-database-mapping.yml index ff6ce138..d73b2ee9 100644 --- a/test/integration/config-database-mapping.yml +++ b/test/integration/config-database-mapping.yml @@ -7,7 +7,7 @@ general: restore_database_mapping: database1: default clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ diff --git a/test/integration/config-ftp.yaml b/test/integration/config-ftp.yaml index 2ab61e4c..0f0b6d8a 100644 --- a/test/integration/config-ftp.yaml +++ 
b/test/integration/config-ftp.yaml @@ -5,7 +5,7 @@ general: download_concurrency: 4 restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ diff --git a/test/integration/config-gcs.yml b/test/integration/config-gcs.yml index 33a18bd0..55996304 100644 --- a/test/integration/config-gcs.yml +++ b/test/integration/config-gcs.yml @@ -5,7 +5,7 @@ general: download_concurrency: 4 restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9000 restart_command: bash -c 'echo "FAKE RESTART"' gcs: diff --git a/test/integration/config-s3-embedded.yml b/test/integration/config-s3-embedded.yml index 7eada69f..d8071dcd 100644 --- a/test/integration/config-s3-embedded.yml +++ b/test/integration/config-s3-embedded.yml @@ -10,7 +10,7 @@ general: - "_temporary_and_external_tables.*" restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ @@ -34,5 +34,5 @@ s3: api: listen: :7171 create_integration_tables: true - integration_tables_host: "localhost" + integration_tables_host: "clickhouse-backup" allow_parallel: true diff --git a/test/integration/config-s3-fips.yml b/test/integration/config-s3-fips.yml index 528959e9..724835f8 100644 --- a/test/integration/config-s3-fips.yml +++ b/test/integration/config-s3-fips.yml @@ -10,7 +10,7 @@ general: - "_temporary_and_external_tables.*" restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ diff --git a/test/integration/config-s3-nodelete.yml b/test/integration/config-s3-nodelete.yml index 29eb97db..b5e093be 100644 --- a/test/integration/config-s3-nodelete.yml +++ b/test/integration/config-s3-nodelete.yml @@ -10,7 +10,7 @@ general: - "_temporary_and_external_tables.*" restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ diff --git a/test/integration/config-s3-plain-embedded.yml b/test/integration/config-s3-plain-embedded.yml index ee501cf7..3060aea1 100644 --- a/test/integration/config-s3-plain-embedded.yml +++ b/test/integration/config-s3-plain-embedded.yml @@ -10,7 +10,7 @@ general: - "_temporary_and_external_tables.*" restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ @@ -35,5 +35,5 @@ s3: api: listen: :7171 create_integration_tables: true - integration_tables_host: "localhost" + integration_tables_host: "clickhouse-backup" allow_parallel: true diff --git a/test/integration/config-s3.yml b/test/integration/config-s3.yml index b44faf67..29bd141d 100644 --- a/test/integration/config-s3.yml +++ b/test/integration/config-s3.yml @@ -13,7 +13,7 @@ clickhouse: # wrong disk name mapping for https://github.com/Altinity/clickhouse-backup/issues/676 disk_mapping: default-gp3: /var/lib/clickhouse - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ @@ -21,7 +21,7 @@ clickhouse: skip_verify: true sync_replicated_tables: true timeout: 2s - restart_command: bash -c 'echo "FAKE RESTART"' + restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" backup_mutations: true s3: access_key: access-key @@ -39,5 +39,5 @@ s3: api: listen: :7171 create_integration_tables: true - integration_tables_host: "localhost" + 
integration_tables_host: "clickhouse-backup" allow_parallel: true diff --git a/test/integration/config-sftp-auth-key.yaml b/test/integration/config-sftp-auth-key.yaml index a7bec689..d7037c85 100644 --- a/test/integration/config-sftp-auth-key.yaml +++ b/test/integration/config-sftp-auth-key.yaml @@ -4,7 +4,7 @@ general: upload_concurrency: 4 download_concurrency: 4 clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ diff --git a/test/integration/config-sftp-auth-password.yaml b/test/integration/config-sftp-auth-password.yaml index 11983de7..55191d5f 100644 --- a/test/integration/config-sftp-auth-password.yaml +++ b/test/integration/config-sftp-auth-password.yaml @@ -5,7 +5,7 @@ general: download_concurrency: 4 restore_schema_on_cluster: "{cluster}" clickhouse: - host: 127.0.0.1 + host: clickhouse port: 9440 username: backup password: meow=& 123?*%# МЯУ diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index a995ccbf..293e8829 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -11,7 +11,7 @@ services: ftp: image: docker.io/fauria/vsftpd:latest - container_name: ftp + hostname: ftp environment: FTP_USER: test_backup FTP_PASS: test_backup @@ -44,7 +44,7 @@ services: # todo need to reproduce download after upload # gcs: # image: fsouza/fake-gcs-server:latest -# container_name: gcs +# hostname: gcs # entrypoint: # - /bin/sh # command: @@ -85,7 +85,7 @@ services: zookeeper: image: docker.io/zookeeper:${ZOOKEEPER_VERSION:-latest} - container_name: zookeeper + hostname: zookeeper environment: ZOO_4LW_COMMANDS_WHITELIST: "*" networks: @@ -98,9 +98,59 @@ services: start_period: 2s + clickhouse-backup: + image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-1.1.54390} + hostname: clickhouse-backup + container_name: clickhouse-backup + user: root + entrypoint: + - /bin/bash + - -xce + - sleep infinity + healthcheck: + test: bash -c "exit 0" + interval: 30s + timeout: 1s + retries: 5 + start_period: 1s + environment: + CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-1.1.54394} + TZ: UTC + LOG_LEVEL: "${LOG_LEVEL:-info}" + S3_DEBUG: "${S3_DEBUG:-false}" + GCS_DEBUG: "${GCS_DEBUG:-false}" + FTP_DEBUG: "${FTP_DEBUG:-false}" + SFTP_DEBUG: "${SFTP_DEBUG:-false}" + CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}" + GOCOVERDIR: "/tmp/_coverage_/" +# fake-gcs-server +# STORAGE_EMULATOR_HOST: "http://gsc:8080" +# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false" +# FIPS + QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY} + QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY} + QA_AWS_BUCKET: ${QA_AWS_BUCKET} + QA_AWS_REGION: ${QA_AWS_REGION} +# https://github.com/Altinity/clickhouse-backup/issues/691: + AWS_ACCESS_KEY_ID: access-key + AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key + volumes_from: + - clickhouse + ports: + - "7171:7171" +# for delve debugger + - "40001:40001" + networks: + - clickhouse-backup + depends_on: + clickhouse: + condition: service_healthy + clickhouse: image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-1.1.54390} + hostname: clickhouse container_name: clickhouse + restart: always user: root environment: CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-1.1.54394} @@ -112,10 +162,10 @@ services: SFTP_DEBUG: "${SFTP_DEBUG:-false}" CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}" GOCOVERDIR: "/tmp/_coverage_/" - # fake-gcs-server -# STORAGE_EMULATOR_HOST: "http://gsc:8080" -# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false" - # FIPS +# 
fake-gcs-server +# STORAGE_EMULATOR_HOST: "http://gsc:8080" +# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false" +# FIPS QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY} QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY} QA_AWS_BUCKET: ${QA_AWS_BUCKET} @@ -129,27 +179,30 @@ services: QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" AWS_EC2_METADATA_DISABLED: "true" volumes: - - ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml +# clickhouse-backup related files required for some tests - ${CLICKHOUSE_BACKUP_BIN:-../../clickhouse-backup/clickhouse-backup-race}:/usr/bin/clickhouse-backup - ${CLICKHOUSE_BACKUP_BIN_FIPS:-../../clickhouse-backup/clickhouse-backup-race-fips}:/usr/bin/clickhouse-backup-fips - ./credentials.json:/etc/clickhouse-backup/credentials.json + - ./_coverage_/:/tmp/_coverage_/ +# for local debug + - ./install_delve.sh:/tmp/install_delve.sh +# clickhouse configuration + - ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml - ./server.crt:/etc/clickhouse-server/server.crt - ./server.key:/etc/clickhouse-server/server.key - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem - ./ssl.xml:/etc/clickhouse-server/config.d/ssl.xml - ./cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - - ./_coverage_/:/tmp/_coverage_/ + - /var/lib/clickhouse + - /hdd1_data + - /hdd2_data + - /hdd3_data # uncomment only when you need clickhouse logs # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log -# uncomment only for local debug -# - ./install_delve.sh:/tmp/install_delve.sh ports: - "8123:8123" - "9000:9000" - - "7171:7171" -# uncomment for delve debugger -# - "40001:40001" networks: - clickhouse-backup links: @@ -178,7 +231,7 @@ services: all_services_ready: image: hello-world depends_on: - clickhouse: + clickhouse-backup: condition: service_healthy networks: diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 5a192042..f9d7c1ac 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -11,7 +11,7 @@ services: # ftp: # image: docker.io/fauria/vsftpd:latest -# container_name: ftp +# hostname: ftp # environment: # FTP_USER: test_backup # FTP_PASS: test_backup @@ -25,7 +25,7 @@ services: ftp: image: docker.io/iradu/proftpd:latest - container_name: ftp + hostname: ftp environment: FTP_USER_NAME: "test_backup" FTP_USER_PASS: "test_backup" @@ -57,7 +57,7 @@ services: # todo need to reproduce download after upload # gcs: # image: fsouza/fake-gcs-server:latest -# container_name: gcs +# hostname: gcs # entrypoint: # - /bin/sh # command: @@ -99,7 +99,7 @@ services: mysql: image: docker.io/mysql:${MYSQL_VERSION:-latest} command: --default-authentication-plugin=mysql_native_password --gtid_mode=on --enforce_gtid_consistency=ON - container_name: mysql + hostname: mysql environment: MYSQL_ROOT_PASSWORD: "root" ports: @@ -113,7 +113,7 @@ services: pgsql: image: docker.io/postgres:${PGSQL_VERSION:-latest} - container_name: pgsql + hostname: pgsql environment: POSTGRES_USER: "root" POSTGRES_PASSWORD: "root" @@ -131,7 +131,7 @@ services: zookeeper: image: docker.io/clickhouse/clickhouse-keeper:${CLICKHOUSE_KEEPER_VERSION:-latest-alpine} - container_name: zookeeper + hostname: zookeeper volumes: - ./clickhouse-keeper.xml:/etc/clickhouse-keeper/conf.d/clickhouse-keeper.xml - /var/lib/clickhouse @@ -145,9 +145,59 @@ services: start_period: 2s + clickhouse-backup: + image:
docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-19.17} + hostname: clickhouse-backup + container_name: clickhouse-backup + user: root + entrypoint: + - /bin/bash + - -xce + - sleep infinity + healthcheck: + test: bash -c "exit 0" + interval: 30s + timeout: 1s + retries: 5 + start_period: 1s + environment: + CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-19.17} + TZ: UTC + LOG_LEVEL: "${LOG_LEVEL:-info}" + S3_DEBUG: "${S3_DEBUG:-false}" + GCS_DEBUG: "${GCS_DEBUG:-false}" + FTP_DEBUG: "${FTP_DEBUG:-false}" + SFTP_DEBUG: "${SFTP_DEBUG:-false}" + CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}" + GOCOVERDIR: "/tmp/_coverage_/" +# fake-gcs-server +# STORAGE_EMULATOR_HOST: "http://gsc:8080" +# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false" +# FIPS + QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY} + QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY} + QA_AWS_BUCKET: ${QA_AWS_BUCKET} + QA_AWS_REGION: ${QA_AWS_REGION} +# https://github.com/Altinity/clickhouse-backup/issues/691: + AWS_ACCESS_KEY_ID: access-key + AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key + volumes_from: + - clickhouse + ports: + - "7171:7171" +# for delve debugger + - "40001:40001" + networks: + - clickhouse-backup + depends_on: + clickhouse: + condition: service_healthy + clickhouse: image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-19.17} + hostname: clickhouse container_name: clickhouse + restart: always user: root environment: CLICKHOUSE_VERSION: ${CLICKHOUSE_VERSION:-19.17} @@ -159,10 +209,10 @@ services: SFTP_DEBUG: "${SFTP_DEBUG:-false}" CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}" GOCOVERDIR: "/tmp/_coverage_/" - # fake-gcs-server -# STORAGE_EMULATOR_HOST: "http://gsc:8080" -# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false" - # FIPS +# fake-gcs-server +# STORAGE_EMULATOR_HOST: "http://gsc:8080" +# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false" +# FIPS QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY} QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY} QA_AWS_BUCKET: ${QA_AWS_BUCKET} @@ -176,29 +226,34 @@ services: QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" AWS_EC2_METADATA_DISABLED: "true" volumes: - - ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml - - ./enable-access_management.xml:/etc/clickhouse-server/users.d/enable-access_management.xml +# clickhouse-backup related files required for some tests - ${CLICKHOUSE_BACKUP_BIN:-../../clickhouse-backup/clickhouse-backup-race}:/usr/bin/clickhouse-backup - ${CLICKHOUSE_BACKUP_BIN_FIPS:-../../clickhouse-backup/clickhouse-backup-race-fips}:/usr/bin/clickhouse-backup-fips - ./credentials.json:/etc/clickhouse-backup/credentials.json + - ./_coverage_/:/tmp/_coverage_/ +# for local debug + - ./install_delve.sh:/tmp/install_delve.sh +# clickhouse configuration + - ./dynamic_settings.sh:/docker-entrypoint-initdb.d/dynamic_settings.sh + - ./enable-access_management.xml:/etc/clickhouse-server/users.d/enable-access_management.xml + - ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml - ./server.crt:/etc/clickhouse-server/server.crt - ./server.key:/etc/clickhouse-server/server.key - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem - ./ssl.xml:/etc/clickhouse-server/config.d/ssl.xml - ./cluster.xml:/etc/clickhouse-server/config.d/cluster.xml - - ./dynamic_settings.sh:/docker-entrypoint-initdb.d/dynamic_settings.sh - - ./_coverage_/:/tmp/_coverage_/ -# uncomment only for local debug - - ./install_delve.sh:/tmp/install_delve.sh + - /var/lib/clickhouse + - /hdd1_data + - /hdd2_data + - /hdd3_data # uncomment only when you need clickhouse logs #
- ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log ports: - "8123:8123" - "9000:9000" - - "7171:7171" +# for delve debugger +# - "40001:40001" networks: - clickhouse-backup links: @@ -233,7 +288,7 @@ services: all_services_ready: image: hello-world depends_on: - clickhouse: + clickhouse-backup: condition: service_healthy networks: diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index d059ef70..e3616fb2 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -404,13 +404,16 @@ func init() { } func TestSkipNotExistsTable(t *testing.T) { - //t.Skip("TestSkipNotExistsTable is flaky now, need more precise algorithm for pause calculation") + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.1") < 0 { + t.Skip("TestSkipNotExistsTable: too little time between `SELECT DISTINCT partition_id` and `ALTER TABLE ... FREEZE PARTITION`") + } ch := &TestClickHouse{} r := require.New(t) ch.connectWithWait(r, 0*time.Second, 1*time.Second) defer ch.chbackend.Close() log.Info("Check skip not exist errors") + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS default.if_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" ifNotExistsInsertSQL := "INSERT INTO default.if_not_exists SELECT number FROM numbers(1000)" chVersion, err := ch.chbackend.GetVersion(context.Background()) @@ -443,14 +446,14 @@ func TestSkipNotExistsTable(t *testing.T) { pauseChannel <- pause / i } startTime := time.Now() - out, err := dockerExecOut("clickhouse", "bash", "-ce", "LOG_LEVEL=debug clickhouse-backup create --table default.if_not_exists "+testBackupName) + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug clickhouse-backup create --table default.if_not_exists "+testBackupName) log.Info(out) if (err != nil && (strings.Contains(out, "can't freeze") || strings.Contains(out, "no tables for backup"))) || (err == nil && !strings.Contains(out, "can't freeze")) { parseTime := func(line string) time.Time { parsedTime, err := time.Parse("2006/01/02 15:04:05.999999", line[:26]) if err != nil { - r.Failf("%s, Error parsing time: %v", line, err) + r.Failf("Error parsing time", "%s: %v", line, err) } return parsedTime } @@ -462,25 +465,27 @@ func TestSkipNotExistsTable(t *testing.T) { freezeTime = parseTime(line) break } + if strings.Contains(line, "SELECT DISTINCT partition_id") { + freezeTime = parseTime(line) + break + } } pause += (firstTime.Sub(startTime) + freezeTime.Sub(firstTime)).Nanoseconds() } if err != nil { if !strings.Contains(out, "no tables for backup") { assert.NoError(t, err) - } /* else { - pausePercent += 1 - } */ + } } if strings.Contains(out, "code: 60") && err == nil { freezeErrorHandled = true <-resumeChannel - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) break } if err == nil { - err = dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName) + err = dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName) assert.NoError(t, err) } <-resumeChannel @@ -516,24 +521,24 @@ func TestTablePatterns(t *testing.T) { testBackupName := "test_backup_patterns" databaseList
:= []string{dbNameOrdinary, dbNameAtomic} - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) for _, createPattern := range []bool{true, false} { for _, restorePattern := range []bool{true, false} { fullCleanup(r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false) generateTestData(ch, r, "S3") if createPattern { - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create_remote", "--tables", " "+dbNameOrdinary+".*", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--tables", " "+dbNameOrdinary+".*", testBackupName)) } else { - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create_remote", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", testBackupName)) } - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) dropDatabasesFromTestDataDataSet(r, ch, databaseList) if restorePattern { - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore_remote", "--tables", " "+dbNameOrdinary+".*", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--tables", " "+dbNameOrdinary+".*", testBackupName)) } else { - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore_remote", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", testBackupName)) } restored := uint64(0) @@ -573,16 +578,16 @@ func TestProjections(t *testing.T) { ch.connectWithWait(r, 0*time.Second, 1*time.Second) defer ch.chbackend.Close() - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) err = ch.chbackend.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() ORDER BY dt") r.NoError(err) err = ch.chbackend.Query("INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(10)") r.NoError(err) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "test_backup_projection")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", "test_backup_projection")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_backup_projection")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_backup_projection")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "test_backup_projection")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection")) var counts uint64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) r.Equal(uint64(10), counts) @@ -598,33 +603,33 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { } r := require.New(t) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 500*time.Millisecond, 2*time.Second) backupNames := make([]string, 5) for i := 0; i < 5; i++ { backupNames[i] = fmt.Sprintf("keep_remote_backup_%d", i) } databaseList := 
[]string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) fullCleanup(r, ch, backupNames, []string{"remote", "local"}, databaseList, false, false) generateTestData(ch, r, "S3") for i, backupName := range backupNames { generateIncrementTestData(ch, r) if i == 0 { - r.NoError(dockerExec("clickhouse", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 clickhouse-backup create_remote %s", backupName))) + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 clickhouse-backup create_remote %s", backupName))) } else { - r.NoError(dockerExec("clickhouse", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[i-1], backupName))) + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[i-1], backupName))) } } - out, err := dockerExecOut("clickhouse", "bash", "-ce", "clickhouse-backup list local") + out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup list local") r.NoError(err) // shall not delete any backup, because all deleted backups are still referenced as links by other backups for _, backupName := range backupNames { r.Contains(out, backupName) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", backupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", backupName)) } latestIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-1) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", latestIncrementBackup)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", latestIncrementBackup)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "download", latestIncrementBackup)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", latestIncrementBackup)) var res uint64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s`.`%s`", Issue331Atomic, Issue331Atomic))) r.Equal(uint64(200), res) @@ -638,17 +643,17 @@ func TestS3NoDeletePermission(t *testing.T) { } r := require.New(t) r.NoError(dockerExec("minio", "/bin/minio_nodelete.sh")) - r.NoError(dockerCP("config-s3-nodelete.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3-nodelete.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 500*time.Millisecond, 2*time.Second) defer ch.chbackend.Close() generateTestData(ch, r, "S3") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create_remote", "no_delete_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "no_delete_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore_remote", "no_delete_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "no_delete_backup")) - r.Error(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", "no_delete_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "no_delete_backup")) + r.NoError(dockerExec("clickhouse-backup", 
"clickhouse-backup", "delete", "local", "no_delete_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) + r.Error(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} dropDatabasesFromTestDataDataSet(r, ch, databaseList) r.NoError(dockerExec("minio", "bash", "-ce", "rm -rf /data/clickhouse/*")) @@ -661,7 +666,7 @@ func TestSyncReplicaTimeout(t *testing.T) { ch := &TestClickHouse{} r := require.New(t) ch.connectWithWait(r, 0*time.Millisecond, 2*time.Second) - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) dropReplTables := func() { for _, table := range []string{"repl1", "repl2"} { @@ -683,10 +688,10 @@ func TestSyncReplicaTimeout(t *testing.T) { ch.queryWithNoError(r, "INSERT INTO default.repl1 SELECT number FROM numbers(100)") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.repl*", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "upload", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", "test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.repl*", "test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "upload", "test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_not_synced_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_not_synced_backup")) ch.queryWithNoError(r, "SYSTEM START REPLICATED SENDS default.repl1") ch.queryWithNoError(r, "SYSTEM START FETCHES default.repl2") @@ -819,22 +824,22 @@ func TestRestoreMutationInProgress(t *testing.T) { } r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical")) - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) // backup with check consistency - out, createErr := dockerExecOut("clickhouse", "clickhouse-backup", "create", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + out, createErr := dockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") r.NotEqual(createErr, nil) r.Contains(out, "have inconsistent data types") t.Log(out) // backup without check consistency - out, createErr = dockerExecOut("clickhouse", "clickhouse-backup", "create", "--skip-check-parts-columns", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + out, createErr = dockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "--skip-check-parts-columns", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") t.Log(out) r.NoError(createErr) r.NotContains(out, 
"have inconsistent data types") r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: "test_restore_mutation_in_progress"}, "", "", false, version)) var restoreErr error - restoreErr = dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + restoreErr = dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "--tables=default.test_restore_mutation_in_progress", "test_restore_mutation_in_progress") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") < 0 { r.NotEqual(restoreErr, nil) } else { @@ -886,7 +891,7 @@ func TestRestoreMutationInProgress(t *testing.T) { r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical")) r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: "test_restore_mutation_in_progress"}, "", "", false, version)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_restore_mutation_in_progress")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_restore_mutation_in_progress")) } func TestInnerTablesMaterializedView(t *testing.T) { @@ -901,8 +906,8 @@ func TestInnerTablesMaterializedView(t *testing.T) { ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_inner (v UInt64) ENGINE=MergeTree() ORDER BY v AS SELECT v FROM test_mv.src_table") ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") ch.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) dropSQL := "DROP DATABASE test_mv" isAtomic, err := ch.chbackend.IsAtomic("test_mv") r.NoError(err) @@ -910,7 +915,7 @@ func TestInnerTablesMaterializedView(t *testing.T) { dropSQL += " NO DELAY" } ch.queryWithNoError(r, dropSQL) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) var rowCnt uint64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) r.Equal(uint64(100), rowCnt) @@ -925,7 +930,6 @@ func TestFIPS(t *testing.T) { defer ch.chbackend.Close() fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) r.NoError(dockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) - r.NoError(dockerExec("clickhouse", "bash", "-c", "find /etc/apt/ -type f -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +")) installDebIfNotExists(r, "clickhouse", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) r.NoError(dockerExec("clickhouse", "update-ca-certificates")) @@ -964,10 +968,8 @@ func TestFIPS(t *testing.T) { 
runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("create_remote --tables=default.fips_table %s", fipsBackupName)}, true) runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("restore_remote --tables=default.fips_table %s", fipsBackupName)}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{ - fmt.Sprintf("delete local %s", fipsBackupName), - fmt.Sprintf("delete remote %s", fipsBackupName), - }, false) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) + runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete remote %s", fipsBackupName)}, false) inProgressActions := make([]struct { Command string `ch:"command"` @@ -1017,7 +1019,7 @@ func TestDoRestoreRBAC(t *testing.T) { ch.connectWithWait(r, 1*time.Second, 1*time.Second) - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") @@ -1034,9 +1036,9 @@ func TestDoRestoreRBAC(t *testing.T) { ch.queryWithNoError(r, "CREATE QUOTA test_rbac KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO test_rbac") ch.queryWithNoError(r, "CREATE ROW POLICY test_rbac ON default.test_rbac USING 1=1 AS RESTRICTIVE TO test_rbac") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--backup-rbac", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "upload", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--rbac", "--rbac-only", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_rbac_backup")) r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) log.Info("drop all RBAC related objects after backup") @@ -1046,19 +1048,13 @@ func TestDoRestoreRBAC(t *testing.T) { ch.queryWithNoError(r, "DROP ROLE test_rbac") ch.queryWithNoError(r, "DROP USER test_rbac") - ch.chbackend.Close() - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) - ch.connectWithWait(r, 2*time.Second, 8*time.Second) - log.Info("download+restore RBAC") r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", "--rbac", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", "--rbac", "--rbac-only", "test_rbac_backup")) r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - // we can't restart clickhouse inside container, we need restart container ch.chbackend.Close() - 
r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) ch.connectWithWait(r, 2*time.Second, 8*time.Second) r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) @@ -1085,22 +1081,23 @@ func TestDoRestoreRBAC(t *testing.T) { } if !found { r.NoError(dockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) - r.Failf("result for SHOW %s, %v doesn't contain %v", rbacType, rbacRows, expectedValue) + r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) } } - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_rbac_backup")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_rbac_backup")) ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") ch.queryWithNoError(r, "DROP QUOTA test_rbac") ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") ch.queryWithNoError(r, "DROP ROLE test_rbac") ch.queryWithNoError(r, "DROP USER test_rbac") - + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") ch.chbackend.Close() } +// TestDoRestoreConfigs - requires direct access to `/etc/clickhouse-backup/`, so it is executed inside the `clickhouse` container func TestDoRestoreConfigs(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54391") == -1 { t.Skipf("Test skipped, users.d is not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) @@ -1108,45 +1105,49 @@ func TestDoRestoreConfigs(t *testing.T) { } ch := &TestClickHouse{} r := require.New(t) ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") ch.queryWithNoError(r, "CREATE TABLE default.test_configs (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo '<yandex><profiles><default><empty_result_for_aggregation_by_empty_set>1</empty_result_for_aggregation_by_empty_set></default></profiles></yandex>' > /etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--configs", "test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "upload", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--configs", "--configs-only", "test_configs_backup")) + ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + r.NoError(dockerExec("clickhouse", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup upload test_configs_backup")) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_configs_backup")) ch.chbackend.Close() - ch.connectWithWait(r, 2*time.Second, 8*time.Second) + ch.connectWithWait(r, 1*time.Second, 1*time.Second) - selectEmptyResultForAggQuery := - "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" + selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" var settings string r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") r.NoError(dockerExec("clickhouse", 
"rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", "test_configs_backup")) + r.NoError(dockerExec("clickhouse", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) + r.NoError(ch.chbackend.Query("SYSTEM RELOAD CONFIG")) ch.chbackend.Close() - ch.connectWithWait(r, 2*time.Second, 8*time.Second) + ch.connectWithWait(r, 1*time.Second, 1*time.Second) settings = "" r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", "--configs", "test_configs_backup")) - err := ch.chbackend.Query("SYSTEM RELOAD CONFIG") - r.NoError(err) + r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) + ch.chbackend.Close() - ch.connectWithWait(r, 2*time.Second, 1*time.Second) + ch.connectWithWait(r, 1*time.Second, 1*time.Second) settings = "" r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") + isTestConfigsTablePresent := 0 + r.NoError(ch.chbackend.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) + r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") + r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_configs_backup")) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", "test_configs_backup")) r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) @@ -1156,7 +1157,7 @@ func TestDoRestoreConfigs(t *testing.T) { func TestIntegrationS3(t *testing.T) { r := require.New(t) - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) runMainIntegrationScenario(t, "S3") } @@ -1166,8 +1167,9 @@ func TestIntegrationGCS(t *testing.T) { return } r := require.New(t) - r.NoError(dockerCP("config-gcs.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) - installDebIfNotExists(r, "clickhouse", "ca-certificates") + r.NoError(dockerCP("config-gcs.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + installDebIfNotExists(r, "clickhouse-backup", "ca-certificates") + r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) runMainIntegrationScenario(t, "GCS") } @@ -1177,57 +1179,58 @@ func TestIntegrationAzure(t *testing.T) { return } r := require.New(t) - r.NoError(dockerCP("config-azblob.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) - installDebIfNotExists(r, "clickhouse", "ca-certificates") + r.NoError(dockerCP("config-azblob.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + installDebIfNotExists(r, "clickhouse-backup", "ca-certificates") + r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) runMainIntegrationScenario(t, "AZBLOB") } func TestIntegrationSFTPAuthPassword(t *testing.T) { r := require.New(t) - 
r.NoError(dockerCP("config-sftp-auth-password.yaml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-sftp-auth-password.yaml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) runMainIntegrationScenario(t, "SFTP") } func TestIntegrationFTP(t *testing.T) { r := require.New(t) - r.NoError(dockerCP("config-ftp.yaml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-ftp.yaml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) runMainIntegrationScenario(t, "FTP") } func TestIntegrationSFTPAuthKey(t *testing.T) { r := require.New(t) - r.NoError(dockerCP("config-sftp-auth-key.yaml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-sftp-auth-key.yaml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - uploadSSHKeys(r) + uploadSSHKeys(r, "clickhouse-backup") runMainIntegrationScenario(t, "SFTP") } func TestIntegrationCustom(t *testing.T) { r := require.New(t) - + installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) for _, customType := range []string{"restic", "kopia", "rsync"} { if customType == "rsync" { - uploadSSHKeys(r) - installDebIfNotExists(r, "clickhouse", "openssh-client", "rsync", "jq") + uploadSSHKeys(r, "clickhouse-backup") + installDebIfNotExists(r, "clickhouse-backup", "openssh-client", "rsync", "jq") } if customType == "restic" { r.NoError(dockerExec("minio", "rm", "-rf", "/data/clickhouse/*")) - installDebIfNotExists(r, "clickhouse", "curl", "jq", "bzip2") - r.NoError(dockerExec("clickhouse", "bash", "-xec", "RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic")) + installDebIfNotExists(r, "clickhouse-backup", "curl", "jq", "bzip2") + r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic")) } if customType == "kopia" { r.NoError(dockerExec("minio", "bash", "-ce", "rm -rfv /data/clickhouse/*")) - installDebIfNotExists(r, "clickhouse", "pgp", "curl") - r.NoError(dockerExec("clickhouse", "apt-get", "install", "-y", "ca-certificates")) - r.NoError(dockerExec("clickhouse", "update-ca-certificates")) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) - installDebIfNotExists(r, "clickhouse", "kopia", "jq") + installDebIfNotExists(r, "clickhouse-backup", "pgp", "curl") + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg 
--dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) + installDebIfNotExists(r, "clickhouse-backup", "kopia", "jq") } - r.NoError(dockerCP("config-custom-"+customType+".yml", "clickhouse:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("clickhouse", "mkdir", "-pv", "/custom/"+customType)) - r.NoError(dockerCP("./"+customType+"/", "clickhouse:/custom/")) + r.NoError(dockerCP("config-custom-"+customType+".yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerExec("clickhouse-backup", "mkdir", "-pv", "/custom/"+customType)) + r.NoError(dockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) runMainIntegrationScenario(t, "CUSTOM") } } @@ -1242,15 +1245,15 @@ func TestIntegrationEmbedded(t *testing.T) { r := require.New(t) //CUSTOM backup creates a folder on each disk r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) - r.NoError(dockerCP("config-s3-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3-embedded.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) runMainIntegrationScenario(t, "EMBEDDED_S3") //@TODO uncomment when slow azure BACKUP/RESTORE is resolved https://github.com/ClickHouse/ClickHouse/issues/52088 //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - //r.NoError(dockerCP("config-azblob-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + //r.NoError(dockerCP("config-azblob-embedded.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) //runMainIntegrationScenario(t, "EMBEDDED_AZURE") //@TODO think about how to implement embedded backup for s3_plain disks //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) - //r.NoError(dockerCP("config-s3-plain-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + //r.NoError(dockerCP("config-s3-plain-embedded.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN") } @@ -1261,35 +1264,35 @@ func TestLongListRemote(t *testing.T) { defer ch.chbackend.Close() totalCacheCount := 20 testBackupName := "test_list_remote" - err := dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml") + err := dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml") r.NoError(err) for i := 0; i < totalCacheCount; i++ { - r.NoError(dockerExec("clickhouse", "bash", "-ce", fmt.Sprintf("ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) + r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) } - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(dockerExec("clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) time.Sleep(2 * time.Second) startFirst := time.Now() - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "list", "remote")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) noCacheDuration := 
time.Since(startFirst) - r.NoError(dockerExec("clickhouse", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(dockerExec("clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")) startCached := time.Now() - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "list", "remote")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) cachedDuration := time.Since(startCached) r.Greater(noCacheDuration, cachedDuration) - r.NoError(dockerExec("clickhouse", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(dockerExec("clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) time.Sleep(2 * time.Second) startCacheClear := time.Now() - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "list", "remote")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) cacheClearDuration := time.Since(startCacheClear) r.Greater(cacheClearDuration, cachedDuration) @@ -1304,7 +1307,7 @@ func TestLongListRemote(t *testing.T) { func TestRestoreDatabaseMapping(t *testing.T) { r := require.New(t) - r.NoError(dockerCP("config-database-mapping.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-database-mapping.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) ch := &TestClickHouse{} ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) defer ch.chbackend.Close() @@ -1334,10 +1337,10 @@ func TestRestoreDatabaseMapping(t *testing.T) { ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") log.Info("Create backup") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", testBackupName)) log.Info("Restore schema") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) log.Info("Check result database1") ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") @@ -1356,7 +1359,7 @@ func TestRestoreDatabaseMapping(t *testing.T) { } log.Info("Restore data") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--data", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--data", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) log.Info("Check result database2") checkRecordset(1, 10, "SELECT count() FROM database2.t1") @@ -1376,7 +1379,7 @@ func TestMySQLMaterialized(t *testing.T) { t.Skipf("MaterializedMySQL isn't supported for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) } r := require.New(t) - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE 
DATABASE ch_mysql_repl")) ch := &TestClickHouse{} ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) @@ -1389,16 +1392,16 @@ func TestMySQLMaterialized(t *testing.T) { r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')")) time.Sleep(1 * time.Second) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "test_mysql_materialized")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_mysql_materialized")) ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "test_mysql_materialized")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "test_mysql_materialized")) result := 0 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_mysql_repl.t1")) r.Equal(3, result, "expect count=3") ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_mysql_materialized")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_mysql_materialized")) } func TestPostgreSQLMaterialized(t *testing.T) { @@ -1408,7 +1411,7 @@ func TestPostgreSQLMaterialized(t *testing.T) { t.Skipf("MaterializedPostgreSQL isn't supported for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) } r := require.New(t) - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) r.NoError(dockerExec("pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root")) r.NoError(dockerExec("pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl")) ch := &TestClickHouse{} @@ -1421,22 +1424,22 @@ func TestPostgreSQLMaterialized(t *testing.T) { ) time.Sleep(1 * time.Second) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "test_pgsql_materialized")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_pgsql_materialized")) ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "test_pgsql_materialized")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "test_pgsql_materialized")) result := 0 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_pgsql_repl.t1")) r.Equal(3, result, "expect count=3") ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_pgsql_materialized")) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_pgsql_materialized")) } -func uploadSSHKeys(r *require.Assertions) { - r.NoError(dockerCP("sftp/clickhouse-backup_rsa", "clickhouse:/id_rsa")) - r.NoError(dockerExec("clickhouse", "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) - r.NoError(dockerExec("clickhouse", "chmod", "-v", "0600", "/tmp/id_rsa")) +func uploadSSHKeys(r *require.Assertions, container string) { + r.NoError(dockerCP("sftp/clickhouse-backup_rsa", container+":/id_rsa")) + 
r.NoError(dockerExec(container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) + r.NoError(dockerExec(container, "chmod", "-v", "0600", "/tmp/id_rsa")) r.NoError(dockerCP("sftp/clickhouse-backup_rsa.pub", "sshd:/root/.ssh/authorized_keys")) r.NoError(dockerExec("sshd", "chown", "-v", "root:root", "/root/.ssh/authorized_keys")) @@ -1468,11 +1471,11 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) log.Info("Create backup") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", testBackupName)) generateIncrementTestData(ch, r) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", incrementBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", incrementBackupName)) log.Info("Upload") uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd clickhouse-backup upload --resume %s", remoteStorageType, testBackupName) @@ -1487,13 +1490,13 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { if strings.HasPrefix(remoteStorageType, "EMBEDDED") { backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) } - out, err = dockerExecOut("clickhouse", "ls", "-lha", backupDir) + out, err = dockerExecOut("clickhouse-backup", "ls", "-lha", backupDir) r.NoError(err) r.Equal(5, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exist in backup directory") log.Info("Delete backup") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", incrementBackupName)) - out, err = dockerExecOut("clickhouse", "ls", "-lha", backupDir) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", incrementBackupName)) + out, err = dockerExecOut("clickhouse-backup", "ls", "-lha", backupDir) r.NoError(err) r.Equal(3, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '0' backups exist in backup directory") @@ -1504,13 +1507,13 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) log.Info("Restore schema") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--schema", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--schema", testBackupName)) log.Info("Restore data") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--data", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--data", testBackupName)) log.Info("Full restore with rm") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--rm", testBackupName)) log.Info("Check data") for i := range testData { @@ -1527,14 +1530,14 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { dropDatabasesFromTestDataDataSet(r, ch, databaseList) log.Info("Delete backup") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", 
"delete", "local", testBackupName)) log.Info("Download increment") downloadCmd = fmt.Sprintf("clickhouse-backup download --resume %s", incrementBackupName) checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) log.Info("Restore") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--schema", "--data", incrementBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--schema", "--data", incrementBackupName)) log.Info("Check increment data") for i := range testData { @@ -1585,14 +1588,14 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re } // check create_remote full > download + partitions > delete local > download > restore --partitions > restore - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create_remote", "--tables=default.t*", fullBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", fullBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--tables=default.t*", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "download", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/" if strings.HasPrefix(remoteStorageType, "EMBEDDED") { fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?" } - out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") r.NoError(err) expectedLines := "13" // custom storage doesn't support --partitions for upload / download now @@ -1601,53 +1604,53 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re expectedLines = "17" } r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", fullBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "download", fullBackupName)) fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/" if strings.HasPrefix(remoteStorageType, "EMBEDDED") { fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?" 
} - out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") r.NoError(err) r.Equal("17", strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName)) result = 0 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM default.t1 UNION ALL SELECT count() AS c FROM default.t2)")) expectedCount = 40 r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore", fullBackupName)) result = 0 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM default.t1 UNION ALL SELECT count() AS c FROM default.t2)")) r.Equal(uint64(80), result, "expect count=80") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", fullBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", fullBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", fullBackupName)) // check create + partitions - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName)) partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/" if strings.HasPrefix(remoteStorageType, "EMBEDDED") { partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1" } - out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") r.NoError(err) r.Equal("5", strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", partitionBackupName)) // check create > upload + partitions - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.t1", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "--tables=default.t1", partitionBackupName)) partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/" if strings.HasPrefix(remoteStorageType, "EMBEDDED") { partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1" } - out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") + out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") r.NoError(err) r.Equal("7", strings.Trim(out, 
"\r\n\t ")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "upload", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "upload", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", partitionBackupName)) // restore partial uploaded - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore_remote", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", partitionBackupName)) // Check partial restored t1 result = 0 @@ -1671,8 +1674,8 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re r.Equal(expectedCount, result, "expect count=0") // DELETE backup. - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "remote", partitionBackupName)) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", partitionBackupName)) + r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", partitionBackupName)) ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t1") ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t2") @@ -1687,7 +1690,7 @@ func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r } else { backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) } - out, err := dockerExecOut("clickhouse", "bash", "-xce", backupCmd) + out, err := dockerExecOut("clickhouse-backup", "bash", "-xce", backupCmd) log.Info(out) r.NoError(err) if strings.Contains(backupCmd, "--resume") { @@ -1698,7 +1701,7 @@ func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r func fullCleanup(r *require.Assertions, ch *TestClickHouse, backupNames, backupTypes, databaseList []string, checkDeleteErr, checkDropErr bool) { for _, backupName := range backupNames { for _, backupType := range backupTypes { - err := dockerExec("clickhouse", "clickhouse-backup", "delete", backupType, backupName) + err := dockerExec("clickhouse-backup", "clickhouse-backup", "delete", backupType, backupName) if checkDeleteErr { r.NoError(err) } @@ -1708,7 +1711,7 @@ func fullCleanup(r *require.Assertions, ch *TestClickHouse, backupNames, backupT if err == nil { for _, backupName := range strings.Split(otherBackupList, "\n") { if backupName != "" { - err := dockerExec("clickhouse", "clickhouse-backup", "delete", "local", backupName) + err := dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", backupName) if checkDropErr { r.NoError(err) } @@ -1819,20 +1822,20 @@ const apiBackupNumber = 5 func TestServerAPI(t *testing.T) { ch := &TestClickHouse{} r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second) + ch.connectWithWait(r, 0*time.Second, 10*time.Second) defer func() { ch.chbackend.Close() }() - r.NoError(dockerCP("config-s3.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) + r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) fieldTypes := []string{"UInt64", "String", "Int"} - installDebIfNotExists(r, "clickhouse", "curl") + installDebIfNotExists(r, "clickhouse-backup", "curl") maxTables := 10 
 	minFields := 10
 	randFields := 10
 	fillDatabaseForAPIServer(maxTables, minFields, randFields, ch, r, fieldTypes)

 	log.Info("Run `clickhouse-backup server --watch` in background")
-	r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log"))
+	r.NoError(dockerExec("-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log"))
 	time.Sleep(1 * time.Second)

 	testAPIBackupCreate(r)
@@ -1858,12 +1861,12 @@ func TestServerAPI(t *testing.T) {

 	testAPIBackupDelete(r)

-	r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup"))
+	r.NoError(dockerExec("clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup"))
 	r.NoError(ch.dropDatabase("long_schema"))
 }

 func testAPIRestart(r *require.Assertions, ch *TestClickHouse) {
-	out, err := dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'")
+	out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'")
 	log.Debug(out)
 	r.NoError(err)
 	r.Contains(out, "acknowledged")
@@ -1917,7 +1920,7 @@ func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) {
 	r.NoError(ch.chbackend.SelectSingleRowNoCtx(&actionsBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'backup_action%'"))
 	r.Equal(uint64(0), actionsBackups)

-	out, err := dockerExecOut("clickhouse", "curl", "http://localhost:7171/metrics")
+	out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
 	r.NoError(err)
 	r.Contains(out, "clickhouse_backup_last_create_remote_status 1")
 	r.Contains(out, "clickhouse_backup_last_create_status 1")
@@ -1930,7 +1933,7 @@ func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) {
 func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) {
 	log.Info("Check /backup/watch + /backup/kill")
 	runKillCommand := func(command string) {
-		out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command))
+		out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command))
 		log.Debug(out)
 		r.NoError(err)
 	}
@@ -1957,7 +1960,7 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) {
 	runKillCommand("watch")
 	checkCanceledCommand(1)

-	out, err := dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'")
+	out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'")
 	log.Debug(out)
 	r.NoError(err)
 	time.Sleep(7 * time.Second)
@@ -1970,18 +1973,18 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) {
 func testAPIBackupDelete(r *require.Assertions) {
 	log.Info("Check /backup/delete/{where}/{name}")
 	for i := 1; i <= apiBackupNumber; i++ {
-		out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i))
+		out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i))
 		log.Infof(out)
 		r.NoError(err)
 		r.NotContains(out, "another operation is currently running")
 		r.NotContains(out, "\"status\":\"error\"")
-		out, err = dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i))
+		out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i))
 		log.Infof(out)
 		r.NoError(err)
 		r.NotContains(out, "another operation is currently running")
 		r.NotContains(out, "\"status\":\"error\"")
 	}
-	out, err := dockerExecOut("clickhouse", "curl", "http://localhost:7171/metrics")
+	out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
 	r.NoError(err)
 	r.Contains(out, "clickhouse_backup_last_delete_status 1")
 }
@@ -2000,7 +2003,7 @@ func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) {
 	r.Greater(realTotalBytes, uint64(0))
 	r.Greater(uint64(lastRemoteSize), realTotalBytes)

-	out, err := dockerExecOut("clickhouse", "curl", "-sL", "http://localhost:7171/metrics")
+	out, err := dockerExecOut("clickhouse-backup", "curl", "-sL", "http://localhost:7171/metrics")
 	log.Debug(out)
 	r.NoError(err)
 	r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize))
@@ -2016,7 +2019,7 @@ func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) {
 func testAPIDeleteLocalDownloadRestore(r *require.Assertions) {
 	log.Info("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1")
 	out, err := dockerExecOut(
-		"clickhouse",
+		"clickhouse-backup",
 		"bash", "-xe", "-c",
 		fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/delete/local/z_backup_$i\"; curl -sfL -XPOST \"http://localhost:7171/backup/download/z_backup_$i\"; sleep 2; curl -sfL -XPOST \"http://localhost:7171/backup/restore/z_backup_$i?rm=1\"; sleep 8; done", apiBackupNumber),
 	)
@@ -2025,7 +2028,7 @@ func testAPIDeleteLocalDownloadRestore(r *require.Assertions) {
 	r.NotContains(out, "another operation is currently running")
 	r.NotContains(out, "\"status\":\"error\"")

-	out, err = dockerExecOut("clickhouse", "curl", "http://localhost:7171/metrics")
+	out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
 	r.NoError(err)
 	r.Contains(out, "clickhouse_backup_last_delete_status 1")
 	r.Contains(out, "clickhouse_backup_last_download_status 1")
@@ -2034,7 +2037,7 @@ func testAPIBackupList(t *testing.T, r *require.Assertions) {
 	log.Info("Check /backup/list")
-	out, err := dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'")
+	out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'")
 	log.Debug(out)
 	r.NoError(err)
 	for i := 1; i <= apiBackupNumber; i++ {
@@ -2043,7 +2046,7 @@
 	}

 	log.Info("Check /backup/list/local")
-	out, err = dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'")
+	out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'")
 	log.Debug(out)
 	r.NoError(err)
 	for i := 1; i <= apiBackupNumber; i++ {
@@ -2052,7 +2055,7 @@
 	}

 	log.Info("Check /backup/list/remote")
-	out, err = dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'")
+	out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'")
 	log.Debug(out)
 	r.NoError(err)
 	for i := 1; i <= apiBackupNumber; i++ {
@@ -2064,7 +2067,7 @@

 func testAPIBackupUpload(r *require.Assertions) {
 	log.Info("Check /backup/upload")
 	out, err := dockerExecOut(
-		"clickhouse",
+		"clickhouse-backup",
 		"bash", "-xe", "-c",
 		fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/upload/z_backup_$i\"; sleep 2; done", apiBackupNumber),
 	)
@@ -2072,7 +2075,7 @@ func testAPIBackupUpload(r *require.Assertions) {
 	r.NoError(err)
 	r.NotContains(out, "\"status\":\"error\"")
 	r.NotContains(out, "another operation is currently running")
-	out, err = dockerExecOut("clickhouse", "curl", "http://localhost:7171/metrics")
+	out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
 	r.NoError(err)
 	r.Contains(out, "clickhouse_backup_last_upload_status 1")
 }
@@ -2080,7 +2083,7 @@ func testAPIBackupUpload(r *require.Assertions) {
 func testAPIBackupTables(r *require.Assertions) {
 	log.Info("Check /backup/tables")
 	out, err := dockerExecOut(
-		"clickhouse",
+		"clickhouse-backup",
 		"bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables\"",
 	)
 	log.Debug(out)
@@ -2095,7 +2098,7 @@ func testAPIBackupTables(r *require.Assertions) {

 	log.Info("Check /backup/tables/all")
 	out, err = dockerExecOut(
-		"clickhouse",
+		"clickhouse-backup",
 		"bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"",
 	)
 	log.Debug(out)
@@ -2114,7 +2117,7 @@ func testAPIBackupTables(r *require.Assertions) {

 func testAPIBackupCreate(r *require.Assertions) {
 	log.Info("Check /backup/create")
 	out, err := dockerExecOut(
-		"clickhouse",
+		"clickhouse-backup",
 		"bash", "-xe", "-c",
 		fmt.Sprintf("sleep 3; for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/create?table=long_schema.*&name=z_backup_$i\"; sleep 1.5; done", apiBackupNumber),
 	)
@@ -2123,7 +2126,7 @@ func testAPIBackupCreate(r *require.Assertions) {
 	r.NotContains(out, "Connection refused")
 	r.NotContains(out, "another operation is currently running")
 	r.NotContains(out, "\"status\":\"error\"")
-	out, err = dockerExecOut("clickhouse", "curl", "http://localhost:7171/metrics")
+	out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics")
 	r.NoError(err)
 	r.Contains(out, "clickhouse_backup_last_create_status 1")

@@ -2157,14 +2160,14 @@ func (ch *TestClickHouse) connectWithWait(r *require.Assertions, sleepBefore, ti
 		err := ch.connect(timeOut.String())
 		if i == 10 {
 			r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "logs", "clickhouse"))
-			out, dockerErr := dockerExecOut("clickhouse", "clickhouse-client", "--echo", "-q", "'SELECT version()'")
+			out, dockerErr := dockerExecOut("clickhouse", "clickhouse client", "--echo", "-q", "'SELECT version()'")
 			r.NoError(dockerErr)
 			ch.chbackend.Log.Debug(out)
 			r.NoError(err)
 		}
 		if err != nil {
 			r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "ps", "-a"))
-			if out, dockerErr := dockerExecOut("clickhouse", "clickhouse-client", "--echo", "-q", "SELECT version()"); dockerErr == nil {
+			if out, dockerErr := dockerExecOut("clickhouse", "clickhouse client", "--echo", "-q", "SELECT version()"); dockerErr == nil {
 				log.Info(out)
 			} else {
 				log.Warn(out)
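The hunks above move every HTTP probe of the REST API from the `clickhouse` container into the new dedicated `clickhouse-backup` container, while `clickhouse client` health checks stay on `clickhouse`. A minimal bash sketch of that split, assuming the container names used by this patch's docker-compose files:

#!/usr/bin/env bash
# Hedged sketch: API probes now target the clickhouse-backup container,
# SQL health checks still target the clickhouse container.
set -euo pipefail

# The REST API listens on port 7171 inside the clickhouse-backup container.
docker exec clickhouse-backup curl -sfL 'http://localhost:7171/backup/list'

# The server itself is still checked through clickhouse client in its own container.
docker exec clickhouse clickhouse client -q 'SELECT version()'
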
diff --git a/test/integration/kopia/init.sh b/test/integration/kopia/init.sh
index 7751189f..b2fc2ecb 100755
--- a/test/integration/kopia/init.sh
+++ b/test/integration/kopia/init.sh
@@ -7,5 +7,6 @@ export AWS_SECRET_ACCESS_KEY=it-is-my-super-secret-key
 export KOPIA_KEEP_LAST=7
 export KOPIA_PASSWORD=kopia-repo-password
 export KOPIA_CHECK_FOR_UPDATES=false
+export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /etc/clickhouse-backup/config.yml)' --port '$(yq '.clickhouse.port' /etc/clickhouse-backup/config.yml)' --user '$(yq '.clickhouse.username' /etc/clickhouse-backup/config.yml)' --password '$(yq '.clickhouse.password' /etc/clickhouse-backup/config.yml)'"
 kopia repository connect s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY} || kopia repository create s3 --endpoint=${KOPIA_S3_ENDPOINT} --disable-tls --bucket=${KOPIA_S3_BUCKET} --access-key=${AWS_ACCESS_KEY_ID} --secret-access-key=${AWS_SECRET_ACCESS_KEY}
 kopia policy set --global --keep-latest=${KOPIA_KEEP_LAST}
\ No newline at end of file
diff --git a/test/integration/kopia/upload.sh b/test/integration/kopia/upload.sh
index 4ca4197a..9b442dd3 100755
--- a/test/integration/kopia/upload.sh
+++ b/test/integration/kopia/upload.sh
@@ -5,7 +5,7 @@ source "${CUR_DIR}/init.sh"
 BACKUP_NAME=$1
 DIFF_FROM_REMOTE=${2:-}
 DIFF_FROM_REMOTE_CMD=""
-LOCAL_PATHS=$(clickhouse client -q "SELECT concat(trim(TRAILING '/' FROM path),'/backup/','${BACKUP_NAME}') FROM system.disks FORMAT TSVRaw" | awk '{printf("%s ",$0)} END { printf "\n" }' || clickhouse client -q "SELECT concat(replaceRegexpOne(metadata_path,'/metadata.*$',''),'/backup/','${BACKUP_NAME}') FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw" | awk '{printf("%s ",$0)} END { printf "\n" }')
+LOCAL_PATHS=$(eval "clickhouse client $CLICKHOUSE_PARAMS -q \"SELECT concat(trim(TRAILING '/' FROM path),'/backup/','${BACKUP_NAME}') FROM system.disks FORMAT TSVRaw\" | awk '{printf(\"%s \",\$0)} END { printf \"\n\" }' || clickhouse client $CLICKHOUSE_PARAMS -q \"SELECT concat(replaceRegexpOne(metadata_path,'/metadata.*$',''),'/backup/','${BACKUP_NAME}') FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw\" | awk '{printf(\"%s \",\$0)} END { printf \"\n\" }'")
 if [[ "" != "${DIFF_FROM_REMOTE}" ]]; then
 #  DIFF_FROM_REMOTE_CMD="--parent ${DIFF_FROM_REMOTE}"
   DIFF_FROM_REMOTE_CMD=""
diff --git a/test/integration/restic/init.sh b/test/integration/restic/init.sh
index e42d45df..ce540059 100755
--- a/test/integration/restic/init.sh
+++ b/test/integration/restic/init.sh
@@ -4,4 +4,5 @@ export RESTIC_REPOSITORY=s3:http://minio:9000/clickhouse/restic/cluster_name/sha
 export AWS_ACCESS_KEY_ID=access-key
 export AWS_SECRET_ACCESS_KEY=it-is-my-super-secret-key
 export RESTIC_KEEP_LAST=7
+export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /etc/clickhouse-backup/config.yml)' --port '$(yq '.clickhouse.port' /etc/clickhouse-backup/config.yml)' --user '$(yq '.clickhouse.username' /etc/clickhouse-backup/config.yml)' --password '$(yq '.clickhouse.password' /etc/clickhouse-backup/config.yml)'"
 restic cat config > /dev/null || restic init
\ No newline at end of file
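Both init.sh scripts above now build the client connection flags with yq from the mounted clickhouse-backup config instead of assuming a local server on default ports. A sketch of how that export expands, with /tmp/config.yml and its values as hypothetical stand-ins for the mounted /etc/clickhouse-backup/config.yml:

#!/usr/bin/env bash
# Hypothetical config standing in for the one the test containers mount.
set -euo pipefail
cat > /tmp/config.yml <<'EOF'
clickhouse:
  host: clickhouse
  port: 9000
  username: default
  password: backup_password
EOF

# Every value stays wrapped in literal single quotes inside the string, which
# is why the callers expand it through `eval` rather than plain word splitting.
CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /tmp/config.yml)' --port '$(yq '.clickhouse.port' /tmp/config.yml)' --user '$(yq '.clickhouse.username' /tmp/config.yml)' --password '$(yq '.clickhouse.password' /tmp/config.yml)'"
eval "clickhouse client ${CLICKHOUSE_PARAMS} -q 'SELECT 1'"
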
diff --git a/test/integration/restic/upload.sh b/test/integration/restic/upload.sh
index 86ea8213..5d430234 100755
--- a/test/integration/restic/upload.sh
+++ b/test/integration/restic/upload.sh
@@ -5,7 +5,7 @@ source "${CUR_DIR}/init.sh"
 BACKUP_NAME=$1
 DIFF_FROM_REMOTE=${2:-}
 DIFF_FROM_REMOTE_CMD=""
-LOCAL_PATHS=$(clickhouse client -q "SELECT concat(trim(TRAILING '/' FROM path),'/backup/','${BACKUP_NAME}') FROM system.disks FORMAT TSVRaw" | awk '{printf("%s ",$0)} END { printf "\n" }' || clickhouse client -q "SELECT concat(replaceRegexpOne(metadata_path,'/metadata/.*$|/store/.*$',''),'/backup/','${BACKUP_NAME}') FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw" | awk '{printf("%s ",$0)} END { printf "\n" }')
+LOCAL_PATHS=$(eval "clickhouse client $CLICKHOUSE_PARAMS -q \"SELECT concat(trim(TRAILING '/' FROM path),'/backup/','${BACKUP_NAME}') FROM system.disks FORMAT TSVRaw\" | awk '{printf(\"%s \",\$0)} END { printf \"\n\" }' || clickhouse client $CLICKHOUSE_PARAMS -q \"SELECT concat(replaceRegexpOne(metadata_path,'/metadata/.*$|/store/.*$',''),'/backup/','${BACKUP_NAME}') FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw\" | awk '{printf(\"%s \",\$0)} END { printf \"\n\" }'")
 if [[ "" != "${DIFF_FROM_REMOTE}" ]]; then
   DIFF_FROM_REMOTE=$(${CUR_DIR}/list.sh | grep "${DIFF_FROM_REMOTE}" | jq -r -c '.snapshot_id')
   DIFF_FROM_REMOTE_CMD="--parent ${DIFF_FROM_REMOTE}"
diff --git a/test/integration/rsync/delete.sh b/test/integration/rsync/delete.sh
index 5456e555..d009c38f 100755
--- a/test/integration/rsync/delete.sh
+++ b/test/integration/rsync/delete.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 set -xeuo pipefail
 CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source "${CUR_DIR}/settings.sh"
+source "${CUR_DIR}/init.sh"
 BACKUP_NAME=$1
 ssh -i "${BACKUP_SSH_KEY}" -o "StrictHostKeyChecking no" "${BACKUP_REMOTE_SERVER}" rm -rf "${BACKUP_REMOTE_DIR}/${BACKUP_NAME}"
\ No newline at end of file
diff --git a/test/integration/rsync/download.sh b/test/integration/rsync/download.sh
index 10bf9b7b..35a8d07d 100755
--- a/test/integration/rsync/download.sh
+++ b/test/integration/rsync/download.sh
@@ -1,9 +1,9 @@
 #!/usr/bin/env bash
 set -xeuo pipefail
 CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-source "${CUR_DIR}/settings.sh"
+source "${CUR_DIR}/init.sh"
 BACKUP_NAME=$1
-LOCAL_DISKS=$(clickhouse client -q "SELECT concat(name, ':', trim(TRAILING '/' FROM path)) FROM system.disks FORMAT TSVRaw" || clickhouse client -q "SELECT concat('default:',replaceRegexpOne(metadata_path,'/metadata.*$','')) FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw")
+LOCAL_DISKS=$(eval "clickhouse client $CLICKHOUSE_PARAMS -q \"SELECT concat(name, ':', trim(TRAILING '/' FROM path)) FROM system.disks FORMAT TSVRaw\" || clickhouse client $CLICKHOUSE_PARAMS -q \"SELECT concat('default:',replaceRegexpOne(metadata_path,'/metadata.*$','')) FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw\"")
 for disk in $LOCAL_DISKS; do
   disk_name=$(echo $disk | cut -d ":" -f 1)
   disk_path=$(echo $disk | cut -d ":" -f 2)
diff --git a/test/integration/rsync/init.sh b/test/integration/rsync/init.sh
new file mode 100644
index 00000000..150b610b
--- /dev/null
+++ b/test/integration/rsync/init.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+export BACKUP_REMOTE_DIR="/root/rsync_backups/cluster/shard0"
+export BACKUP_REMOTE_SERVER="root@sshd"
+export BACKUP_SSH_KEY="/tmp/id_rsa"
+export BACKUP_KEEP_TO_REMOTE=7
+export CLICKHOUSE_PARAMS="--host '$(yq '.clickhouse.host' /etc/clickhouse-backup/config.yml)' --port '$(yq '.clickhouse.port' /etc/clickhouse-backup/config.yml)' --user '$(yq '.clickhouse.username' /etc/clickhouse-backup/config.yml)' --password '$(yq '.clickhouse.password' /etc/clickhouse-backup/config.yml)'"
\ No newline at end of file
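The rewritten upload.sh and download.sh scripts above share one pattern: ask system.disks for every disk, and when that fails on an older server, fall back to deriving the default data path from system.tables.metadata_path. A rough standalone rendering under the same init.sh environment (BACKUP_NAME is an illustrative value):

#!/usr/bin/env bash
# Rough sketch of the disk-discovery fallback; CLICKHOUSE_PARAMS is assumed
# to be exported by an init.sh like the ones in the diffs above.
set -xeuo pipefail
BACKUP_NAME=example_backup
LOCAL_DISKS=$(eval "clickhouse client ${CLICKHOUSE_PARAMS} -q \"SELECT concat(name, ':', trim(TRAILING '/' FROM path)) FROM system.disks FORMAT TSVRaw\" || clickhouse client ${CLICKHOUSE_PARAMS} -q \"SELECT concat('default:',replaceRegexpOne(metadata_path,'/metadata.*$','')) FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw\"")
# Each entry is "disk_name:disk_path"; a local backup lives under
# disk_path/backup/BACKUP_NAME on every disk.
for disk in ${LOCAL_DISKS}; do
  disk_name=$(echo "${disk}" | cut -d ":" -f 1)
  disk_path=$(echo "${disk}" | cut -d ":" -f 2)
  echo "disk ${disk_name}: ${disk_path}/backup/${BACKUP_NAME}"
done
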
&& pwd)" -source "${CUR_DIR}/settings.sh" +source "${CUR_DIR}/init.sh" ssh -i "${BACKUP_SSH_KEY}" -o "StrictHostKeyChecking no" "${BACKUP_REMOTE_SERVER}" ls -d -1 "${BACKUP_REMOTE_DIR}/*" | while IFS= read -r backup_name ; do backup_name=${backup_name#"$BACKUP_REMOTE_DIR"} ssh -i "${BACKUP_SSH_KEY}" -o "StrictHostKeyChecking no" "${BACKUP_REMOTE_SERVER}" cat "${BACKUP_REMOTE_DIR}/${backup_name}/default/metadata.json" | jq -c -r -M '.' diff --git a/test/integration/rsync/settings.sh b/test/integration/rsync/settings.sh deleted file mode 100755 index 8b7ab07c..00000000 --- a/test/integration/rsync/settings.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash -export BACKUP_REMOTE_DIR="/root/rsync_backups/cluster/shard0" -export BACKUP_REMOTE_SERVER="root@sshd" -export BACKUP_SSH_KEY="/tmp/id_rsa" -export BACKUP_KEEP_TO_REMOTE=7 \ No newline at end of file diff --git a/test/integration/rsync/upload.sh b/test/integration/rsync/upload.sh index cafa144e..64645bb7 100755 --- a/test/integration/rsync/upload.sh +++ b/test/integration/rsync/upload.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash set -xeuo pipefail CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -source "${CUR_DIR}/settings.sh" +source "${CUR_DIR}/init.sh" BACKUP_NAME=$1 DIFF_FROM_REMOTE=${2:-} DIFF_FROM_REMOTE_CMD="" -LOCAL_DISKS=$(clickhouse client -q "SELECT concat(name, ':', trim(TRAILING '/' FROM path)) FROM system.disks FORMAT TSVRaw" || clickhouse client -q "SELECT concat('default:',replaceRegexpOne(metadata_path,'/metadata.*$','')) FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw") +LOCAL_DISKS=$(eval "clickhouse client ${CLICKHOUSE_PARAMS} -q \"SELECT concat(name, ':', trim(TRAILING '/' FROM path)) FROM system.disks FORMAT TSVRaw\" || clickhouse client ${CLICKHOUSE_PARAMS} -q \"SELECT concat('default:',replaceRegexpOne(metadata_path,'/metadata.*$','')) FROM system.tables WHERE database = 'system' AND metadata_path!='' LIMIT 1 FORMAT TSVRaw\"") for disk in $LOCAL_DISKS; do disk_name=$(echo $disk | cut -d ":" -f 1) disk_path=$(echo $disk | cut -d ":" -f 2) diff --git a/test/integration/ssl.xml b/test/integration/ssl.xml index 299322f2..7b78dfe7 100644 --- a/test/integration/ssl.xml +++ b/test/integration/ssl.xml @@ -1,5 +1,5 @@ - 0.0.0.0 + 0.0.0.0 9440 diff --git a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml index c156e27f..56d16951 100644 --- a/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml +++ b/test/testflows/clickhouse_backup/docker-compose/docker-compose.yml @@ -165,7 +165,6 @@ services: sftp_server: image: panubo/sshd:1.5.0 - container_name: sftp_server hostname: sftp_server environment: SSH_ENABLE_ROOT: "true" @@ -180,7 +179,6 @@ services: minio: image: minio/minio:${MINIO_VERSION:-latest} - container_name: minio hostname: minio environment: MINIO_ACCESS_KEY: access-key diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot index 9d345060..2086efa2 100644 --- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot +++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot @@ -1,4 +1,4 @@ -default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: 
diff --git a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot
index 9d345060..2086efa2 100644
--- a/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot
+++ b/test/testflows/clickhouse_backup/tests/snapshots/cli.py.cli.snapshot
@@ -1,4 +1,4 @@
-default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 15m\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'"""
+default_config = r"""'[\'general:\', \' remote_storage: none\', \' disable_progress_bar: true\', \' backups_to_keep_local: 0\', \' backups_to_keep_remote: 0\', \' log_level: info\', \' allow_empty_backups: false\', \' use_resumable_state: true\', \' restore_schema_on_cluster: ""\', \' upload_by_part: true\', \' download_by_part: true\', \' restore_database_mapping: {}\', \' retries_on_failure: 3\', \' retries_pause: 30s\', \' watch_interval: 1h\', \' full_interval: 24h\', \' watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}\', \' retriesduration: 100ms\', \' watchduration: 1h0m0s\', \' fullduration: 24h0m0s\', \'clickhouse:\', \' username: default\', \' password: ""\', \' host: localhost\', \' port: 9000\', \' disk_mapping: {}\', \' skip_tables:\', \' - system.*\', \' - INFORMATION_SCHEMA.*\', \' - information_schema.*\', \' - _temporary_and_external_tables.*\', \' skip_table_engines: []\', \' timeout: 5m\', \' freeze_by_part: false\', \' freeze_by_part_where: ""\', \' use_embedded_backup_restore: false\', \' embedded_backup_disk: ""\', \' backup_mutations: true\', \' restore_as_attach: false\', \' check_parts_columns: true\', \' secure: false\', \' skip_verify: false\', \' sync_replicated_tables: false\', \' log_sql_queries: true\', \' config_dir: /etc/clickhouse-server/\', \' restart_command: exec:systemctl restart clickhouse-server\', \' ignore_not_exists_error_during_freeze: true\', \' check_replicas_before_attach: true\', \' tls_key: ""\', \' tls_cert: ""\', \' tls_ca: ""\', \' debug: false\', \'s3:\', \' access_key: ""\', \' secret_key: ""\', \' bucket: ""\', \' endpoint: ""\', \' region: us-east-1\', \' acl: private\', \' assume_role_arn: ""\', \' force_path_style: false\', \' path: ""\', \' object_disk_path: ""\', \' disable_ssl: false\', \' compression_level: 1\', \' compression_format: tar\', \' sse: ""\', \' sse_kms_key_id: ""\', \' sse_customer_algorithm: ""\', \' sse_customer_key: ""\', \' sse_customer_key_md5: ""\', \' sse_kms_encryption_context: ""\', \' disable_cert_verification: false\', \' use_custom_storage_class: false\', \' storage_class: STANDARD\', \' custom_storage_class_map: {}\', \' part_size: 0\', \' allow_multipart_download: false\', \' object_labels: {}\', \' debug: false\', \'gcs:\', \' credentials_file: ""\', \' credentials_json: ""\', \' credentials_json_encoded: ""\', \' bucket: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' debug: false\', \' endpoint: ""\', \' storage_class: STANDARD\', \' object_labels: {}\', \' custom_storage_class_map: {}\', \'cos:\', \' url: ""\', \' timeout: 2m\', \' secret_id: ""\', \' secret_key: ""\', \' path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'api:\', \' listen: localhost:7171\', \' enable_metrics: true\', \' enable_pprof: false\', \' username: ""\', \' password: ""\', \' secure: false\', \' certificate_file: ""\', \' private_key_file: ""\', \' ca_cert_file: ""\', \' ca_key_file: ""\', \' create_integration_tables: false\', \' integration_tables_host: ""\', \' allow_parallel: false\', \' complete_resumable_after_restart: true\', \'ftp:\', \' address: ""\', \' timeout: 2m\', \' username: ""\', \' password: ""\', \' tls: false\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'sftp:\', \' address: ""\', \' port: 22\', \' username: ""\', \' password: ""\', \' key: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_format: tar\', \' compression_level: 1\', \' debug: false\', \'azblob:\', \' endpoint_schema: https\', \' endpoint_suffix: core.windows.net\', \' account_name: ""\', \' account_key: ""\', \' sas: ""\', \' use_managed_identity: false\', \' container: ""\', \' path: ""\', \' object_disk_path: ""\', \' compression_level: 1\', \' compression_format: tar\', \' sse_key: ""\', \' buffer_size: 0\', \' buffer_count: 3\', \' timeout: 15m\', \'custom:\', \' upload_command: ""\', \' download_command: ""\', \' list_command: ""\', \' delete_command: ""\', \' command_timeout: 4h\', \' commandtimeoutduration: 4h0m0s\']'"""

 help_flag = r"""'NAME:\n clickhouse-backup - Tool for easy backup of ClickHouse with cloud supportUSAGE:\n clickhouse-backup <command> [-t, --tables=<db>.<table>] <subcommand> <arguments>DESCRIPTION:\n Run as \'root\' or \'clickhouse\' userCOMMANDS:\n tables List of tables, exclude skip_tables\n create Create new backup\n create_remote Create and upload new backup\n upload Upload backup to remote storage\n list List of backups\n download Download backup from remote storage\n restore Create schema and restore data from backup\n restore_remote Download and restore\n delete Delete specific backup\n default-config Print default config\n print-config Print current config merged with environment variables\n clean Remove data in \'shadow\' folder from all \'path\' folders available from \'system.disks\'\n clean_remote_broken Remove all broken remote backups\n watch Run infinite loop which create full + incremental backup sequence to allow efficient backup sequences\n server Run API server\n help, h Shows a list of commands or help for one commandGLOBAL OPTIONS:\n --config value, -c value Config \'FILE\' name. (default: "/etc/clickhouse-backup/config.yml") [$CLICKHOUSE_BACKUP_CONFIG]\n --help, -h show help\n --version, -v print the version'"""