
Commit 7f316b6

Add support for the s3 GLACIER storage class. When GET returns an error for an archived object, restore requires about 5 minutes per key, so restores can be slow; GLACIER_IR looks more robust, so prefer it. Fix #614
1 parent 65af682 commit 7f316b6

6 files changed, +70 -7 lines changed

ChangeLog.md

Lines changed: 1 addition & 0 deletions
@@ -6,6 +6,7 @@ IMPROVEMENTS
 - Backup/Restore RBAC related objects from Zookeeper via direct connection to zookeeper/keeper, fix [604](https://github.com/Altinity/clickhouse-backup/issues/604)
 - Add `SHARDED_OPERATION_MODE` option, to easy create backup for sharded cluster, available values `none` (no sharding), `table` (table granularity), `database` (database granularity), `first-replica` (on the lexicographically sorted first active replica), thanks @mskwon, fix [639](https://github.com/Altinity/clickhouse-backup/issues/639), fix [648](https://github.com/Altinity/clickhouse-backup/pull/648)
 - Add support for `compression_format: none` for upload and download backups created with `--rbac` / `--rbac-only` or `--configs` / `--configs-only` options, fix [713](https://github.com/Altinity/clickhouse-backup/issues/713)
+- Add support for the s3 `GLACIER` storage class; when GET returns an error, restore requires about 5 minutes per key and can be slow, so `GLACIER_IR` looks more robust, fix [614](https://github.com/Altinity/clickhouse-backup/issues/614)
 
 BUG FIXES
 - fix possible create backup failures during UNFREEZE not exists tables, affected 2.2.7+ version, fix [704](https://github.com/Altinity/clickhouse-backup/issues/704)
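For context on that changelog entry, here is a minimal sketch (not part of this commit) of choosing the storage class at upload time with the AWS SDK for Go v2; the bucket, key, and payload below are placeholders:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// GLACIER_IR keeps objects readable with a plain GET, while GLACIER requires
	// an explicit (and slow) RestoreObject call before GET succeeds.
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:       aws.String("example-bucket"),            // placeholder
		Key:          aws.String("backup/shard0/example.tar"), // placeholder
		Body:         strings.NewReader("example payload"),
		StorageClass: types.StorageClassGlacierIr,
	})
	if err != nil {
		log.Fatal(err)
	}
}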

pkg/storage/s3.go

Lines changed: 3 additions & 2 deletions
@@ -144,7 +144,7 @@ func (s *S3) Connect(ctx context.Context) error {
 
 	if s.Config.Debug {
 		awsConfig.Logger = newS3Logger(s.Log)
-		awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponse
+		awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponseWithBody
 	}
 
 	httpTransport := http.DefaultTransport
@@ -630,7 +630,8 @@ func (s *S3) restoreObject(ctx context.Context, key string) error {
 	}
 
 	if res.Restore != nil && *res.Restore == "ongoing-request=\"true\"" {
-		s.Log.Debugf("%s still not restored, will wait %d seconds", key, i*5)
+		i += 1
+		s.Log.Warnf("%s still not restored, will wait %d seconds", key, i*5)
 		time.Sleep(time.Duration(i*5) * time.Second)
 	} else {
 		return nil
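The hunk above bumps the retry counter and raises the log level while waiting for a GLACIER restore. For reference, here is a simplified, self-contained sketch of such a restore-and-poll loop with the AWS SDK for Go v2; it is an illustration under assumed names, not the project's actual restoreObject method, and the restore tier and retention days are arbitrary:

package glaciersketch

import (
	"context"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// restoreGlacierObject requests a temporary restore of an archived key and then
// polls HeadObject with a linearly growing wait, similar to the loop patched above.
func restoreGlacierObject(ctx context.Context, client *s3.Client, bucket, key string) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		RestoreRequest: &types.RestoreRequest{
			Days: aws.Int32(1), // keep the restored copy readable for one day
		},
	})
	if err != nil {
		return err
	}
	for i := 1; ; i++ {
		head, err := client.HeadObject(ctx, &s3.HeadObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(key),
		})
		if err != nil {
			return err
		}
		// The x-amz-restore header contains `ongoing-request="true"` while the restore is still running.
		if head.Restore == nil || !strings.Contains(*head.Restore, `ongoing-request="true"`) {
			return nil
		}
		time.Sleep(time.Duration(i*5) * time.Second) // wait i*5 seconds, as in the hunk above
	}
}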

test/integration/config-s3-fips.yml

Lines changed: 0 additions & 1 deletion
@@ -35,7 +35,6 @@ s3:
   compression_format: tar
   allow_multipart_download: true
   concurrency: 3
-  storage_class: GLACIER
 api:
   listen: :7171
   create_integration_tables: true
test/integration/config-s3-glacier.yml

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+general:
+  disable_progress_bar: true
+  remote_storage: s3
+  upload_concurrency: 4
+  download_concurrency: 4
+  skip_tables:
+    - " system.*"
+    - "INFORMATION_SCHEMA.*"
+    - "information_schema.*"
+    - "_temporary_and_external_tables.*"
+  restore_schema_on_cluster: "{cluster}"
+clickhouse:
+  host: clickhouse
+  port: 9440
+  username: backup
+  password: meow=& 123?*%# МЯУ
+  secure: true
+  skip_verify: true
+  sync_replicated_tables: true
+  timeout: 1h
+  restart_command: bash -c 'echo "FAKE RESTART"'
+  backup_mutations: true
+# secrets for `FISP` will provide from `.env` or from GitHub actions secrets
+s3:
+  access_key: ${QA_AWS_ACCESS_KEY}
+  secret_key: ${QA_AWS_SECRET_KEY}
+  bucket: ${QA_AWS_BUCKET}
+  # endpoint: https://${QA_AWS_BUCKET}.s3-fips.${QA_AWS_REGION}.amazonaws.com/
+  region: ${QA_AWS_REGION}
+  acl: private
+  force_path_style: false
+  path: backup/{cluster}/{shard}
+  object_disk_path: object_disks/{cluster}/{shard}
+  disable_ssl: false
+  compression_format: tar
+  allow_multipart_download: false
+  concurrency: 4
+  # storage_class: GLACIER, 6000 seconds test execution
+  storage_class: GLACIER_IR
+api:
+  listen: :7171
+  create_integration_tables: true
+  integration_tables_host: "localhost"
+  allow_parallel: false
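The ${QA_AWS_*} placeholders above are rendered with envsubst by the integration test. Purely as an illustration of that step (the file path and flow here are assumptions, not project code), the same expansion can be reproduced in Go with os.ExpandEnv:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Read the template and substitute ${VAR} placeholders from the environment,
	// roughly what the `envsubst` call in the integration test does.
	template, err := os.ReadFile("test/integration/config-s3-glacier.yml") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(os.ExpandEnv(string(template)))
}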

test/integration/integration_test.go

Lines changed: 19 additions & 3 deletions
@@ -952,7 +952,7 @@ func TestFIPS(t *testing.T) {
 		r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr"))
 		r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf \"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey /etc/clickhouse-backup/ca-key.pem -CAcreateserial"))
 	}
-	r.NoError(dockerExec("clickhouse", "bash", "-c", "cat /etc/clickhouse-backup/config.yml.fips-template | envsubst > /etc/clickhouse-backup/config.yml"))
+	r.NoError(dockerExec("clickhouse", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.fips-template | envsubst > /etc/clickhouse-backup/config.yml"))
 
 	generateCerts("rsa", "4096", "")
 	createSQL := "CREATE TABLE default.fips_table (v UInt64) ENGINE=MergeTree() ORDER BY tuple()"
@@ -1158,6 +1158,20 @@ func TestDoRestoreConfigs(t *testing.T) {
 	ch.chbackend.Close()
 }
 
+func TestIntegrationS3Glacier(t *testing.T) {
+	if isTestShouldSkip("GLACIER_TESTS") {
+		t.Skip("Skipping GLACIER integration tests...")
+		return
+	}
+	r := require.New(t)
+	r.NoError(dockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template"))
+	installDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates")
+	r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config.yml"))
+	dockerExecTimeout = 60 * time.Minute
+	runMainIntegrationScenario(t, "GLACIER")
+	dockerExecTimeout = 3 * time.Minute
+}
+
 func TestIntegrationS3(t *testing.T) {
 	r := require.New(t)
 	r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml"))
@@ -2412,6 +2426,8 @@ func (ch *TestClickHouse) queryWithNoError(r *require.Assertions, query string,
 	r.NoError(err)
 }
 
+var dockerExecTimeout = 180 * time.Second
+
 func dockerExec(container string, cmd ...string) error {
 	out, err := dockerExecOut(container, cmd...)
 	log.Info(out)
@@ -2421,7 +2437,7 @@ func dockerExec(container string, cmd ...string) error {
 func dockerExecOut(container string, cmd ...string) (string, error) {
 	dcmd := []string{"exec", container}
 	dcmd = append(dcmd, cmd...)
-	return utils.ExecCmdOut(context.Background(), 180*time.Second, "docker", dcmd...)
+	return utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", dcmd...)
 }
 
 func dockerCP(src, dst string) error {
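This hunk routes the docker exec timeout through the new dockerExecTimeout variable. utils.ExecCmdOut itself is not shown in the diff; a plausible sketch of how such a helper could apply the timeout through the context (an assumption, not the project's implementation) is:

package sketch

import (
	"context"
	"os/exec"
	"time"
)

// execCmdOut runs a command with a per-call timeout and returns its combined output.
// Raising the timeout (e.g. dockerExecTimeout = 60 * time.Minute) gives slow GLACIER
// restores enough time to finish inside `docker exec`.
func execCmdOut(ctx context.Context, timeout time.Duration, name string, args ...string) (string, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	out, err := exec.CommandContext(ctx, name, args...).CombinedOutput()
	return string(out), err
}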
@@ -2490,7 +2506,7 @@ func installDebIfNotExists(r *require.Assertions, container string, pkgs ...stri
 		container,
 		"bash", "-xec",
 		fmt.Sprintf(
-			"export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; find /etc/apt/ -type f -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +; apt-get -y update; apt-get install --no-install-recommends -y %s; fi",
+			"export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +; apt-get -y update; apt-get install --no-install-recommends -y %s; fi",
 			len(pkgs), "^ii\\s+"+strings.Join(pkgs, "|^ii\\s+"), strings.Join(pkgs, " "),
 		),
 	))

test/integration/run.sh

Lines changed: 3 additions & 1 deletion
@@ -23,6 +23,8 @@ else
   export GCS_TESTS=${GCS_TESTS:-}
 fi
 
+export GLACIER_TESTS=${GLACIER_TESTS:-}
+
 export AZURE_TESTS=${AZURE_TESTS:-1}
 export RUN_ADVANCED_TESTS=${RUN_ADVANCED_TESTS:-1}
 export S3_DEBUG=${S3_DEBUG:-false}
@@ -44,5 +46,5 @@ make clean build-race-docker build-race-fips-docker
 docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} up -d
 docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} exec minio mc alias list
 
-go test -timeout 30m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go
+go test -timeout ${TESTS_TIMEOUT:-30m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go
 go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out"
