Skip to content

Commit e22d174

Browse files
committed
change S3_MAX_PARTS_COUNT default value from 256 to 2000 to fix memory usage for S3, which increased in 2.4.16+
1 parent f2edb85 commit e22d174

File tree

4 files changed

+14
-11
lines changed

4 files changed

+14
-11
lines changed

ChangeLog.md

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
# v2.4.22
2+
BUG FIXES
3+
- change `S3_MAX_PARTS_COUNT` default value from `256` to `2000` to fix memory usage for S3, which increased in 2.4.16+
4+
15
# v2.4.21
26
BUG FIXES
37
- refactor execution of UpdateBackupMetrics to avoid "context canceled" errors, fix [814](https://github.com/Altinity/clickhouse-backup/issues/814)
@@ -24,7 +28,7 @@ BUG FIXES
2428
# v2.4.16
2529
BUG FIXES
2630
- increase `AZBLOB_TIMEOUT` to 4h instead of 15m, to allow downloading large data parts
27-
- change `S3_MAX_PARTS_COUNT` from `5000` to `1000` and minimal `S3_PART_SIZE` from 5Mb to 25Mb by default, to improve S3 upload / download speed
31+
- change `S3_MAX_PARTS_COUNT` default from `5000` to `256` and minimal `S3_PART_SIZE` from 5Mb to 25Mb by default, to improve S3 upload / download speed
2832

2933
# v2.4.15
3034
BUG FIXES

pkg/config/config.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -558,7 +558,7 @@ func DefaultConfig() *Config {
558558
StorageClass: string(s3types.StorageClassStandard),
559559
Concurrency: int(downloadConcurrency + 1),
560560
PartSize: 0,
561-
MaxPartsCount: 256,
561+
MaxPartsCount: 2000,
562562
},
563563
GCS: GCSConfig{
564564
CompressionLevel: 1,

pkg/storage/gcs.go

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -103,15 +103,14 @@ func (gcs *GCS) Connect(ctx context.Context) error {
103103

104104
if gcs.Config.ForceHttp {
105105
customTransport := &http.Transport{
106-
WriteBufferSize: 8388608,
106+
WriteBufferSize: 128 * 1024,
107107
Proxy: http.ProxyFromEnvironment,
108108
DialContext: (&net.Dialer{
109109
Timeout: 30 * time.Second,
110110
KeepAlive: 30 * time.Second,
111111
}).DialContext,
112-
ForceAttemptHTTP2: false,
113-
MaxIdleConns: 100,
114-
MaxIdleConnsPerHost: 100,
112+
MaxIdleConns: 1,
113+
MaxIdleConnsPerHost: 1,
115114
IdleConnTimeout: 90 * time.Second,
116115
TLSHandshakeTimeout: 10 * time.Second,
117116
ExpectContinueTimeout: 1 * time.Second,
@@ -130,9 +129,9 @@ func (gcs *GCS) Connect(ctx context.Context) error {
130129
}
131130
clientOptions = append(clientOptions, internaloption.WithDefaultEndpoint(endpoint))
132131

133-
customRountripper := &rewriteTransport{base: customTransport}
132+
customRoundTripper := &rewriteTransport{base: customTransport}
134133
gcpTransport, _, err := googleHTTPTransport.NewClient(ctx, clientOptions...)
135-
transport, err := googleHTTPTransport.NewTransport(ctx, customRountripper, clientOptions...)
134+
transport, err := googleHTTPTransport.NewTransport(ctx, customRoundTripper, clientOptions...)
136135
gcpTransport.Transport = transport
137136
if err != nil {
138137
return fmt.Errorf("failed to create GCP transport: %v", err)

pkg/storage/general.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ import (
3131

3232
const (
3333
// BufferSize - size of ring buffer between stream handlers
34-
BufferSize = 512 * 1024
34+
BufferSize = 128 * 1024
3535
)
3636

3737
type readerWrapperForContext func(p []byte) (n int, err error)
@@ -635,7 +635,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
635635
if bufferSize <= 0 {
636636
bufferSize = int(cfg.General.MaxFileSize) / cfg.AzureBlob.MaxPartsCount
637637
if int(cfg.General.MaxFileSize)%cfg.AzureBlob.MaxPartsCount > 0 {
638-
bufferSize++
638+
bufferSize += int(cfg.General.MaxFileSize) % cfg.AzureBlob.MaxPartsCount
639639
}
640640
if bufferSize < 2*1024*1024 {
641641
bufferSize = 2 * 1024 * 1024
@@ -669,7 +669,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
669669
s3Storage := &S3{
670670
Config: &cfg.S3,
671671
Concurrency: cfg.S3.Concurrency,
672-
BufferSize: 512 * 1024,
672+
BufferSize: 128 * 1024,
673673
PartSize: partSize,
674674
Log: log.WithField("logger", "S3"),
675675
}

0 commit comments

Comments
 (0)