diff --git a/pkg/checksum/checker.go b/pkg/checksum/checker.go
index cce4b14d..bcd2477c 100644
--- a/pkg/checksum/checker.go
+++ b/pkg/checksum/checker.go
@@ -21,8 +21,6 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-var binlogFlushIntervalDuringChecksum = 30 * time.Second
-
 type Checker struct {
 	sync.Mutex
 	table *table.TableInfo
@@ -209,7 +207,7 @@ func (c *Checker) Run(ctx context.Context) error {
 	// - If they are there, they will take a huge amount of time to flush
 	// - The memory requirements for 1MM deltas seems reasonable, but for a multi-day
 	//   checksum it is reasonable to assume it may exceed this.
-	go c.feed.StartPeriodicFlush(ctx, binlogFlushIntervalDuringChecksum)
+	go c.feed.StartPeriodicFlush(ctx, repl.DefaultFlushInterval)
 	defer c.feed.StopPeriodicFlush()
 
 	g, errGrpCtx := errgroup.WithContext(ctx)
diff --git a/pkg/migration/runner.go b/pkg/migration/runner.go
index 3dc38514..a6718a4b 100644
--- a/pkg/migration/runner.go
+++ b/pkg/migration/runner.go
@@ -43,10 +43,6 @@ var (
 	checkpointDumpInterval  = 50 * time.Second
 	tableStatUpdateInterval = 5 * time.Minute
 	statusInterval          = 30 * time.Second
-	// binlogPerodicFlushInterval is the time that the client will flush all binlog changes to disk.
-	// Longer values require more memory, but permit more merging.
-	// I expect we will change this to 1hr-24hr in the future.
-	binlogPerodicFlushInterval = 30 * time.Second
 )
 
 func (s migrationState) String() string {
@@ -429,7 +425,7 @@ func (r *Runner) setup(ctx context.Context) error {
 	// Continuously update the min/max and estimated rows
 	// and to flush the binary log position periodically.
 	go r.table.AutoUpdateStatistics(ctx, tableStatUpdateInterval, r.logger)
-	go r.replClient.StartPeriodicFlush(ctx, binlogPerodicFlushInterval)
+	go r.replClient.StartPeriodicFlush(ctx, repl.DefaultFlushInterval)
 
 	return nil
 }
diff --git a/pkg/repl/client.go b/pkg/repl/client.go
index d3f741af..d9d41264 100644
--- a/pkg/repl/client.go
+++ b/pkg/repl/client.go
@@ -33,6 +33,10 @@ const (
 	// multiple-flush-threads, which should help it group commit and still be fast.
 	DefaultBatchSize = 1000
 	flushThreads     = 16
+	// DefaultFlushInterval is the time that the client will flush all binlog changes to disk.
+	// Longer values require more memory, but permit more merging.
+	// I expect we will change this to 1hr-24hr in the future.
+	DefaultFlushInterval = 30 * time.Second
 )
 
 type queuedChange struct {
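
The net effect of this diff: the two duplicated 30-second flush intervals (one private to pkg/checksum, one private to pkg/migration under a misspelled name) are replaced by a single exported repl.DefaultFlushInterval, defined next to the other flush tuning constants in pkg/repl/client.go. The diff does not show the body of StartPeriodicFlush, so the following is only a minimal sketch of how a client plausibly consumes this interval; the ticker loop, stop channel, and flush helper are assumptions for illustration, not the actual pkg/repl implementation.

// Minimal sketch, not part of the diff: a periodic flusher driven by
// DefaultFlushInterval. All names below other than DefaultFlushInterval,
// StartPeriodicFlush, and StopPeriodicFlush are hypothetical.
package repl

import (
	"context"
	"time"
)

const DefaultFlushInterval = 30 * time.Second

type Client struct {
	stop chan struct{} // closed by StopPeriodicFlush
}

// StartPeriodicFlush flushes accumulated binlog changes on every tick
// until the context is canceled or StopPeriodicFlush is called. It is
// intended to run in its own goroutine, as the call sites above do.
func (c *Client) StartPeriodicFlush(ctx context.Context, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-c.stop:
			return
		case <-ticker.C:
			c.flush(ctx)
		}
	}
}

// StopPeriodicFlush signals the flush goroutine to exit.
func (c *Client) StopPeriodicFlush() {
	close(c.stop)
}

// flush is a hypothetical stand-in for applying the buffered deltas.
func (c *Client) flush(ctx context.Context) {
	// apply queued binlog changes to the target table (omitted)
}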