# drainer Configuration.
# addr (i.e. 'host:port') to listen on for drainer connections;
# this addr will be registered in etcd
addr = "127.0.0.1:8249"
# addr (i.e. 'host:port') to advertise to the public
advertise-addr = ""
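# for example, a hypothetical externally reachable address:
# advertise-addr = "192.168.0.13:8249"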
# the interval (in seconds) at which drainer checks the pumps' status
detect-interval = 10
# drainer metadata directory path
data-dir = "data.drainer"
# a comma separated list of PD endpoints
pd-urls = "http://127.0.0.1:2379"
# Use the specified compressor to compress payload between pump and drainer
compressor = ""
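# for example, to enable gzip compression (gzip is the compressor named in the TiDB Binlog docs):
# compressor = "gzip"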
#[security]
# Path of file that contains list of trusted SSL CAs for connection with cluster components.
# ssl-ca = "/path/to/ca.pem"
# Path of file that contains X509 certificate in PEM format for connection with cluster components.
# ssl-cert = "/path/to/drainer.pem"
# Path of file that contains X509 key in PEM format for connection with cluster components.
# ssl-key = "/path/to/drainer-key.pem"
# syncer Configuration.
[syncer]
# Assume the upstream sql-mode.
# If this is set, the same sql-mode is used to parse DDL statements, and the same sql-mode is set at the downstream when db-type is mysql.
# If this is not set, no sql-mode is set.
# sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"
# number of binlog events in a transaction batch
txn-batch = 20
# worker count to execute binlogs
# if the latency between drainer and the downstream (mysql or tidb) is too high, you might want to increase this
# to get higher throughput through more concurrent writes to the downstream
worker-count = 16
# whether to disable dispatching the sqls of a single binlog across workers;
# if true, each binlog is replayed downstream as a single transaction, in order
disable-dispatch = false
# safe mode splits an update into a delete and an insert
safe-mode = false
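# for illustration, with safe mode enabled an upstream update such as
#   UPDATE t SET v = 2 WHERE id = 1;
# is roughly replayed downstream as a delete followed by an insert
# (table and column names here are hypothetical):
#   DELETE FROM t WHERE id = 1;
#   INSERT INTO t (id, v) VALUES (1, 2);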
# downstream storage, equal to --dest-db-type
# valid values are "mysql", "file", "tidb", "flash", "kafka"
db-type = "mysql"
# do not sync the txns with the specified commit ts to the downstream
ignore-txn-commit-ts = []
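# for example, to skip one transaction (the commit ts below is hypothetical):
# ignore-txn-commit-ts = [407623867403247617]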
# do not sync these schemas
ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
## replicate-do-db takes priority over replicate-do-table if they have the same db name.
## Regular expressions are supported: a value starting with '~' is treated as a regular expression.
#
#replicate-do-db = ["~^b.*","s1"]
#[[syncer.replicate-do-table]]
#db-name ="test"
#tbl-name = "log"
#[[syncer.replicate-do-table]]
#db-name ="test"
#tbl-name = "~^a.*"
# do not sync these tables
#[[syncer.ignore-table]]
#db-name = "test"
#tbl-name = "log"
# the downstream mysql protocol database
[syncer.to]
host = "127.0.0.1"
user = "root"
password = ""
port = 3306
[syncer.to.checkpoint]
# you can uncomment this to change the database used to save the checkpoint when the downstream is mysql or tidb
#schema = "tidb_binlog"
# Uncomment this if you want to use file as db-type.
#[syncer.to]
# directory to save the binlog files; defaults to data-dir (where the checkpoint file is saved) if this is not configured.
# dir = "data.drainer"
# when db-type is kafka, you can uncomment this to configure the downstream kafka; otherwise the global default kafka config is used
#[syncer.to]
# only one of zookeeper-addrs and kafka-addrs needs to be configured; the kafka addresses are looked up from zookeeper if zookeeper-addrs is configured.
# zookeeper-addrs = "127.0.0.1:2181"
# kafka-addrs = "127.0.0.1:9092"
# kafka-version = "0.8.2.0"
# kafka-max-messages = 1024
#
#
# the topic name drainer pushes messages to; the default name is <cluster-id>_obinlog
# be careful not to use the same name when running multiple drainer instances
# topic-name = ""