logstash.conf
input {
  jdbc {
    # JDBC connection string. The host "mysql" is the MySQL service name on this
    # Docker project's bridge network; adjust it for your own environment.
    jdbc_connection_string => "jdbc:mysql://mysql:3306/cita_staging"
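    # Hypothetical alternative: when running Logstash outside the Compose network,
    # point at MySQL's published port on the host instead, for example:
    #jdbc_connection_string => "jdbc:mysql://localhost:3306/cita_staging"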
    jdbc_user => "root"
    jdbc_password => "toor"
    # Path to the JDBC driver jar; /usr/share/logstash/config/ is the directory
    # this setup mounts the MySQL Connector/J jar into.
    jdbc_driver_library => "/usr/share/logstash/config/mysql-connector-java-8.0.18.jar"
    # Driver class name
    jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    jdbc_paging_enabled => "true"
    jdbc_page_size => "50000"
    ##jdbc_default_timezone => "Asia/Shanghai"
    # Alternatively, load the query from a SQL file:
    #statement_filepath => "/usr/share/logstash/config/query.sql"
    statement => "SELECT * FROM reservations"
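    # A minimal sketch of an incremental query instead of the full-table SELECT,
    # assuming the table has a modification_time column (hypothetical name).
    # The jdbc input substitutes :sql_last_value with the last saved tracking
    # value, and the derived unix_ts_in_secs column is what tracking_column
    # below records:
    #statement => "SELECT *, UNIX_TIMESTAMP(modification_time) AS unix_ts_in_secs FROM reservations WHERE UNIX_TIMESTAMP(modification_time) > :sql_last_value AND modification_time < NOW() ORDER BY modification_time ASC"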
    # Cron-style schedule (minute hour day-of-month month day-of-week);
    # "* * * * *" runs the query once per minute.
    schedule => "* * * * *"
    #type => "user"
    # Record the last run's tracking value and save it to the file given by
    # last_run_metadata_path, so the next run can resume from it.
    record_last_run => true
    # Track a column's value rather than the run timestamp. When true,
    # tracking_column below must name the column to track.
    use_column_value => true
    # The column to track when use_column_value is true. It must increase
    # monotonically; typically the primary key or an update timestamp.
    tracking_column => "unix_ts_in_secs"
    tracking_column_type => "numeric"
    #last_run_metadata_path => "./last_record/logstash_article_last_time"
    # If true, ignore the saved tracking value and re-query all records from
    # the beginning on every run.
    clean_run => false
    # Do not convert field (column) names to lowercase
    lowercase_column_names => false
  }
}
filter {
  mutate {
    # Copy the primary key into event metadata so it can serve as the
    # Elasticsearch document _id without being indexed in the document body,
    # then drop the bookkeeping fields from the event itself.
    copy => { "id" => "[@metadata][_id]" }
    remove_field => ["id", "@version", "unix_ts_in_secs"]
  }
}
output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    # As with MySQL above, "elasticsearch" is the service name on the Docker
    # project's bridge network.
    hosts => ["http://elasticsearch:9200"]
    index => "reservations"
    document_id => "%{[@metadata][_id]}"
  }
}
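# To check that documents are flowing, query the index from the host
# (assuming Elasticsearch's port 9200 is published locally):
#   curl -s "http://localhost:9200/reservations/_search?pretty"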