From 085d41244703b7c0c16ebc84d48fe5ef546aecea Mon Sep 17 00:00:00 2001 From: fadlinurhasan Date: Tue, 21 Jan 2020 15:25:38 +0700 Subject: [PATCH] Configure Kafka so it still works if a single box dies. (#12) * Add min.insync.replicas config condition Co-authored-by: bentol * berks update Co-authored-by: bentol * set min insync replicas as attribute Co-authored-by: bentol * berks update Co-authored-by: bentol * update config attribute Co-authored-by: bentol * berks update Co-authored-by: bentol Co-authored-by: bentol --- attributes/default.rb | 23 ++++++++++--------- cookbooks/kafka/attributes/default.rb | 23 ++++++++++--------- cookbooks/kafka/recipes/kafka_config.rb | 3 +-- .../kafka/recipes/kafka_consul_register.rb | 2 +- recipes/kafka_config.rb | 3 +-- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/attributes/default.rb b/attributes/default.rb index 9a45704..887b522 100644 --- a/attributes/default.rb +++ b/attributes/default.rb @@ -49,23 +49,24 @@ # Kafka configuration, default provided by Kafka project default[cookbook_name]['kafka']['port'] = 9092 -default[cookbook_name]['kafka']['max_replication_factor'] = 3 default[cookbook_name]['kafka']['config'] = { 'advertised.listeners' => "PLAINTEXT://#{node['ipaddress']}:#{node[cookbook_name]['kafka']['port']}", 'broker.id' => -1, - 'port' => node[cookbook_name]['kafka']['port'], - 'num.network.threads' => 3, - 'num.io.threads' => 8, - 'socket.send.buffer.bytes' => 102_400, - 'socket.receive.buffer.bytes' => 102_400, - 'socket.request.max.bytes' => 104_857_600, + 'log.cleaner.enable' => false, 'log.dirs' => '/var/lib/kafka', - 'num.partitions' => 1, - 'num.recovery.threads.per.data.dir' => 1, + 'log.retention.check.interval.ms' => 300_000, 'log.retention.hours' => 168, 'log.segment.bytes' => 1_073_741_824, - 'log.retention.check.interval.ms' => 300_000, - 'log.cleaner.enable' => false, + 'min.insync.replicas' => 2, + 'num.io.threads' => 8, + 'num.network.threads' => 3, + 'num.partitions' => 1, + 
'num.recovery.threads.per.data.dir' => 1, + 'offsets.topic.replication.factor' => 3, + 'port' => node[cookbook_name]['kafka']['port'], + 'socket.receive.buffer.bytes' => 102_400, + 'socket.request.max.bytes' => 104_857_600, + 'socket.send.buffer.bytes' => 102_400, 'zookeeper.connect' => 'localhost:2181', 'zookeeper.connection.timeout.ms' => 15_000, 'zookeeper.session.timeout.ms' => 15_000 diff --git a/cookbooks/kafka/attributes/default.rb b/cookbooks/kafka/attributes/default.rb index 9a45704..887b522 100644 --- a/cookbooks/kafka/attributes/default.rb +++ b/cookbooks/kafka/attributes/default.rb @@ -49,23 +49,24 @@ # Kafka configuration, default provided by Kafka project default[cookbook_name]['kafka']['port'] = 9092 -default[cookbook_name]['kafka']['max_replication_factor'] = 3 default[cookbook_name]['kafka']['config'] = { 'advertised.listeners' => "PLAINTEXT://#{node['ipaddress']}:#{node[cookbook_name]['kafka']['port']}", 'broker.id' => -1, - 'port' => node[cookbook_name]['kafka']['port'], - 'num.network.threads' => 3, - 'num.io.threads' => 8, - 'socket.send.buffer.bytes' => 102_400, - 'socket.receive.buffer.bytes' => 102_400, - 'socket.request.max.bytes' => 104_857_600, + 'log.cleaner.enable' => false, 'log.dirs' => '/var/lib/kafka', - 'num.partitions' => 1, - 'num.recovery.threads.per.data.dir' => 1, + 'log.retention.check.interval.ms' => 300_000, 'log.retention.hours' => 168, 'log.segment.bytes' => 1_073_741_824, - 'log.retention.check.interval.ms' => 300_000, - 'log.cleaner.enable' => false, + 'min.insync.replicas' => 2, + 'num.io.threads' => 8, + 'num.network.threads' => 3, + 'num.partitions' => 1, + 'num.recovery.threads.per.data.dir' => 1, + 'offsets.topic.replication.factor' => 3, + 'port' => node[cookbook_name]['kafka']['port'], + 'socket.receive.buffer.bytes' => 102_400, + 'socket.request.max.bytes' => 104_857_600, + 'socket.send.buffer.bytes' => 102_400, 'zookeeper.connect' => 'localhost:2181', 'zookeeper.connection.timeout.ms' => 15_000, 
'zookeeper.session.timeout.ms' => 15_000 diff --git a/cookbooks/kafka/recipes/kafka_config.rb b/cookbooks/kafka/recipes/kafka_config.rb index 6498f76..772f3f9 100644 --- a/cookbooks/kafka/recipes/kafka_config.rb +++ b/cookbooks/kafka/recipes/kafka_config.rb @@ -47,8 +47,7 @@ kafka_hosts_count = node[cookbook_name]['kafka']['hosts_count'] if kafka_hosts_count < 3 config['offsets.topic.replication.factor'] = kafka_hosts_count -else - config['offsets.topic.replication.factor'] = node[cookbook_name]['kafka']['max_replication_factor'] + config['min.insync.replicas'] = 1 end # Write configurations diff --git a/cookbooks/kafka/recipes/kafka_consul_register.rb b/cookbooks/kafka/recipes/kafka_consul_register.rb index fddc24c..a6bfb1f 100644 --- a/cookbooks/kafka/recipes/kafka_consul_register.rb +++ b/cookbooks/kafka/recipes/kafka_consul_register.rb @@ -28,7 +28,7 @@ { "id": "#{node['hostname']}-hc-payload", "name": "kafka", - "args": ["/bin/bash", "-c", "nc -vz #{node['ipaddress']} #{node[cookbook_name]['kafka']['port']} 2>&1 | grep open"], + "args": ["/bin/bash", "-c", "nc -vz #{node['ipaddress']} #{node[cookbook_name]['kafka']['port']} 2>&1 | grep 'open\\\|succeeded'"], "interval": "10s", "timeout": "1s" } diff --git a/recipes/kafka_config.rb b/recipes/kafka_config.rb index 6498f76..772f3f9 100644 --- a/recipes/kafka_config.rb +++ b/recipes/kafka_config.rb @@ -47,8 +47,7 @@ kafka_hosts_count = node[cookbook_name]['kafka']['hosts_count'] if kafka_hosts_count < 3 config['offsets.topic.replication.factor'] = kafka_hosts_count -else - config['offsets.topic.replication.factor'] = node[cookbook_name]['kafka']['max_replication_factor'] + config['min.insync.replicas'] = 1 end # Write configurations