Skip to content

Commit

Permalink
Configure Kafka so it still works if a single box dies. (#12)
Browse files Browse the repository at this point in the history
* Add min.insync.replicas config condition

Co-authored-by: bentol <[email protected]>

* berks update

Co-authored-by: bentol <[email protected]>

* set min insync replicas as attribute

Co-authored-by: bentol <[email protected]>

* berks update

Co-authored-by: bentol <[email protected]>

* update config attribute

Co-authored-by: bentol <[email protected]>

* berks update

Co-authored-by: bentol <[email protected]>

Co-authored-by: bentol <[email protected]>
  • Loading branch information
2 people authored and ekarisky committed Jan 21, 2020
1 parent 6d59ed7 commit 085d412
Show file tree
Hide file tree
Showing 5 changed files with 27 additions and 27 deletions.
23 changes: 12 additions & 11 deletions attributes/default.rb
Original file line number Diff line number Diff line change
Expand Up @@ -49,23 +49,24 @@

# Kafka configuration, default provided by Kafka project
default[cookbook_name]['kafka']['port'] = 9092
default[cookbook_name]['kafka']['max_replication_factor'] = 3
default[cookbook_name]['kafka']['config'] = {
'advertised.listeners' => "PLAINTEXT://#{node['ipaddress']}:#{node[cookbook_name]['kafka']['port']}",
'broker.id' => -1,
'port' => node[cookbook_name]['kafka']['port'],
'num.network.threads' => 3,
'num.io.threads' => 8,
'socket.send.buffer.bytes' => 102_400,
'socket.receive.buffer.bytes' => 102_400,
'socket.request.max.bytes' => 104_857_600,
'log.cleaner.enable' => false,
'log.dirs' => '/var/lib/kafka',
'num.partitions' => 1,
'num.recovery.threads.per.data.dir' => 1,
'log.retention.check.interval.ms' => 300_000,
'log.retention.hours' => 168,
'log.segment.bytes' => 1_073_741_824,
'log.retention.check.interval.ms' => 300_000,
'log.cleaner.enable' => false,
'min.insync.replicas' => 2,
'num.io.threads' => 8,
'num.network.threads' => 3,
'num.partitions' => 1,
'num.recovery.threads.per.data.dir' => 1,
'offsets.topic.replication.factor' => 3,
'port' => node[cookbook_name]['kafka']['port'],
'socket.receive.buffer.bytes' => 102_400,
'socket.request.max.bytes' => 104_857_600,
'socket.send.buffer.bytes' => 102_400,
'zookeeper.connect' => 'localhost:2181',
'zookeeper.connection.timeout.ms' => 15_000,
'zookeeper.session.timeout.ms' => 15_000
Expand Down
23 changes: 12 additions & 11 deletions cookbooks/kafka/attributes/default.rb
Original file line number Diff line number Diff line change
Expand Up @@ -49,23 +49,24 @@

# Kafka configuration, default provided by Kafka project
default[cookbook_name]['kafka']['port'] = 9092
default[cookbook_name]['kafka']['max_replication_factor'] = 3
default[cookbook_name]['kafka']['config'] = {
'advertised.listeners' => "PLAINTEXT://#{node['ipaddress']}:#{node[cookbook_name]['kafka']['port']}",
'broker.id' => -1,
'port' => node[cookbook_name]['kafka']['port'],
'num.network.threads' => 3,
'num.io.threads' => 8,
'socket.send.buffer.bytes' => 102_400,
'socket.receive.buffer.bytes' => 102_400,
'socket.request.max.bytes' => 104_857_600,
'log.cleaner.enable' => false,
'log.dirs' => '/var/lib/kafka',
'num.partitions' => 1,
'num.recovery.threads.per.data.dir' => 1,
'log.retention.check.interval.ms' => 300_000,
'log.retention.hours' => 168,
'log.segment.bytes' => 1_073_741_824,
'log.retention.check.interval.ms' => 300_000,
'log.cleaner.enable' => false,
'min.insync.replicas' => 2,
'num.io.threads' => 8,
'num.network.threads' => 3,
'num.partitions' => 1,
'num.recovery.threads.per.data.dir' => 1,
'offsets.topic.replication.factor' => 3,
'port' => node[cookbook_name]['kafka']['port'],
'socket.receive.buffer.bytes' => 102_400,
'socket.request.max.bytes' => 104_857_600,
'socket.send.buffer.bytes' => 102_400,
'zookeeper.connect' => 'localhost:2181',
'zookeeper.connection.timeout.ms' => 15_000,
'zookeeper.session.timeout.ms' => 15_000
Expand Down
3 changes: 1 addition & 2 deletions cookbooks/kafka/recipes/kafka_config.rb
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,7 @@
kafka_hosts_count = node[cookbook_name]['kafka']['hosts_count']
if kafka_hosts_count < 3
config['offsets.topic.replication.factor'] = kafka_hosts_count
else
config['offsets.topic.replication.factor'] = node[cookbook_name]['kafka']['max_replication_factor']
config['min.insync.replicas'] = 1
end

# Write configurations
Expand Down
2 changes: 1 addition & 1 deletion cookbooks/kafka/recipes/kafka_consul_register.rb
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
{
"id": "#{node['hostname']}-hc-payload",
"name": "kafka",
"args": ["/bin/bash", "-c", "nc -vz #{node['ipaddress']} #{node[cookbook_name]['kafka']['port']} 2>&1 | grep open"],
"args": ["/bin/bash", "-c", "nc -vz #{node['ipaddress']} #{node[cookbook_name]['kafka']['port']} 2>&1 | grep 'open\\\|succeeded'"],
"interval": "10s",
"timeout": "1s"
}
Expand Down
3 changes: 1 addition & 2 deletions recipes/kafka_config.rb
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,7 @@
kafka_hosts_count = node[cookbook_name]['kafka']['hosts_count']
if kafka_hosts_count < 3
config['offsets.topic.replication.factor'] = kafka_hosts_count
else
config['offsets.topic.replication.factor'] = node[cookbook_name]['kafka']['max_replication_factor']
config['min.insync.replicas'] = 1
end

# Write configurations
Expand Down

0 comments on commit 085d412

Please sign in to comment.