
Commit 12462bd

Author: Kishan Sairam Adapa

refactor: make consumer the only public interface for live event listener (#91)

1 parent 7b78923 commit 12462bd

File tree: 6 files changed, +143 −96 lines changed


kafka-event-listener/build.gradle.kts

Lines changed: 4 additions & 0 deletions

@@ -3,6 +3,7 @@ plugins {
   jacoco
   id("org.hypertrace.publish-plugin")
   id("org.hypertrace.jacoco-report-plugin")
+  id("java-test-fixtures")
 }
 
 dependencies {
@@ -16,6 +17,9 @@ dependencies {
   testImplementation("org.junit.jupiter:junit-jupiter:5.9.2")
   testImplementation("org.mockito:mockito-core:5.2.0")
   testImplementation("com.github.ben-manes.caffeine:caffeine:3.1.8")
+
+  testFixturesApi(platform(project(":kafka-bom")))
+  testFixturesApi("org.apache.kafka:kafka-clients")
 }
 
 tasks.test {
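
The `java-test-fixtures` plugin publishes the classes under src/testFixtures as their own artifact, with kafka-clients exposed on the fixtures API so the MockConsumer types used by the new test utility resolve downstream. A sketch of how another module could pick the fixtures up; the consuming build file itself is assumed, only the `testFixtures(...)` accessor comes from Gradle's documented plugin API:

dependencies {
  // Reuses KafkaMockConsumerTestUtil (and the kafka-clients types it exposes)
  // in this module's tests; testFixtures(...) is provided by java-test-fixtures.
  testImplementation(testFixtures(project(":kafka-event-listener")))
}
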
kafka-event-listener/src/main/java/org/hypertrace/core/kafka/event/listener/KafkaConsumerUtils.java

Lines changed: 54 additions & 0 deletions

@@ -0,0 +1,54 @@
+package org.hypertrace.core.kafka.event.listener;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.ConfigValue;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.ByteArrayDeserializer;
+import org.apache.kafka.common.serialization.Deserializer;
+
+public class KafkaConsumerUtils {
+  public static final String TOPIC_NAME = "topic.name"; // required key in kafkaConfig
+  public static final String POLL_TIMEOUT = "poll.timeout"; // defaults to 30s if not provided
+
+  /**
+   * Returns a kafka consumer for provided config and key value deserializers. Only one instance of
+   * consumer should be required per pod, ensure singleton.
+   */
+  public static <K, V> Consumer<K, V> getKafkaConsumer(
+      Config kafkaConfig, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer) {
+    return new KafkaConsumer<>(
+        getKafkaConsumerConfigs(kafkaConfig.withFallback(getDefaultKafkaConsumerConfigs())),
+        keyDeserializer,
+        valueDeserializer);
+  }
+
+  private static Properties getKafkaConsumerConfigs(Config configs) {
+    Map<String, String> configMap = new HashMap<>();
+    Set<Map.Entry<String, ConfigValue>> entries = configs.entrySet();
+    for (Map.Entry<String, ConfigValue> entry : entries) {
+      String key = entry.getKey();
+      configMap.put(key, configs.getString(key));
+    }
+    Properties props = new Properties();
+    props.putAll(configMap);
+    return props;
+  }
+
+  private static Config getDefaultKafkaConsumerConfigs() {
+    Map<String, String> defaultKafkaConsumerConfigMap = new HashMap<>();
+    defaultKafkaConsumerConfigMap.put(
+        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
+    defaultKafkaConsumerConfigMap.put(
+        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
+    defaultKafkaConsumerConfigMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000");
+    defaultKafkaConsumerConfigMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
+    return ConfigFactory.parseMap(defaultKafkaConsumerConfigMap);
+  }
+}
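
KafkaConsumerUtils is now the supported way to build the consumer the listener runs on. A minimal wiring sketch; the broker address, group id, topic name, and String/Long payload types are illustrative assumptions, not part of this commit:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.hypertrace.core.kafka.event.listener.KafkaConsumerUtils;

class EventConsumerFactory {
  // Build once and share; the javadoc above asks for a single instance per pod.
  static Consumer<String, Long> buildEventConsumer() {
    Config kafkaConfig =
        ConfigFactory.parseMap(
            Map.of(
                "bootstrap.servers", "localhost:9092", // illustrative broker address
                "group.id", "event-listener-demo", // standard Kafka client key
                KafkaConsumerUtils.TOPIC_NAME, "event-update-topic",
                KafkaConsumerUtils.POLL_TIMEOUT, "5ms")); // optional, defaults to 30s
    // Explicit deserializers take the place of the ByteArrayDeserializer defaults.
    return KafkaConsumerUtils.getKafkaConsumer(
        kafkaConfig, new StringDeserializer(), new LongDeserializer());
  }
}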

kafka-event-listener/src/main/java/org/hypertrace/core/kafka/event/listener/KafkaLiveEventListener.java

Lines changed: 4 additions & 14 deletions

@@ -9,7 +9,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.function.BiConsumer;
 import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.common.serialization.Deserializer;
 
 /**
  * KafkaLiveEventListener consumes events produced to a single Kafka Topic from its initialisation
@@ -25,6 +24,10 @@
  *
  * <p>Typical usage of this listener is to back the remote caches to have lower latency of refresh
  * by generating respective information on kafka topics.
+ *
+ * <p>Refer to
+ * org.hypertrace.core.kafka.event.listener.KafkaLiveEventListenerTest#testEventModificationCache()
+ * for sample usage and test. Note that testing requires Thread.sleep > poll timeout in between
  */
 public class KafkaLiveEventListener<K, V> implements AutoCloseable {
   private final Future<Void> kafkaLiveEventListenerCallableFuture;
@@ -77,19 +80,6 @@ public KafkaLiveEventListener<K, V> build(
         cleanupExecutor);
   }
 
-  public KafkaLiveEventListener<K, V> build(
-      String consumerName,
-      Config kafkaConfig,
-      Deserializer<K> keyDeserializer,
-      Deserializer<V> valueDeserializer) {
-    assertCallbacksPresent();
-    return new KafkaLiveEventListener<>(
-        new KafkaLiveEventListenerCallable<>(
-            consumerName, kafkaConfig, keyDeserializer, valueDeserializer, callbacks),
-        executorService,
-        cleanupExecutor);
-  }
-
   private void assertCallbacksPresent() {
     if (callbacks.isEmpty()) {
       throw new IllegalArgumentException("no call backs are provided to KafkaLiveEventListener");
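
With the deserializer-based overload removed, the only public path is to build the consumer yourself (for example via KafkaConsumerUtils) and hand it to the remaining build overload. A sketch of the resulting wiring; the Builder type and registerCallback method are hypothetical spellings inferred from this diff's context lines (the callbacks list and assertCallbacksPresent) and may differ in the actual class:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.hypertrace.core.kafka.event.listener.KafkaConsumerUtils;
import org.hypertrace.core.kafka.event.listener.KafkaLiveEventListener;

class LiveUpdateWiring {
  static KafkaLiveEventListener<String, Long> attach() {
    Config kafkaConfig =
        ConfigFactory.parseMap(
            Map.of(
                "bootstrap.servers", "localhost:9092", // illustrative
                KafkaConsumerUtils.TOPIC_NAME, "event-update-topic"));
    Consumer<String, Long> consumer =
        KafkaConsumerUtils.getKafkaConsumer(
            kafkaConfig, new StringDeserializer(), new LongDeserializer());
    Map<String, Long> localView = new ConcurrentHashMap<>();
    // Builder/registerCallback are hypothetical names; at least one callback
    // must be registered or build(...) throws IllegalArgumentException.
    return new KafkaLiveEventListener.Builder<String, Long>()
        .registerCallback(localView::put)
        .build("event-update-consumer", kafkaConfig, consumer);
  }
}
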
kafka-event-listener/src/main/java/org/hypertrace/core/kafka/event/listener/KafkaLiveEventListenerCallable.java

Lines changed: 3 additions & 52 deletions

@@ -1,59 +1,33 @@
 package org.hypertrace.core.kafka.event.listener;
 
+import static org.hypertrace.core.kafka.event.listener.KafkaConsumerUtils.POLL_TIMEOUT;
+import static org.hypertrace.core.kafka.event.listener.KafkaConsumerUtils.TOPIC_NAME;
+
 import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import com.typesafe.config.ConfigValue;
 import io.micrometer.core.instrument.Counter;
 import java.time.Duration;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.function.BiConsumer;
 import java.util.stream.Collectors;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.errors.InterruptException;
-import org.apache.kafka.common.serialization.ByteArrayDeserializer;
-import org.apache.kafka.common.serialization.Deserializer;
 import org.hypertrace.core.serviceframework.metrics.PlatformMetricsRegistry;
 
 @Slf4j
 class KafkaLiveEventListenerCallable<K, V> implements Callable<Void> {
   private static final String EVENT_CONSUMER_ERROR_COUNT = "event.consumer.error.count";
-  private static final String TOPIC_NAME = "topic.name";
-  private static final String POLL_TIMEOUT = "poll.timeout";
   private final List<TopicPartition> topicPartitions;
   private final Consumer<K, V> kafkaConsumer;
   private final Duration pollTimeout;
   private final Counter errorCounter;
   private final List<BiConsumer<? super K, ? super V>> callbacks;
 
-  KafkaLiveEventListenerCallable(
-      String consumerName,
-      Config kafkaConfig,
-      Deserializer<K> keyDeserializer,
-      Deserializer<V> valueDeserializer,
-      List<BiConsumer<? super K, ? super V>> callbacks) {
-    this(
-        consumerName,
-        kafkaConfig,
-        new KafkaConsumer<>(
-            getKafkaConsumerConfigs(kafkaConfig.withFallback(getDefaultKafkaConsumerConfigs())),
-            keyDeserializer,
-            valueDeserializer),
-        callbacks);
-  }
-
   KafkaLiveEventListenerCallable(
       String consumerName,
       Config kafkaConfig,
@@ -107,27 +81,4 @@ public Void call() {
 
     } while (true);
   }
-
-  private static Config getDefaultKafkaConsumerConfigs() {
-    Map<String, String> defaultKafkaConsumerConfigMap = new HashMap<>();
-    defaultKafkaConsumerConfigMap.put(
-        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
-    defaultKafkaConsumerConfigMap.put(
-        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
-    defaultKafkaConsumerConfigMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000");
-    defaultKafkaConsumerConfigMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
-    return ConfigFactory.parseMap(defaultKafkaConsumerConfigMap);
-  }
-
-  private static Properties getKafkaConsumerConfigs(Config configs) {
-    Map<String, String> configMap = new HashMap<>();
-    Set<Entry<String, ConfigValue>> entries = configs.entrySet();
-    for (Entry<String, ConfigValue> entry : entries) {
-      String key = entry.getKey();
-      configMap.put(key, configs.getString(key));
-    }
-    Properties props = new Properties();
-    props.putAll(configMap);
-    return props;
-  }
 }
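
The callable now reads TOPIC_NAME and POLL_TIMEOUT from the shared constants in KafkaConsumerUtils, so the config contract lives in one place. An illustrative HOCON fragment a service might pass in; the enclosing kafka.config key and the values are assumptions, only the two inner key names come from this commit:

kafka.config {
  topic.name = event-update-topic   # required; the listener rejects configs without it
  poll.timeout = 5ms                # optional; falls back to 30s
  bootstrap.servers = "localhost:9092"
}
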

kafka-event-listener/src/test/java/org/hypertrace/core/kafka/event/listener/KafkaLiveEventListenerTest.java

Lines changed: 7 additions & 30 deletions

@@ -3,26 +3,19 @@
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.Mockito.mock;
 
 import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
 import com.github.benmanes.caffeine.cache.Caffeine;
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigException;
 import com.typesafe.config.ConfigFactory;
 import java.time.Duration;
-import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.MockConsumer;
 import org.apache.kafka.clients.consumer.OffsetResetStrategy;
-import org.apache.kafka.common.Node;
-import org.apache.kafka.common.PartitionInfo;
-import org.apache.kafka.common.TopicPartition;
 import org.junit.jupiter.api.Test;
 
 class KafkaLiveEventListenerTest {
@@ -50,45 +43,29 @@ void testThrowOnInvalidInputs() {
   @Test
   void testEventModificationCache() throws Exception {
     // kafka consumer mock setup
-    MockConsumer<String, Long> kafkaConsumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
-    String topic = "event-update-topic";
-    kafkaConsumer.updatePartitions(
-        topic,
-        List.of(
-            getPartitionInfo(topic, 0),
-            getPartitionInfo(topic, 1),
-            getPartitionInfo(topic, 2),
-            getPartitionInfo(topic, 3)));
-    HashMap<TopicPartition, Long> endOffsets = new HashMap<>();
-    endOffsets.put(new TopicPartition(topic, 0), 50L);
-    endOffsets.put(new TopicPartition(topic, 1), 50L);
-    endOffsets.put(new TopicPartition(topic, 2), 50L);
-    endOffsets.put(new TopicPartition(topic, 3), 50L);
-    kafkaConsumer.updateEndOffsets(endOffsets);
+    String topicName = "event-update-topic";
+    KafkaMockConsumerTestUtil<String, Long> mockConsumerTestUtil =
+        new KafkaMockConsumerTestUtil<>(topicName, 4);
     // create instance of event modification cache consuming from this consumer
     EventModificationCache eventModificationCache =
         new EventModificationCache(
             "modification-event-consumer",
-            ConfigFactory.parseMap(Map.of("topic.name", topic, "poll.timeout", "5ms")),
-            kafkaConsumer);
+            ConfigFactory.parseMap(Map.of("topic.name", topicName, "poll.timeout", "5ms")),
+            mockConsumerTestUtil.getMockConsumer());
     Thread.sleep(10);
     assertEquals(10L, eventModificationCache.get(10));
     assertEquals(100L, eventModificationCache.get(100));
     // not present key won't trigger any population but callback function should be called
-    kafkaConsumer.addRecord(new ConsumerRecord<>(topic, 0, 100, "32", 89L));
+    mockConsumerTestUtil.addRecordToPartition(0, "32", 89L);
    Thread.sleep(100);
     assertFalse(eventModificationCache.hasKey(32));
     // existing key will be modified based on entry
-    kafkaConsumer.addRecord(new ConsumerRecord<>(topic, 3, 200, "10", -3L));
+    mockConsumerTestUtil.addRecordToPartition(3, "10", -3L);
     Thread.sleep(100);
     assertEquals(-3L, eventModificationCache.get(10));
     eventModificationCache.close();
   }
 
-  private PartitionInfo getPartitionInfo(String topic, int partition) {
-    return new PartitionInfo(topic, partition, mock(Node.class), new Node[0], new Node[0]);
-  }
-
   static class EventModificationCache {
     private final AsyncLoadingCache<Integer, Long> cache;
     private final KafkaLiveEventListener<String, Long> eventListener;
kafka-event-listener/src/testFixtures/java/org/hypertrace/core/kafka/event/listener/KafkaMockConsumerTestUtil.java

Lines changed: 71 additions & 0 deletions

@@ -0,0 +1,71 @@
+package org.hypertrace.core.kafka.event.listener;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.MockConsumer;
+import org.apache.kafka.clients.consumer.OffsetResetStrategy;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+
+public class KafkaMockConsumerTestUtil<K, V> {
+  private final String topicName;
+  private final Map<TopicPartition, Long> currentOffsets;
+
+  private final MockConsumer<K, V> mockConsumer;
+
+  public KafkaMockConsumerTestUtil(String topicName, int numPartitions) {
+    this.topicName = topicName;
+    mockConsumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
+    List<PartitionInfo> partitionInfos =
+        IntStream.range(0, numPartitions)
+            .mapToObj(i -> getPartitionInfo(topicName, i))
+            .collect(Collectors.toUnmodifiableList());
+    mockConsumer.updatePartitions(topicName, partitionInfos);
+    currentOffsets =
+        IntStream.range(0, numPartitions)
+            .mapToObj(i -> getTopicPartition(topicName, i))
+            .collect(Collectors.toMap(Function.identity(), k -> 1L));
+    mockConsumer.updateEndOffsets(currentOffsets);
+  }
+
+  /** creates 1 partition by default */
+  public KafkaMockConsumerTestUtil(String topicName) {
+    this(topicName, 1);
+  }
+
+  public MockConsumer<K, V> getMockConsumer() {
+    return mockConsumer;
+  }
+
+  /** adds to 0th partition by default */
+  public void addRecord(K key, V value) {
+    addRecordToPartition(0, key, value);
+  }
+
+  public void addRecordToPartition(int partition, K key, V value) {
+    Long latestOffset =
+        currentOffsets.computeIfPresent(getTopicPartition(topicName, partition), (k, v) -> v + 1);
+    if (Objects.isNull(latestOffset)) {
+      throw new UnsupportedOperationException(
+          "cannot add to partition "
+              + partition
+              + ", total partitions is "
+              + currentOffsets.size());
+    }
+    mockConsumer.addRecord(new ConsumerRecord<>(topicName, partition, latestOffset, key, value));
+  }
+
+  private static PartitionInfo getPartitionInfo(String topic, int partition) {
+    return new PartitionInfo(topic, partition, Node.noNode(), new Node[0], new Node[0]);
+  }
+
+  private static TopicPartition getTopicPartition(String topic, int partition) {
+    return new TopicPartition(topic, partition);
+  }
+}
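
Downstream modules that pull in the fixtures (see the Gradle note above) can drive listener tests without a broker. A small sketch of the fixture's guard-rail behaviour, derived entirely from the code above; the test class name and values are illustrative:

import static org.junit.jupiter.api.Assertions.assertThrows;

import org.hypertrace.core.kafka.event.listener.KafkaMockConsumerTestUtil;
import org.junit.jupiter.api.Test;

class KafkaMockConsumerTestUtilDemoTest {
  @Test
  void rejectsRecordsForUnknownPartitions() {
    // Topic with partitions 0 and 1; per-partition tracked offsets start at 1.
    KafkaMockConsumerTestUtil<String, Long> util =
        new KafkaMockConsumerTestUtil<>("demo-topic", 2);
    util.addRecord("42", 7L); // goes to partition 0 by default, offset moves to 2
    // Adding to a partition that was never created fails fast.
    assertThrows(
        UnsupportedOperationException.class, () -> util.addRecordToPartition(5, "43", 8L));
  }
}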
