This repository has been archived by the owner on Aug 7, 2021. It is now read-only.
-
-
Notifications
You must be signed in to change notification settings - Fork 7
/
env.example
73 lines (55 loc) · 2.38 KB
/
env.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# APP
APP_NAME=kafka-swoole
# APP ID
APP_ID=0
# zh_CN,en_US
APP_LANGUGE=en_US
# Key that affects storage access: prod/test
APP_MODE=test
# The master process only receives RPC requests to select data held in memory
SERVER_AF_UNIX_DIR=./tmp
# Log storage directory
KAFKA_LOG_DIR=./logs
# Log storage level
KAFKA_LOG_LEVEL=DEBUG
# Log to stdout
KAFKA_LOG_SDTOUT=true
# KAFKA_CLIENT_CONSUMER_NUM dynamically changes this parameter based on the partition of the subscribed topic
KAFKA_CLIENT_CONSUMER_NUM=1
# KAFKA_CLIENT
# Client Process
# KAFKA_CLIENT_API_MODE: "HIGH_LEVEL" / "LOW_LEVEL"
# KAFKA_CLIENT_CONSUMER_NUM: Must be less than the maximum partition in topic
KAFKA_CLIENT_API_MODE=LOW_LEVEL
# REDIS/FILE/DIRECTLY
# If you choose "DIRECTLY" mode, the number of processing logical processes is equal to the minimum number of kafka client processes.
# The KAFKA_CUSTOM_PROCESS_NUM parameter is ignored.
# Make sure your consumption logic consumes as much data as possible, otherwise the rate of consumption will be lower than the rate of production.
# The process generated by KAFKA_CUSTOM_PROCESS_NUM gets messages from the storage medium
KAFKA_MESSAGE_STORAGE=REDIS
# Message reliability level.
# When "LOW" reliability is selected, only the maximum offset will be committed in a request.
# When "HIGH" reliability is selected, the data in a request is offsetcommit every time, but with the consumption of RTT.
KAFKA_MESSAGE_RELIABILITY=LOW
# Number of message processing processes
KAFKA_SINKER_PROCESS_NUM=1
# Which redis pool configuration is used for storage
KAFKA_STORAGE_REDIS=POOL_REDIS_0
# Redis stores the persistent key
# `lpush` to the pending queue, then `rpoplpush` from the pending queue to the processing queue, then `lrem` from the processing queue
# A check runs on the processing queue: if a timeout occurs, the message is pushed back to the pending queue to ensure it is not lost
KAFKA_STORAGE_REDIS_PENDING_KEY=${APP_NAME}:${APP_ID}:${APP_MODE}:storage:redis:messages:pending
KAFKA_STORAGE_REDIS_PROCESSING_KEY=${APP_NAME}:${APP_ID}:${APP_MODE}:storage:redis:messages:processing
# Redis persists the maximum number of messages
KAFKA_STORAGE_REDIS_LIMIT=40000
# The maximum number of messages SINKER can fetch from a storage medium at one time
KAFKA_MAX_FETCH_MESSAGE_NUM=1
# Redis Pool
POOL_REDIS_NUM=1
POOL_REDIS_0_MAX_NUMBER=5
POOL_REDIS_0_MAX_IDLE=3
POOL_REDIS_0_MIN_IDLE=0
POOL_REDIS_0_HOST=mredis
POOL_REDIS_0_PORT=6379
POOL_REDIS_0_AUTH=
# other redis config ...