Including the docs for all the Badger params
KalyanAkella committed Apr 6, 2021
1 parent 22bec12 commit 294d790
Showing 1 changed file: badger.ini (149 additions, 5 deletions)

# Dir is the path of the directory where key data will be stored.
# If it doesn't exist, Badger will try to create it for you.
Dir = /tmp/dkvsrv

# ValueDir is the path of the directory where value data will be stored.
# If it doesn't exist, Badger will try to create it for you.
ValueDir = /tmp/dkvsrv
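
# For reference, Dir and ValueDir map onto Badger's Go options API roughly as in this
# sketch (assuming the badger v2 package; error handling is minimal for brevity):
#
#   package main
#
#   import badger "github.com/dgraph-io/badger/v2"
#
#   func main() {
#       opts := badger.DefaultOptions("/tmp/dkvsrv") // sets both Dir and ValueDir
#       db, err := badger.Open(opts)
#       if err != nil {
#           panic(err)
#       }
#       defer db.Close()
#   }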

# When SyncWrites is true all writes are synced to disk. Setting this to false would achieve better
# performance, but may cause data loss in case of crash.
SyncWrites = true

# TableLoadingMode indicates which file loading mode should be used for the LSM tree data files.
# 0 -> files must be loaded using standard I/O
# 1 -> files must be loaded into RAM
# 2 -> files must be memory-mapped
TableLoadingMode = 2

# ValueLogLoadingMode indicates which file loading mode should be used for the value log data files.
# 0 -> files must be loaded using standard I/O
# 1 -> files must be loaded into RAM
# 2 -> files must be memory-mapped
ValueLogLoadingMode = 2
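
# Both sets of codes above correspond to Badger's options.FileLoadingMode constants in
# the Go API (a reference sketch, assuming badger v2):
#
#   import "github.com/dgraph-io/badger/v2/options"
#
#   options.FileIO    // 0 - standard I/O
#   options.LoadToRAM // 1 - load into RAM
#   options.MemoryMap // 2 - memory-mapped (the value used above)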

# NumVersionsToKeep sets how many versions to keep per key at most.
NumVersionsToKeep = 1

# When ReadOnly is true the DB will be opened in read-only mode.
# Multiple processes can then open the same Badger DB.
# Note: if the DB being opened had crashed before and has vlog data to be replayed,
# ReadOnly will cause Open to fail with an appropriate message.
ReadOnly = false

# Truncate indicates whether value log files should be truncated to delete corrupt data, if any.
# This option is ignored when ReadOnly is true.
Truncate = false

# When compression is enabled, every block will be compressed using the specified algorithm.
# This option doesn't affect existing tables; only newly created tables will be compressed.
# In Badger, compression is enabled by default: the default algorithm is ZSTD when built
# with Cgo, and Snappy without Cgo.
Compression = 0
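
# The code above corresponds to Badger's options.CompressionType constants in the Go
# API (a reference sketch, assuming badger v2):
#
#   import "github.com/dgraph-io/badger/v2/options"
#
#   options.None   // 0 - no compression (the value used above)
#   options.Snappy // 1 - Snappy
#   options.ZSTD   // 2 - ZSTD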

# When badger is running in InMemory mode, everything is stored in memory.
# No value/sst files are created. In case of a crash all data will be lost.
InMemory = false

# MaxTableSize sets the maximum size in bytes for each LSM table or file;
# 67108864 bytes is 64 MB.
MaxTableSize = 67108864

# LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
# Once a level grows to be larger than this ratio allowed, the compaction process will be triggered.
LevelSizeMultiplier = 10

# Maximum number of levels of compaction allowed in the LSM.
MaxLevels = 7

# ValueThreshold sets the threshold used to decide whether a value is stored
# directly in the LSM tree or separately in the value log files.
ValueThreshold = 1024
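
# For example, with ValueThreshold = 1024 a 100-byte value is stored inline in the
# LSM tree, while a 4 KB value is written to the value log and only a pointer to it
# is kept in the tree.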

# NumMemtables sets the maximum number of memtables to keep in memory before stalling.
NumMemtables = 5

# BlockSize sets the size of each block inside an SSTable. An SSTable is divided into
# multiple blocks internally. Each block is compressed using prefix diff encoding.
BlockSize = 4096

# BloomFalsePositive sets the false positive probability of the bloom filter in any SSTable.
# Before reading a key from table, the bloom filter is checked for key existence.
# BloomFalsePositive might impact the read performance of the DB.
# A lower BloomFalsePositive value might consume more memory.
BloomFalsePositive = 0.01
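
# For example, BloomFalsePositive = 0.01 means that roughly 1 in 100 lookups for a key
# that is absent from a table will still end up reading that table from disk.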

# When KeepL0InMemory is set to true we will keep all Level 0 tables in memory.
# This leads to better performance in writes as well as compactions. In case of DB crash,
# the value log replay will take longer to complete since memtables and all
# level 0 tables will have to be recreated.
# This option also sets the CompactL0OnClose option to true.
KeepL0InMemory = false

# This value specifies how much data the block cache should hold in memory. A small
# cache size means lower memory consumption, but lookups/iterations will take
# longer. It is recommended to use a cache if you're using compression or encryption.
# If compression and encryption both are disabled, adding a cache will lead to
# unnecessary overhead which will affect the read performance. Setting size to
# zero disables the cache altogether.
BlockCacheSize = 0
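
# For example, with compression or encryption enabled, a setting such as
# BlockCacheSize = 268435456 (256 MB) is a reasonable starting point.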

# This value specifies how much memory should be used by table indices. These
# indices include the block offsets and the bloom filters. Badger uses bloom
# filters to speed up lookups. Each table has its own bloom filter,
# which is approximately 5 MB in size.
#
# The default value of IndexCacheSize is 0, which means all indices are kept in
# memory and the cache is disabled.
IndexCacheSize = 0

# Badger uses bloom filters to speed up key lookups. When LoadBloomsOnOpen is set
# to false, bloom filters will be loaded lazily and not on DB open. Set this
# option to false to reduce the time taken to open the DB.
LoadBloomsOnOpen = true

# NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
NumLevelZeroTables = 5

# NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached,
# causes the DB to stall until compaction succeeds.
NumLevelZeroTablesStall = 15

# LevelOneSize sets the maximum total size for Level 1; 268435456 bytes is 256 MB.
LevelOneSize = 268435456

# ValueLogFileSize sets the maximum size of a single value log file.
ValueLogFileSize = 1073741823

# ValueLogMaxEntries sets, approximately, the maximum number of entries a value log file can hold.
# The actual size limit of a value log file is the minimum of ValueLogFileSize and
# ValueLogMaxEntries.
ValueLogMaxEntries = 1000000
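
# For example, with the values above a value log file whose entries average ~1 KB
# hits ValueLogMaxEntries (1,000,000 entries, roughly 1 GB) at about the same point
# as ValueLogFileSize (1073741823 bytes, i.e. 1 GB minus one byte); with smaller
# entries, ValueLogMaxEntries is the limit that triggers rotation first.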

# NumCompactors sets the number of compaction workers to run concurrently.
# Setting this to zero stops compactions, which could eventually cause writes to
# block forever. One worker is dedicated just to Level 0.
NumCompactors = 2

# CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
# This ensures that both reads and writes are efficient when the DB is opened later.
# CompactL0OnClose is set to true if KeepL0InMemory is set to true.
CompactL0OnClose = true

# LogRotatesToFlush sets the number of value log file rotations after which the memtables
# are flushed to disk. This is useful for write loads with few keys and large values: such
# a load fills up the value logs quickly while leaving the memtables largely empty, so on
# a crash and restart the value log head could force a replay of a large number of value
# log files, which slows down startup.
LogRotatesToFlush = 2

# The ZSTD compression algorithm supports 20 compression levels. The higher the
# compression level, the better the compression ratio and the lower the performance.
# We recommend ZSTD compression level 1; any level higher than 1 seems to
# deteriorate badger's performance.
# The following benchmarks were done with a 4 KB block size (the default block size). The
# compression ratio is supposed to increase with the compression level, but since the input
# to the compression algorithm is small (4 KB), there is no significant benefit at level 3.
# It is advised to write your own benchmarks before choosing a compression algorithm or level.
#
# benchmark                       iterations   time per op         throughput    ratio
# no_compression-16               10              502848865 ns/op  165.46 MB/s   -
# zstd_compression/level_1-16      7              739037966 ns/op  112.58 MB/s   2.93
# zstd_compression/level_3-16      7              756950250 ns/op  109.91 MB/s   2.72
# zstd_compression/level_15-16     1            11135686219 ns/op    7.47 MB/s   4.38
ZSTDCompressionLevel = 1

# When VerifyValueChecksum is set to true, the checksum will be verified for every entry read
# from the value log. If the value is stored in the SST (value size less than ValueThreshold),
# the checksum validation will not be done.
VerifyValueChecksum = false
# EncryptionKey is the key used to encrypt data. It should be 16, 24 or 32 bytes long,
# selecting AES-128, AES-192 or AES-256 respectively. Leave it empty to disable encryption.
EncryptionKey =

# EncryptionKeyRotationDuration sets the duration after which the internal data keys,
# which Badger derives from EncryptionKey, are rotated.
EncryptionKeyRotationDuration = 0
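
# A sketch of enabling encryption through the Go API (assuming badger v2; the key here
# is generated on the fly purely for illustration - a real deployment must persist it,
# since the same key is needed to reopen the DB):
#
#   import (
#       "crypto/rand"
#       "time"
#
#       badger "github.com/dgraph-io/badger/v2"
#   )
#
#   key := make([]byte, 32) // 32 bytes selects AES-256
#   if _, err := rand.Read(key); err != nil {
#       panic(err)
#   }
#   opts := badger.DefaultOptions("/tmp/dkvsrv").
#       WithEncryptionKey(key).
#       WithEncryptionKeyRotationDuration(10 * 24 * time.Hour) // badger's default is 10 days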

# When BypassLockGuard option is set, badger will not acquire a lock on the
# directory. This could lead to data corruption if multiple badger instances
# write to the same data directory. Use this option with caution.
BypassLockGuard = false

# ChecksumVerificationMode indicates when the DB should verify checksums for SSTable blocks.
# 0 -> the DB does not verify checksums for SSTable blocks
# 1 -> checksums are verified when an SSTable is opened
# 2 -> checksums are verified on every SSTable block read
# 3 -> checksums are verified on SSTable open and on every block read
ChecksumVerificationMode = 0
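
# These codes correspond to Badger's options.ChecksumVerificationMode constants in the
# Go API (a reference sketch, assuming badger v2):
#
#   import "github.com/dgraph-io/badger/v2/options"
#
#   options.NoVerification      // 0 - no verification (the value used above)
#   options.OnTableRead         // 1 - verify when an SSTable is opened
#   options.OnBlockRead         // 2 - verify on every block read
#   options.OnTableAndBlockRead // 3 - both of the above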

# DetectConflicts determines whether transactions are checked for
# conflicts before committing them. When this option is set to false,
# badger can process transactions at a higher rate.
# Setting this option to false might be useful when the user application
# deals with conflict detection and resolution itself.
DetectConflicts = true
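
# A sketch of trading conflict detection for write throughput in the Go API
# (assuming badger v2, where this option is exposed as WithDetectConflicts):
#
#   opts := badger.DefaultOptions("/tmp/dkvsrv").WithDetectConflicts(false)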
