Commit e1e4ede

accounts-db: Benchmark cache evictions
The already existing `concurrent_{read,scan}_write` benchmarks are not sufficient for benchmarking eviction and evaluating which eviction policy performs best, because they don't fill up the cache, so eviction never happens. The number of readers in that benchmark is low (5) and there are no writer threads causing more contention. The cache is RW-locked, so benchmarking only concurrent reads doesn't push it to its limits.

Add new benchmarks which are made with measuring contention in mind:

- `read_only_accounts_cache` - benchmarks read-only cache loads and stores without causing eviction.
- `read_only_accounts_cache_lo_hi` - benchmarks read-only cache eviction with low and high thresholds. After each eviction, enough stores need to be made to cover the difference between the low and high thresholds before the next eviction is triggered. Aims to simulate contention in a manner close to what occurs on validators.
- `read_only_accounts_cache_hi` - benchmarks read-only cache eviction without differentiating between low and high thresholds. Each store immediately triggers another eviction. Measures the absolute worst-case scenario, which may not reflect actual conditions on validators.
1 parent 22c8951 commit e1e4ede
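
To make the lo/hi pattern concrete, here is a minimal, self-contained sketch (not the commit's code) of the store/evict cycle that `read_only_accounts_cache_lo_hi` exercises. `ToyCache`, the thresholds, the 128-byte entries, and the single-threaded Criterion loop are illustrative stand-ins; the real benchmark drives the accounts-db read-only cache with concurrent reader and writer threads.

use {
    criterion::{criterion_group, criterion_main, Criterion},
    std::{
        collections::{HashMap, VecDeque},
        sync::RwLock,
    },
};

// Toy stand-in for the read-only accounts cache: an RW-locked map plus an
// insertion-ordered queue that decides which entries get evicted first.
struct ToyCache {
    entries: RwLock<HashMap<u64, Vec<u8>>>,
    order: RwLock<VecDeque<u64>>,
    lo: usize,
    hi: usize,
}

impl ToyCache {
    fn new(lo: usize, hi: usize) -> Self {
        Self {
            entries: RwLock::new(HashMap::new()),
            order: RwLock::new(VecDeque::new()),
            lo,
            hi,
        }
    }

    // Store an entry; once the cache grows past the high threshold, evict the
    // oldest entries until it shrinks back down to the low threshold.
    fn store(&self, key: u64, value: Vec<u8>) {
        let mut entries = self.entries.write().unwrap();
        let mut order = self.order.write().unwrap();
        entries.insert(key, value);
        order.push_back(key);
        if entries.len() > self.hi {
            while entries.len() > self.lo {
                if let Some(oldest) = order.pop_front() {
                    entries.remove(&oldest);
                } else {
                    break;
                }
            }
        }
    }
}

fn bench_lo_hi(c: &mut Criterion) {
    // Illustrative thresholds: each eviction frees `hi - lo` slots, so only
    // roughly one store in every two thousand pays the eviction cost.
    let cache = ToyCache::new(10_000, 12_000);
    // Pre-fill up to the high threshold so the hot loop keeps re-entering the
    // store/evict cycle instead of benchmarking a cold cache.
    for key in 0..12_000u64 {
        cache.store(key, vec![0u8; 128]);
    }
    let mut next_key = 12_000u64;
    c.bench_function("toy_read_only_cache_lo_hi", |b| {
        b.iter(|| {
            cache.store(next_key, vec![0u8; 128]);
            next_key += 1;
        })
    });
}

criterion_group!(benches, bench_lo_hi);
criterion_main!(benches);

The `read_only_accounts_cache_hi` variant corresponds to setting the two thresholds equal, so every store immediately evicts and there is no quiet window between evictions.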

10 files changed, +499 -29 lines changed

Cargo.lock

Lines changed: 51 additions & 1 deletion
Some generated files are not rendered by default.

Cargo.toml

Lines changed: 1 addition & 0 deletions

@@ -375,6 +375,7 @@ merlin = "3"
 min-max-heap = "1.3.0"
 mockall = "0.11.4"
 modular-bitfield = "0.11.2"
+ndarray = "0.16.1"
 nix = "0.29.0"
 num-bigint = "0.4.6"
 num-derive = "0.4"

accounts-db/Cargo.toml

Lines changed: 5 additions & 0 deletions

@@ -67,6 +67,7 @@ assert_matches = { workspace = true }
 criterion = { workspace = true }
 libsecp256k1 = { workspace = true }
 memoffset = { workspace = true }
+ndarray = { workspace = true }
 rand_chacha = { workspace = true }
 serde_bytes = { workspace = true }
 # See order-crates-for-publishing.py for using this unusual `path = "."`
@@ -103,6 +104,10 @@ harness = false
 name = "bench_hashing"
 harness = false
 
+[[bench]]
+name = "read_only_accounts_cache"
+harness = false
+
 [[bench]]
 name = "bench_serde"
 harness = false
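
Once this `[[bench]]` target is registered, the new benchmark can be run on its own through Criterion, e.g. with `cargo bench --bench read_only_accounts_cache` from the accounts-db crate (or adding `-p solana-accounts-db` from the workspace root, assuming that is the crate's package name).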

accounts-db/benches/bench_accounts_file.rs

Lines changed: 6 additions & 27 deletions

@@ -1,8 +1,6 @@
 #![allow(clippy::arithmetic_side_effects)]
 use {
     criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput},
-    rand::{distributions::WeightedIndex, prelude::*},
-    rand_chacha::ChaChaRng,
     solana_accounts_db::{
         accounts_file::StorageAccess,
         append_vec::{self, AppendVec, SCAN_BUFFER_SIZE_WITHOUT_DATA},
@@ -15,13 +13,14 @@ use {
         account::{AccountSharedData, ReadableAccount},
         clock::Slot,
         pubkey::Pubkey,
-        rent::Rent,
         rent_collector::RENT_EXEMPT_RENT_EPOCH,
         system_instruction::MAX_PERMITTED_DATA_LENGTH,
     },
-    std::{iter, mem::ManuallyDrop},
+    std::mem::ManuallyDrop,
 };
 
+mod utils;
+
 const ACCOUNTS_COUNTS: [usize; 4] = [
     1, // the smallest count; will bench overhead
     100, // number of accounts written per slot on mnb (with *no* rent rewrites)
@@ -116,40 +115,20 @@ fn bench_scan_pubkeys(c: &mut Criterion) {
         MAX_PERMITTED_DATA_LENGTH as usize,
     ];
     let weights = [3, 75, 20, 1, 1];
-    let distribution = WeightedIndex::new(weights).unwrap();
-
-    let rent = Rent::default();
-    let rent_minimum_balances: Vec<_> = data_sizes
-        .iter()
-        .map(|data_size| rent.minimum_balance(*data_size))
-        .collect();
 
     for accounts_count in ACCOUNTS_COUNTS {
         group.throughput(Throughput::Elements(accounts_count as u64));
-        let mut rng = ChaChaRng::seed_from_u64(accounts_count as u64);
 
-        let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique)
+        let storable_accounts: Vec<_> = utils::accounts(255, &data_sizes, &weights)
            .take(accounts_count)
            .collect();
-        let accounts: Vec<_> = iter::repeat_with(|| {
-            let index = distribution.sample(&mut rng);
-            AccountSharedData::new_rent_epoch(
-                rent_minimum_balances[index],
-                data_sizes[index],
-                &Pubkey::default(),
-                RENT_EXEMPT_RENT_EPOCH,
-            )
-        })
-        .take(pubkeys.len())
-        .collect();
-        let storable_accounts: Vec<_> = iter::zip(&pubkeys, &accounts).collect();
 
         // create an append vec file
         let append_vec_path = temp_dir.path().join(format!("append_vec_{accounts_count}"));
         _ = std::fs::remove_file(&append_vec_path);
-        let file_size = accounts
+        let file_size = storable_accounts
             .iter()
-            .map(|account| append_vec::aligned_stored_size(account.data().len()))
+            .map(|(_, account)| append_vec::aligned_stored_size(account.data().len()))
             .sum();
         let append_vec = AppendVec::new(append_vec_path, true, file_size);
         let stored_accounts_info = append_vec
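
The shared `utils` module referenced above is presumably added elsewhere in this commit and is not shown in this excerpt. Reconstructing it from the code removed here, a helper along the following lines would reproduce the old behavior; the signature, the meaning of the first argument (treated as an RNG seed below), and the import paths are guesses rather than the commit's actual code.

use {
    rand::{distributions::WeightedIndex, prelude::*},
    rand_chacha::ChaChaRng,
    solana_sdk::{
        account::AccountSharedData, pubkey::Pubkey, rent::Rent,
        rent_collector::RENT_EXEMPT_RENT_EPOCH,
    },
    std::iter,
};

// Yields (pubkey, account) pairs whose data sizes are drawn from `data_sizes`
// with the given `weights`; `seed` makes the sequence deterministic.
pub fn accounts<'a>(
    seed: u64,
    data_sizes: &'a [usize],
    weights: &'a [usize],
) -> impl Iterator<Item = (Pubkey, AccountSharedData)> + 'a {
    let distribution = WeightedIndex::new(weights).unwrap();
    let rent = Rent::default();
    // Pre-compute the rent-exempt balance for each candidate data size.
    let rent_minimum_balances: Vec<_> = data_sizes
        .iter()
        .map(|data_size| rent.minimum_balance(*data_size))
        .collect();
    let mut rng = ChaChaRng::seed_from_u64(seed);
    iter::repeat_with(move || {
        let index = distribution.sample(&mut rng);
        let account = AccountSharedData::new_rent_epoch(
            rent_minimum_balances[index],
            data_sizes[index],
            &Pubkey::default(),
            RENT_EXEMPT_RENT_EPOCH,
        );
        (Pubkey::new_unique(), account)
    })
}

If the first argument is indeed a seed, fixing it at 255 would make the generated account mix identical across the different account counts being benchmarked, unlike the old per-count seeding.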
