forked from thepaul/cassandra-dtest
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: global_row_key_cache_test.py
92 lines (73 loc) · 3.31 KB
/
global_row_key_cache_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import time
from dtest import Tester, debug
from loadmaker import LoadMaker
class TestGlobalRowKeyCache(Tester):
    """Exercise Cassandra's global key cache and row cache.

    Runs the same insert/update/delete/validate workload under every
    combination of key-cache and row-cache sizes (0 MB = disabled,
    10 MB = enabled), restarting the cluster in between to verify that
    cache save/load round-trips correctly.
    """

    def __init__(self, *argv, **kwargs):
        super(TestGlobalRowKeyCache, self).__init__(*argv, **kwargs)
        # When a node goes down under load it prints an error in its log.
        # If we don't allow log errors, then the test will fail.
        # self.allow_log_errors = True

    def functional_test(self):
        """
        Test global caches.

        Test that save and load work in the situation when you write to
        different CFs. Read 2 or 3 times to make sure the page cache doesn't
        skew the results.
        """
        # create some rows to insert
        NUM_INSERTS = 100
        NUM_UPDATES = 10
        NUM_DELETES = 1

        cluster = self.cluster
        cluster.populate(3)
        node1 = cluster.nodelist()[0]

        # Try every (key cache, row cache) size combination; 0 disables
        # the cache entirely, 10 MB enables it.
        for kcsim in (0, 10):
            for rcsim in (0, 10):
                setup_name = "%d_%d" % (kcsim, rcsim)
                ks_name = 'ks_' + setup_name
                debug("setup " + setup_name)
                # Short save periods (5 s) so caches are persisted to disk
                # before the cluster is stopped below.
                cluster.set_configuration_options(values={
                    'key_cache_size_in_mb': kcsim,
                    'row_cache_size_in_mb': rcsim,
                    'row_cache_save_period': 5,
                    'key_cache_save_period': 5,
                })
                cluster.start()
                time.sleep(.5)
                cursor = self.cql_connection(node1).cursor()
                self.create_ks(cursor, ks_name, 3)
                time.sleep(1)  # wait for propagation

                host, port = node1.network_interfaces['thrift']
                # create some load makers
                lm_standard = LoadMaker(host, port,
                                        keyspace_name=ks_name, column_family_type='standard')
                lm_counter = LoadMaker(host, port,
                                       keyspace_name=ks_name, column_family_type='standard', is_counter=True)

                # insert some rows
                lm_standard.generate(NUM_INSERTS)
                lm_counter.generate(NUM_INSERTS)

                # flush everything to get it into sstables
                for node in cluster.nodelist():
                    node.flush()

                debug("Validating")
                for i in range(3):
                    # read and modify multiple times to get data into and
                    # invalidated out of the cache.
                    lm_standard.update(NUM_UPDATES).delete(NUM_DELETES).validate()
                    lm_counter.generate().validate()

                # let the data be written to the row/key caches.
                debug("Letting caches be written")
                time.sleep(10)
                debug("Stopping cluster")
                cluster.stop()
                time.sleep(1)
                debug("Starting cluster")
                cluster.start()
                time.sleep(5)  # read the data back from row and key caches

                # Thrift connections died with the restart; reconnect.
                lm_standard.refresh_connection()
                lm_counter.refresh_connection()

                debug("Validating again...")
                for i in range(2):
                    # read and modify multiple times to get data into and
                    # invalidated out of the cache.
                    lm_standard.validate()
                    lm_counter.validate()

                # Stop so the next cache-size combination can reconfigure
                # and restart the cluster.
                cluster.stop()