@@ -329,6 +329,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	// outside code doesn't see an inconsistent state (referenced data removed from
 	// memory cache during commit but not yet in persistent storage). This is ensured
 	// by only uncaching existing data when the database write finalizes.
+	db.lock.RLock()
 	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
 	batch := db.diskdb.NewBatch()
 
@@ -337,12 +338,15 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	// counted.
 	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
 	size += db.childrenSize
+	db.lock.RUnlock()
 
 	// Keep committing nodes from the flush-list until we're below allowance
 	oldest := db.oldest
 	for size > limit && oldest != (common.Hash{}) {
 		// Fetch the oldest referenced node and push into the batch
+		db.lock.RLock()
 		node := db.dirties[oldest]
+		db.lock.RUnlock()
 		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
 
 		// If we exceeded the ideal batch size, commit and reset
@@ -418,7 +422,9 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 	batch := db.diskdb.NewBatch()
 
 	// Move the trie itself into the batch, flushing if enough data is accumulated
+	db.lock.RLock()
 	nodes, storage := len(db.dirties), db.dirtiesSize
+	db.lock.RUnlock()
 
 	uncacher := &cleaner{db}
 	if err := db.commit(node, batch, uncacher); err != nil {
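For readers skimming the patch, here is a minimal, self-contained sketch of the locking pattern the hunks above introduce: the dirty-node bookkeeping (the map and its size counters) is read only while holding the read side of an RWMutex, and the lock is released before slow work such as batch writes. The cache type, field names, and methods below are illustrative stand-ins rather than go-ethereum's actual Database type; the sketch only assumes that db.lock is a sync.RWMutex guarding db.dirties and the size counters, as the diff implies.

package main

import (
	"fmt"
	"sync"
)

// cache is a stand-in for the dirty-node cache: an RWMutex guarding a map
// and its bookkeeping counter, mirroring how db.lock guards db.dirties and
// db.dirtiesSize in the patch above. All names here are hypothetical.
type cache struct {
	lock    sync.RWMutex
	dirties map[string][]byte
	size    int
}

// insert mutates the shared fields under the write lock.
func (c *cache) insert(key string, blob []byte) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.dirties[key] = blob
	c.size += len(blob)
}

// snapshot reads the shared fields under the read lock and releases it
// before the caller does any slow work (batch writes, disk I/O), which is
// the shape Cap and Commit follow in the patch.
func (c *cache) snapshot() (nodes, size int) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return len(c.dirties), c.size
}

func main() {
	c := &cache{dirties: make(map[string][]byte)}

	// Concurrent writers take the write lock; the reader below takes the
	// read lock, so the map and counter are never observed mid-update.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c.insert(fmt.Sprintf("node-%d", i), make([]byte, 32))
		}(i)
	}
	wg.Wait()

	nodes, size := c.snapshot()
	fmt.Printf("dirty nodes: %d, size: %d bytes\n", nodes, size)
}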