
Commit 782bf20

core: fix data races in unit tests (#609)
* core: fix data race in TestInsertChainWithSidecars

We get this data race when running tests:

    WARNING: DATA RACE
    Write at 0x00c0117bbf40 by goroutine 77211:
      github.com/ethereum/go-ethereum/core.TestInsertChainWithSidecars()
          /home/runner/work/ronin/ronin/core/blockchain_test.go:4215 +0x49fd
      testing.tRunner()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1576 +0x216
      testing.(*T).Run.func1()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1629 +0x47

    Previous write at 0x00c0117bbf40 by goroutine 82133:
      github.com/ethereum/go-ethereum/core.TestInsertChainWithSidecars.func6.1()
          /home/runner/work/ronin/ronin/core/blockchain_test.go:4179 +0x5a4
      testing.tRunner()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1576 +0x216
      testing.(*T).Run.func1()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1629 +0x47

The race happens because two goroutines use the same variables. This commit makes them use their own variables.

* core: fix data race in cacheConfig.TriesInMemory access

We get this data race when running tests:

    WARNING: DATA RACE
    Read at 0x000001faa090 by goroutine 23:
      github.com/ethereum/go-ethereum/core.NewBlockChain()
          /home/runner/work/ronin/ronin/core/blockchain.go:247 +0xc4
      github.com/ethereum/go-ethereum/eth/protocols/eth.newTestBackendWithGenerator()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:74 +0x4c4
      github.com/ethereum/go-ethereum/eth/protocols/eth.testGetBlockReceipts()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:569 +0x1f5
      github.com/ethereum/go-ethereum/eth/protocols/eth.TestGetBlockReceipts66()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:528 +0x33
      testing.tRunner()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1576 +0x216
      testing.(*T).Run.func1()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1629 +0x47

    Previous write at 0x000001faa090 by goroutine 19:
      github.com/ethereum/go-ethereum/core.NewBlockChain()
          /home/runner/work/ronin/ronin/core/blockchain.go:248 +0xe4
      github.com/ethereum/go-ethereum/eth/protocols/eth.newTestBackendWithGenerator()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:74 +0x4c4
      github.com/ethereum/go-ethereum/eth/protocols/eth.newTestBackend()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:61 +0x64
      github.com/ethereum/go-ethereum/eth/protocols/eth.testGetBlockHeaders()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:126 +0x53
      github.com/ethereum/go-ethereum/eth/protocols/eth.TestGetBlockHeaders66()
          /home/runner/work/ronin/ronin/eth/protocols/eth/handler_test.go:121 +0x33
      testing.tRunner()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1576 +0x216
      testing.(*T).Run.func1()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1629 +0x47

In the tests, the cacheConfig passed to NewBlockChain is nil, so the global defaultCacheConfig is used. However, defaultCacheConfig.TriesInMemory is 0, so NewBlockChain falls into the path that sets cacheConfig.TriesInMemory to DefaultTriesInMemory (128). That write lands on the shared defaultCacheConfig, racing with other goroutines' accesses. This commit initializes defaultCacheConfig.TriesInMemory to DefaultTriesInMemory so the lazy write never executes.

* core/vote: hold read lock while getting vote pool stats

We get multiple data races while running the vote pool tests:

    WARNING: DATA RACE
    Read at 0x00c0002a3ce0 by goroutine 19:
      github.com/ethereum/go-ethereum/core/vote.(*VotePool).verifyStructureSizeOfVotePool()
          /home/runner/work/ronin/ronin/core/vote/vote_pool_test.go:77 +0xaa
      github.com/ethereum/go-ethereum/core/vote.testVotePool()
          /home/runner/work/ronin/ronin/core/vote/vote_pool_test.go:160 +0xe84
      github.com/ethereum/go-ethereum/core/vote.TestValidVotePool()
          /home/runner/work/ronin/ronin/core/vote/vote_pool_test.go:85 +0x33
      testing.tRunner()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1576 +0x216
      testing.(*T).Run.func1()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1629 +0x47

    Previous write at 0x00c0002a3ce0 by goroutine 23:
      runtime.mapassign()
          /opt/hostedtoolcache/go/1.20.10/x64/src/runtime/map.go:578 +0x0
      github.com/ethereum/go-ethereum/core/vote.(*VotePool).putVote()
          /home/runner/work/ronin/ronin/core/vote/vote_pool.go:236 +0x346
      github.com/ethereum/go-ethereum/core/vote.(*VotePool).putIntoVotePool()
          /home/runner/work/ronin/ronin/core/vote/vote_pool.go:212 +0x978
      github.com/ethereum/go-ethereum/core/vote.(*VotePool).loop()
          /home/runner/work/ronin/ronin/core/vote/vote_pool.go:123 +0x504
      github.com/ethereum/go-ethereum/core/vote.NewVotePool.func1()
          /home/runner/work/ronin/ronin/core/vote/vote_pool.go:98 +0x39

Currently, when getting the vote pool's stats for checks in unit tests, we access these fields directly without holding the lock. This commit adds helper functions that read these stats safely with the read lock held.

* internal/cmdtest: increase the CLI command test timeout

The current timeout is 5s, which is not enough when running tests under the race detector. Increase that timeout to 15s.

* p2p/discover: use the synchronous version of addSeenNode

We get this data race when running TestTable_BucketIPLimit:

    Write at 0x00c00029c000 by goroutine 64:
      github.com/ethereum/go-ethereum/p2p/discover.(*Table).addSeenNodeSync()
          /home/runner/work/ronin/ronin/p2p/discover/table.go:570 +0x70a
      github.com/ethereum/go-ethereum/p2p/discover.(*Table).addSeenNode.func1()
          /home/runner/work/ronin/ronin/p2p/discover/table.go:527 +0x47

    Previous read at 0x00c00029c000 by goroutine 57:
      github.com/ethereum/go-ethereum/p2p/discover.checkIPLimitInvariant()
          /home/runner/work/ronin/ronin/p2p/discover/table_test.go:187 +0x105
      github.com/ethereum/go-ethereum/p2p/discover.TestTable_BucketIPLimit()
          /home/runner/work/ronin/ronin/p2p/discover/table_test.go:177 +0x2e4
      testing.tRunner()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1576 +0x216
      testing.(*T).Run.func1()
          /opt/hostedtoolcache/go/1.20.10/x64/src/testing/testing.go:1629 +0x47

This commit changes TestTable_BucketIPLimit to use the synchronous version of addSeenNode to avoid the data race.
1 parent 2d172f2 commit 782bf20
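All of the races above were reported by Go's built-in race detector. They should generally be reproducible by running the affected packages with the -race flag; the package list here is illustrative:

    go test -race ./core/... ./core/vote/... ./internal/cmdtest/... ./p2p/discover/...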

File tree

6 files changed: +64 −31 lines

core/blockchain.go

Lines changed: 1 addition & 0 deletions
@@ -151,6 +151,7 @@ var defaultCacheConfig = &CacheConfig{
 	TrieTimeLimit: 5 * time.Minute,
 	SnapshotLimit: 256,
 	SnapshotWait:  true,
+	TriesInMemory: DefaultTriesInMemory,
 }

 // BlockChain represents the canonical chain given a database with a genesis
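For context on this one-liner, here is a minimal, self-contained sketch of the race pattern being fixed; all names are illustrative, not the actual blockchain.go code. A constructor that lazily patches a package-level default config races with itself when called concurrently, and fully initializing the default removes the lazy write entirely:

package main

import "sync"

type CacheConfig struct {
	TriesInMemory uint64
}

const DefaultTriesInMemory = 128

// Racy shape: the zero-valued global gets patched lazily by every caller
// that passes a nil config.
var defaultCacheConfig = &CacheConfig{} // TriesInMemory left at 0

func newBlockChain(cfg *CacheConfig) *CacheConfig {
	if cfg == nil {
		cfg = defaultCacheConfig
	}
	if cfg.TriesInMemory == 0 {
		cfg.TriesInMemory = DefaultTriesInMemory // concurrent write to the shared global
	}
	return cfg
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			newBlockChain(nil) // `go run -race` flags the write above
		}()
	}
	wg.Wait()
}

The commit's fix is exactly the diff above: initialize TriesInMemory in the default config so the lazy-write branch never fires.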

core/blockchain_test.go

Lines changed: 2 additions & 2 deletions
@@ -4176,11 +4176,11 @@ func TestInsertChainWithSidecars(t *testing.T) {

 	// Wait for future block to be inserted
 	time.Sleep(15 * time.Second)
-	block = chain.CurrentBlock()
+	block := chain.CurrentBlock()
 	if block.Hash() != futureBlocks[0].Hash() {
 		t.Fatalf("Failed to insert future block, current: %d expected: %d", block.NumberU64(), futureBlocks[0].NumberU64())
 	}
-	savedSidecars = chain.GetBlobSidecarsByHash(block.Hash())
+	savedSidecars := chain.GetBlobSidecarsByHash(block.Hash())
 	if len(savedSidecars) != len(sidecars) {
 		t.Fatalf("Expect length of sidecar to be %d, got %d", len(sidecars), len(savedSidecars))
 	}
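The fix here is the switch from = to :=. A hedged, self-contained sketch of why that one character matters (hypothetical names, not the actual test): with =, the outer flow reuses a variable that a concurrently running goroutine also assigns, while := gives each scope its own storage:

package main

import (
	"fmt"
	"sync"
)

func compute() int { return 42 }

func main() {
	var wg sync.WaitGroup

	// Racy shape: the spawned goroutine and the main flow both assign to
	// the shared `block` variable, like the `=` version of the test.
	var block int
	wg.Add(1)
	go func() {
		defer wg.Done()
		block = compute() // write from the helper goroutine
	}()
	block = compute() // concurrent write; `go run -race` reports it
	wg.Wait()
	fmt.Println(block)

	// Fixed shape: `:=` declares fresh, scope-local variables, so the
	// two flows no longer share storage.
	wg.Add(1)
	go func() {
		defer wg.Done()
		local := compute() // goroutine-local
		_ = local
	}()
	current := compute() // new variable in the outer scope
	wg.Wait()
	fmt.Println(current)
}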

core/vote/vote_pool.go

Lines changed: 19 additions & 0 deletions
@@ -432,6 +432,25 @@ func (pool *VotePool) basicVerify(vote *types.VoteEnvelope, headNumber uint64, m
 	return true
 }

+// stats returns the vote pool's
+// - number of current votes
+// - length of current vote queue
+// - number of future votes
+// - length of future vote queue
+func (pool *VotePool) stats() (int, int, int, int) {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	return len(pool.curVotes), pool.curVotesPq.Len(), len(pool.futureVotes), pool.futureVotesPq.Len()
+}
+
+func (pool *VotePool) getNumberOfFutureVoteByPeer(peer string) uint64 {
+	pool.mu.RLock()
+	defer pool.mu.RUnlock()
+
+	return pool.numFutureVotePerPeer[peer]
+}
+
 func (pq votesPriorityQueue) Less(i, j int) bool {
 	return pq[i].TargetNumber < pq[j].TargetNumber
 }
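The design choice worth noting: rather than exposing the maps or priority queues, the helpers return scalar snapshots taken under the read lock, so callers can never observe the structures mid-mutation. A minimal sketch of the same accessor pattern on a toy pool (illustrative names; not the VotePool API beyond what the diff above shows):

package main

import (
	"fmt"
	"sync"
)

// pool mimics the VotePool shape: maps guarded by an RWMutex, with
// read-locked accessors handing out scalar snapshots.
type pool struct {
	mu            sync.RWMutex
	curVotes      map[string]int
	futurePerPeer map[string]uint64
}

func (p *pool) put(peer string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.futurePerPeer[peer]++
}

// stats never exposes the maps themselves, only sizes read under RLock.
func (p *pool) stats() (cur, future int) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return len(p.curVotes), len(p.futurePerPeer)
}

func (p *pool) futureVotesByPeer(peer string) uint64 {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.futurePerPeer[peer]
}

func main() {
	p := &pool{curVotes: map[string]int{}, futurePerPeer: map[string]uint64{}}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { defer wg.Done(); p.put("AAAA") }() // concurrent writer
	cur, future := p.stats()                       // safe concurrent read
	wg.Wait()
	fmt.Println(cur, future, p.futureVotesByPeer("AAAA"))
}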

core/vote/vote_pool_test.go

Lines changed: 39 additions & 26 deletions
@@ -74,7 +74,8 @@ func (m *mockPOSA) IsFinalityVoterAt(chain consensus.ChainHeaderReader, header *
 func (pool *VotePool) verifyStructureSizeOfVotePool(curVotes, futureVotes, curVotesPq, futureVotesPq int) bool {
 	for i := 0; i < timeThreshold; i++ {
 		time.Sleep(1 * time.Second)
-		if len(pool.curVotes) == curVotes && len(pool.futureVotes) == futureVotes && pool.curVotesPq.Len() == curVotesPq && pool.futureVotesPq.Len() == futureVotesPq {
+		poolCurVotes, poolCurVotesPq, poolFutureVotes, poolFutureVotesPq := pool.stats()
+		if poolCurVotes == curVotes && poolFutureVotes == futureVotes && poolCurVotesPq == curVotesPq && poolFutureVotesPq == futureVotesPq {
 			return true
 		}
 	}
@@ -286,14 +287,15 @@ func testVotePool(t *testing.T, isValidRules bool) {
 		}
 	}

+	done := false
 	for i := 0; i < timeThreshold; i++ {
 		time.Sleep(1 * time.Second)
-		_, ok := votePool.curVotes[futureBlockHash]
-		if ok && len(votePool.curVotes[futureBlockHash].voteMessages) == 2 {
+		if len(votePool.FetchVoteByBlockHash(futureBlockHash)) == 2 {
+			done = true
 			break
 		}
 	}
-	if votePool.curVotes[futureBlockHash] == nil || len(votePool.curVotes[futureBlockHash].voteMessages) != 2 {
+	if !done {
 		t.Fatalf("transfer vote failed")
 	}

@@ -410,46 +412,55 @@ func TestVotePoolDosProtection(t *testing.T) {
 		time.Sleep(100 * time.Millisecond)
 	}

-	if len(*votePool.futureVotesPq) != maxFutureVotePerPeer {
-		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq))
+	_, _, _, futureVoteQueueLength := votePool.stats()
+	if futureVoteQueueLength != maxFutureVotePerPeer {
+		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, futureVoteQueueLength)
 	}
-	if votePool.numFutureVotePerPeer["AAAA"] != maxFutureVotePerPeer {
-		t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, votePool.numFutureVotePerPeer["AAAA"])
+	numFutureVotePerPeer := votePool.getNumberOfFutureVoteByPeer("AAAA")
+	if numFutureVotePerPeer != maxFutureVotePerPeer {
+		t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, numFutureVotePerPeer)
 	}

 	// This vote is dropped due to DOS protection
 	vote := generateVote(1, common.BigToHash(big.NewInt(int64(maxFutureVoteAmountPerBlock+1))), secretKey)
 	votePool.PutVote("AAAA", vote)
 	time.Sleep(100 * time.Millisecond)
-	if len(*votePool.futureVotesPq) != maxFutureVotePerPeer {
-		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq))
+	_, _, _, futureVoteQueueLength = votePool.stats()
+	if futureVoteQueueLength != maxFutureVotePerPeer {
+		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, futureVoteQueueLength)
 	}
-	if votePool.numFutureVotePerPeer["AAAA"] != maxFutureVotePerPeer {
-		t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, votePool.numFutureVotePerPeer["AAAA"])
+	numFutureVotePerPeer = votePool.getNumberOfFutureVoteByPeer("AAAA")
+	if numFutureVotePerPeer != maxFutureVotePerPeer {
+		t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, numFutureVotePerPeer)
 	}

 	// Vote from different peer must be accepted
 	vote = generateVote(1, common.BigToHash(big.NewInt(int64(maxFutureVoteAmountPerBlock+2))), secretKey)
 	votePool.PutVote("BBBB", vote)
 	time.Sleep(100 * time.Millisecond)
-	if len(*votePool.futureVotesPq) != maxFutureVotePerPeer+1 {
-		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq))
+	_, _, _, futureVoteQueueLength = votePool.stats()
+	if futureVoteQueueLength != maxFutureVotePerPeer+1 {
+		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, futureVoteQueueLength)
 	}
-	if votePool.numFutureVotePerPeer["AAAA"] != maxFutureVotePerPeer {
-		t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, votePool.numFutureVotePerPeer["AAAA"])
+	numFutureVotePerPeer = votePool.getNumberOfFutureVoteByPeer("AAAA")
+	if numFutureVotePerPeer != maxFutureVotePerPeer {
+		t.Fatalf("Number of future vote per peer, expect %d have %d", maxFutureVotePerPeer, numFutureVotePerPeer)
 	}
-	if votePool.numFutureVotePerPeer["BBBB"] != 1 {
-		t.Fatalf("Number of future vote per peer, expect %d have %d", 1, votePool.numFutureVotePerPeer["BBBB"])
+	numFutureVotePerPeer = votePool.getNumberOfFutureVoteByPeer("BBBB")
+	if numFutureVotePerPeer != 1 {
+		t.Fatalf("Number of future vote per peer, expect %d have %d", 1, numFutureVotePerPeer)
 	}

 	// One vote is not queued twice
 	votePool.PutVote("CCCC", vote)
 	time.Sleep(100 * time.Millisecond)
-	if len(*votePool.futureVotesPq) != maxFutureVotePerPeer+1 {
-		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, len(*votePool.futureVotesPq))
+	_, _, _, futureVoteQueueLength = votePool.stats()
+	if futureVoteQueueLength != maxFutureVotePerPeer+1 {
+		t.Fatalf("Future vote pool length, expect %d have %d", maxFutureVotePerPeer, futureVoteQueueLength)
 	}
-	if votePool.numFutureVotePerPeer["CCCC"] != 0 {
-		t.Fatalf("Number of future vote per peer, expect %d have %d", 0, votePool.numFutureVotePerPeer["CCCC"])
+	numFutureVotePerPeer = votePool.getNumberOfFutureVoteByPeer("CCCC")
+	if numFutureVotePerPeer != 0 {
+		t.Fatalf("Number of future vote per peer, expect %d have %d", 0, numFutureVotePerPeer)
 	}

 	if _, err := chain.InsertChain(bs[1:], nil); err != nil {
@@ -458,11 +469,13 @@ func TestVotePoolDosProtection(t *testing.T) {
 	time.Sleep(100 * time.Millisecond)
 	// Future vote must be transferred to current and failed the verification,
 	// numFutureVotePerPeer decreases
-	if len(*votePool.futureVotesPq) != 0 {
-		t.Fatalf("Future vote pool length, expect %d have %d", 0, len(*votePool.futureVotesPq))
+	_, _, _, futureVoteQueueLength = votePool.stats()
+	if futureVoteQueueLength != 0 {
+		t.Fatalf("Future vote pool length, expect %d have %d", 0, futureVoteQueueLength)
 	}
-	if votePool.numFutureVotePerPeer["AAAA"] != 0 {
-		t.Fatalf("Number of future vote per peer, expect %d have %d", 0, votePool.numFutureVotePerPeer["AAAA"])
+	numFutureVotePerPeer = votePool.getNumberOfFutureVoteByPeer("AAAA")
+	if numFutureVotePerPeer != 0 {
+		t.Fatalf("Number of future vote per peer, expect %d have %d", 0, numFutureVotePerPeer)
 	}
 }
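Not part of this commit, but worth noting: with every assertion now routed through a locked accessor, the sleep-and-retry loops in these tests share one shape and could be factored into a small helper along these lines (purely illustrative):

package vote

import "time"

// waitFor polls cond once per second for up to rounds attempts and
// reports whether the condition ever held.
func waitFor(rounds int, cond func() bool) bool {
	for i := 0; i < rounds; i++ {
		time.Sleep(1 * time.Second)
		if cond() {
			return true
		}
	}
	return false
}

The transfer check above would then collapse to: if !waitFor(timeThreshold, func() bool { return len(votePool.FetchVoteByBlockHash(futureBlockHash)) == 2 }) { t.Fatalf("transfer vote failed") }.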

internal/cmdtest/test_cmd.go

Lines changed: 2 additions & 2 deletions
@@ -84,7 +84,7 @@ func (tt *TestCmd) Run(name string, args ...string) {
 // InputLine writes the given text to the child's stdin.
 // This method can also be called from an expect template, e.g.:
 //
-//     geth.expect(`Passphrase: {{.InputLine "password"}}`)
+//	geth.expect(`Passphrase: {{.InputLine "password"}}`)
 func (tt *TestCmd) InputLine(s string) string {
 	io.WriteString(tt.stdin, s+"\n")
 	return ""
@@ -238,7 +238,7 @@ func (tt *TestCmd) Kill() {
 }

 func (tt *TestCmd) withKillTimeout(fn func()) {
-	timeout := time.AfterFunc(5*time.Second, func() {
+	timeout := time.AfterFunc(15*time.Second, func() {
 		tt.Log("killing the child process (timeout)")
 		tt.Kill()
 	})
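The helper being tuned follows the standard time.AfterFunc pattern: arm a timer whose callback fires on a background goroutine, and disarm it with Stop once the work completes. The larger budget matters because the race detector typically slows test binaries several-fold. A minimal sketch of that shape with the new 15-second budget (names illustrative; the real helper wraps a child process):

package main

import (
	"fmt"
	"time"
)

// withKillTimeout runs fn and fires kill if fn is still running when the
// deadline expires; Stop disarms the timer on the fast path.
func withKillTimeout(d time.Duration, fn, kill func()) {
	timer := time.AfterFunc(d, func() {
		fmt.Println("killing the child process (timeout)")
		kill()
	})
	defer timer.Stop()
	fn()
}

func main() {
	withKillTimeout(15*time.Second,
		func() { time.Sleep(100 * time.Millisecond) }, // finishes in time
		func() {},                                     // kill never fires here
	)
	fmt.Println("done")
}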

p2p/discover/table_test.go

Lines changed: 1 addition & 1 deletion
@@ -169,7 +169,7 @@ func TestTable_BucketIPLimit(t *testing.T) {
 	d := 3
 	for i := 0; i < bucketIPLimit+1; i++ {
 		n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)})
-		tab.addSeenNode(n)
+		tab.addSeenNodeSync(n)
 	}
 	if tab.len() > bucketIPLimit {
 		t.Errorf("too many nodes in table")
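A hedged sketch of the sync/async split being exercised here (illustrative, not the actual discover code): the asynchronous method hands the node to the table's background loop and returns immediately, so a test that checks invariants right afterwards races with that loop, while a synchronous variant blocks until the insert has been applied:

package main

import "fmt"

// A background loop owns the node list, so mutations are funneled
// through a channel of operations.
type table struct {
	ops   chan func()
	nodes []string
}

func newTable() *table {
	t := &table{ops: make(chan func())}
	go func() {
		for op := range t.ops {
			op() // only this goroutine touches t.nodes
		}
	}()
	return t
}

// addSeenNode is fire-and-forget: it returns before the insert is
// applied, so a caller that inspects the table immediately afterwards
// races with the loop goroutine.
func (t *table) addSeenNode(n string) {
	go func() { t.ops <- func() { t.nodes = append(t.nodes, n) } }()
}

// addSeenNodeSync blocks until the loop has applied the insert; the
// close of done gives the caller a happens-before edge, making a
// follow-up read of the table safe.
func (t *table) addSeenNodeSync(n string) {
	done := make(chan struct{})
	t.ops <- func() {
		t.nodes = append(t.nodes, n)
		close(done)
	}
	<-done
}

func main() {
	tab := newTable()
	tab.addSeenNodeSync("node-1")
	fmt.Println(len(tab.nodes)) // safe: the insert already happened
}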
