2 changes: 1 addition & 1 deletion .avalanche-golangci.yml
@@ -55,7 +55,7 @@ linters:
- bodyclose
- copyloopvar
- depguard
# - errcheck
- errcheck
- errorlint
- forbidigo
- goconst
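This is the change that drives the rest of the diff: the previously commented-out errcheck linter is enabled, so any call whose returned error is neither handled nor explicitly discarded now fails lint. A minimal illustrative sketch (not code from this repository) of what errcheck flags and the two remedies applied throughout the files below:

```go
// Illustrative sketch only — not code from this repository.
package example

import "os"

// Unchecked: errcheck flags this call once the linter is enabled.
func before(f *os.File) {
	f.Close()
}

// Remedy 1: propagate the error to the caller.
func after(f *os.File) error {
	return f.Close()
}

// Remedy 2: discard the error explicitly and document why that is safe.
func afterIgnored(f *os.File) {
	_ = f.Close() // nothing actionable to do with a close error here
}
```

errcheck does not flag assignments to the blank identifier by default, which is why the `_ =` form plus an explanatory comment is used below for calls whose errors are known to always be nil (e.g. `buffer.Insert` and `node.EnsureKeys`).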
4 changes: 3 additions & 1 deletion core/fifo_cache.go
@@ -44,7 +44,9 @@ func (f *BufferFIFOCache[K, V]) Put(key K, val V) {
f.l.Lock()
defer f.l.Unlock()

f.buffer.Insert(key) // Insert will remove the oldest [K] if we are at the [limit]
// Insert will remove the oldest [K] if we are at the [limit] -
// remove always returns nil, so it is safe to ignore this error.
_ = f.buffer.Insert(key)
f.m[key] = val
}

5 changes: 3 additions & 2 deletions core/state/trie_prefetcher_extra_test.go
@@ -178,8 +178,9 @@ func addKVs(
for i := 0; i < count/2; i++ {
key := make([]byte, 32)
value := make([]byte, 32)
rand.Read(key)
rand.Read(value)
// rand.Read never returns an error
_, _ = rand.Read(key)
_, _ = rand.Read(value)

statedb.SetState(address, common.BytesToHash(key), common.BytesToHash(value))
}
8 changes: 4 additions & 4 deletions core/state_manager_test.go
@@ -55,8 +55,8 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
require.NoError(t, w.InsertTrie(block))
require.Zero(t, m.LastDereference, "should not have dereferenced block on insert")
require.Zero(t, m.LastCommit, "should not have committed block on insert")
require.NoError(t, w.AcceptTrie(block))

w.AcceptTrie(block)
if i <= tipBufferSize {
require.Zero(t, m.LastDereference, "should not have dereferenced block on accept")
} else {
@@ -70,7 +70,7 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
m.LastCommit = common.Hash{}
}

w.RejectTrie(block)
require.NoError(t, w.RejectTrie(block))
require.Equal(t, block.Root(), m.LastDereference, "should have dereferenced block on reject")
require.Zero(t, m.LastCommit, "should not have committed block on reject")
m.LastDereference = common.Hash{}
@@ -94,12 +94,12 @@ func TestNoPruningTrieWriter(t *testing.T) {
require.Zero(t, m.LastDereference, "should not have dereferenced block on insert")
require.Zero(t, m.LastCommit, "should not have committed block on insert")

w.AcceptTrie(block)
require.NoError(t, w.AcceptTrie(block))
require.Zero(t, m.LastDereference, "should not have dereferenced block on accept")
require.Equal(t, block.Root(), m.LastCommit, "should have committed block on accept")
m.LastCommit = common.Hash{}

w.RejectTrie(block)
require.NoError(t, w.RejectTrie(block))
require.Equal(t, block.Root(), m.LastDereference, "should have dereferenced block on reject")
require.Zero(t, m.LastCommit, "should not have committed block on reject")
m.LastDereference = common.Hash{}
2 changes: 1 addition & 1 deletion plugin/evm/message/codec.go
@@ -40,7 +40,7 @@ func init() {
// See https://github.com/ava-labs/coreth/pull/999
c.SkipRegistrations(3)

Codec.RegisterCodec(Version, c)
errs.Add(Codec.RegisterCodec(Version, c))

if errs.Errored() {
panic(errs.Err)
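The codec registration above now feeds its error into the `errs` accumulator that this `init` function already checks, so a failed registration surfaces through the existing `panic(errs.Err)` instead of being dropped. A rough sketch of that accumulate-then-panic pattern, assuming avalanchego's `utils/wrappers.Errs` (whose `Add` records the first non-nil error it is given):

```go
// Rough sketch of the accumulate-then-panic pattern used in the init above.
// Assumes avalanchego's wrappers.Errs; registerStep is a stand-in for the
// real codec registrations.
package example

import (
	"fmt"

	"github.com/ava-labs/avalanchego/utils/wrappers"
)

func registerStep(name string) error {
	if name == "" {
		return fmt.Errorf("empty registration name")
	}
	return nil
}

func init() {
	errs := wrappers.Errs{}
	errs.Add(
		registerStep("message codec"),
		registerStep("cross-chain codec"),
	)
	if errs.Errored() {
		panic(errs.Err) // fail fast: a bad codec registration is unrecoverable
	}
}
```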
27 changes: 23 additions & 4 deletions plugin/evm/syncervm_test.go
@@ -19,6 +19,7 @@ import (
"github.com/ava-labs/avalanchego/snow/engine/enginetest"
"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
"github.com/ava-labs/avalanchego/upgrade/upgradetest"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/set"
"github.com/ava-labs/avalanchego/vms/evm/database"
"github.com/ava-labs/avalanchego/vms/evm/predicate"
@@ -104,7 +105,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
require.NoError(t, syncerVM.AppRequestFailed(t.Context(), nodeID, requestID, commonEng.ErrTimeout))
require.NoError(t, syncerVM.Client.Shutdown())
} else {
syncerVM.AppResponse(t.Context(), nodeID, requestID, response)
require.NoError(t, syncerVM.AppResponse(t.Context(), nodeID, requestID, response))
}
},
expectedErr: context.Canceled,
@@ -118,13 +119,18 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
test.responseIntercept = nil
test.expectedErr = nil

var atomicErr utils.Atomic[error]
syncDisabledVM := &VM{}
appSender := &enginetest.Sender{T: t}
appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil }
appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
nodeID, hasItem := nodeSet.Pop()
require.True(t, hasItem, "expected nodeSet to contain at least 1 nodeID")
go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
go func() {
if err := vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request); err != nil {
atomicErr.Set(err)
}
}()
return nil
}
// Reset metrics to allow re-initialization
@@ -191,7 +197,11 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
if test.responseIntercept == nil {
go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response)
go func() {
if err := syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response); err != nil {
atomicErr.Set(err)
}
}()
} else {
go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response)
}
@@ -212,6 +222,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) {

vmSetup.syncerVM = syncReEnabledVM
testSyncerVM(t, vmSetup, test)
require.NoError(t, atomicErr.Get())
}

func TestVMShutdownWhileSyncing(t *testing.T) {
@@ -311,15 +322,23 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s
require.True(enabled)

// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
var atomicErr utils.Atomic[error]
serverVM.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
if test.responseIntercept == nil {
go syncerVM.vm.AppResponse(ctx, nodeID, requestID, response)
go func() {
if err := syncerVM.vm.AppResponse(ctx, nodeID, requestID, response); err != nil {
atomicErr.Set(err)
}
}()
} else {
go test.responseIntercept(syncerVM.vm, nodeID, requestID, response)
}

return nil
}
t.Cleanup(func() {
require.NoError(atomicErr.Get())
})

// connect peer to [syncerVM]
require.NoError(
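The new `utils.Atomic[error]` values in this file exist because these `AppRequest`/`AppResponse` calls run in goroutines, where failing the test directly is unsafe (`t.FailNow` must run on the test goroutine); the error is recorded into the shared atomic value instead and asserted later, after `testSyncerVM` returns or in a `t.Cleanup`. A condensed sketch of the pattern, with the VM call stubbed out:

```go
// Condensed sketch of the goroutine error-capture pattern added in this test
// file. someAsyncCall is a stand-in for the AppRequest/AppResponse calls.
package example

import (
	"testing"

	"github.com/ava-labs/avalanchego/utils"
	"github.com/stretchr/testify/require"
)

func someAsyncCall() error { return nil }

func TestCaptureGoroutineError(t *testing.T) {
	var atomicErr utils.Atomic[error]

	done := make(chan struct{})
	go func() {
		defer close(done)
		// require.NoError here would call t.FailNow off the test goroutine,
		// so record the error and assert it once the goroutine has finished.
		if err := someAsyncCall(); err != nil {
			atomicErr.Set(err)
		}
	}()

	<-done
	require.NoError(t, atomicErr.Get())
}
```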
13 changes: 9 additions & 4 deletions plugin/evm/vm.go
@@ -497,7 +497,9 @@ func (vm *VM) Initialize(

// Add p2p warp message warpHandler
warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner)
vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler)
if err = vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler); err != nil {
return err
}

vm.stateSyncDone = make(chan struct{})

@@ -813,7 +815,9 @@ func (vm *VM) onNormalOperationsStarted() error {

// Initially sync the uptime tracker so that APIs expose recent data even if
// called immediately after bootstrapping.
vm.uptimeTracker.Sync(ctx)
if err := vm.uptimeTracker.Sync(ctx); err != nil {
return err
}

vm.shutdownWg.Add(1)
go func() {
@@ -986,8 +990,9 @@ func (vm *VM) Shutdown(context.Context) error {
for _, handler := range vm.rpcHandlers {
handler.Stop()
}
vm.eth.Stop()
log.Info("Ethereum backend stop completed")
if err := vm.eth.Stop(); err != nil {
log.Error("failed stopping eth", "err", err)
}
if vm.usingStandaloneDB {
if err := vm.db.Close(); err != nil {
log.Error("failed to close database: %w", err)
12 changes: 8 additions & 4 deletions plugin/evm/vm_test.go
@@ -2719,7 +2719,9 @@ func TestStandaloneDB(t *testing.T) {
[]*commonEng.Fx{},
appSender,
)
defer vm.Shutdown(t.Context())
defer func() {
require.NoError(t, vm.Shutdown(t.Context()))
}()
require.NoError(t, err, "error initializing VM")
require.NoError(t, vm.SetState(t.Context(), snow.Bootstrapping))
require.NoError(t, vm.SetState(t.Context(), snow.NormalOp))
@@ -3166,7 +3168,7 @@ func TestWaitForEvent(t *testing.T) {
fork: &fork,
}).vm
testCase.testCase(t, tvm)
tvm.Shutdown(t.Context())
require.NoError(t, tvm.Shutdown(t.Context()))
})
}
}
@@ -3533,7 +3535,9 @@ func TestDelegatePrecompile_BehaviorAcrossUpgrades(t *testing.T) {
genesisJSON: string(genesisJSON),
fork: &tt.fork,
}).vm
defer vm.Shutdown(ctx)
defer func() {
require.NoError(t, vm.Shutdown(ctx))
}()

if tt.preDeployTime != 0 {
vm.clock.Set(time.Unix(tt.preDeployTime, 0))
@@ -3704,6 +3708,6 @@ func TestInspectDatabases(t *testing.T) {
db = memdb.New()
)

vm.initializeDBs(db)
require.NoError(t, vm.initializeDBs(db))
require.NoError(t, vm.inspectDatabases())
}
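The tests above previously wrote `defer vm.Shutdown(...)`, which compiles but silently discards Shutdown's error — a plain `defer` statement cannot inspect the deferred call's return values. Wrapping the call in a deferred closure (or a `t.Cleanup`, as in the uptime test below) lets `require.NoError` assert it at teardown. A small sketch of the difference, with `shutdown` standing in for `vm.Shutdown`:

```go
// Small sketch of why the deferred Shutdown calls are now wrapped in closures.
// shutdown is a stand-in for vm.Shutdown.
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func shutdown() error { return nil }

func TestDeferredShutdownChecked(t *testing.T) {
	// Before: the deferred call runs at teardown, but its error is thrown away.
	defer shutdown() //nolint:errcheck // kept only to show the old form

	// After: the closure runs at teardown and asserts the error on the test goroutine.
	defer func() {
		require.NoError(t, shutdown())
	}()
}
```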
4 changes: 3 additions & 1 deletion plugin/evm/vm_uptime_test.go
@@ -63,7 +63,9 @@ func TestUptimeTracker(t *testing.T) {
[]*commonEng.Fx{},
appSender,
))
defer vm.Shutdown(t.Context())
t.Cleanup(func() {
require.NoError(vm.Shutdown(t.Context()))
})

// Mock the clock to control time progression
clock := vm.clock
5 changes: 4 additions & 1 deletion plugin/runner/runner.go
@@ -29,5 +29,8 @@ func Run(versionStr string) {
fmt.Printf("failed to set fd limit correctly due to: %s", err)
os.Exit(1)
}
rpcchainvm.Serve(context.Background(), &evm.VM{})
if err := rpcchainvm.Serve(context.Background(), &evm.VM{}); err != nil {
fmt.Printf("failed to serve VM: %s", err)
os.Exit(1)
}
}
3 changes: 1 addition & 2 deletions precompile/contracts/rewardmanager/config.go
@@ -38,7 +38,7 @@ func (i *InitialRewardConfig) Verify() error {
}
}

func (i *InitialRewardConfig) Configure(state contract.StateDB) error {
func (i *InitialRewardConfig) Configure(state contract.StateDB) {
// enable allow fee recipients
if i.AllowFeeRecipients {
EnableAllowFeeRecipients(state)
@@ -50,7 +50,6 @@ func (i *InitialRewardConfig) Configure(state contract.StateDB) error {
// set reward address
StoreRewardAddress(state, i.RewardAddress)
}
return nil
}

// Config implements the StatefulPrecompileConfig interface while adding in the
4 changes: 2 additions & 2 deletions sync/statesync/sync_test.go
@@ -71,7 +71,7 @@ func testSync(t *testing.T, test syncTest) {
})
require.NoError(t, err, "failed to create state syncer")
// begin sync
s.Start(ctx)
require.NoError(t, s.Start(ctx))
waitFor(t, t.Context(), s.Wait, test.expectedError, testSyncTimeout)
if test.expectedError != nil {
return
@@ -98,7 +98,7 @@ func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context)
if ctx.Err() != nil {
// print a stack trace to assist with debugging
var stackBuf bytes.Buffer
pprof.Lookup("goroutine").WriteTo(&stackBuf, 2)
require.NoErrorf(t, pprof.Lookup("goroutine").WriteTo(&stackBuf, 2), "error trying to print stack trace for timeout")
t.Log(stackBuf.String())
// fail the test
require.Fail(t, "unexpected timeout waiting for sync result")
4 changes: 3 additions & 1 deletion sync/statesync/trie_sync_tasks.go
@@ -75,7 +75,9 @@ func (m *mainTrieTask) OnLeafs(db ethdb.KeyValueWriter, keys, vals [][]byte) err

// check if this account has storage root that we need to fetch
if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash)
if err := m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash); err != nil {
return err
}
}

// check if this account has code and add it to codeHashes to fetch
2 changes: 1 addition & 1 deletion tests/utils/tmpnet.go
@@ -24,7 +24,7 @@ func NewTmpnetNodes(count int) []*tmpnet.Node {
nodes := make([]*tmpnet.Node, count)
for i := range nodes {
node := tmpnet.NewNode()
node.EnsureKeys()
_ = node.EnsureKeys() // guaranteed to be nil for new node
nodes[i] = node
}
return nodes
4 changes: 2 additions & 2 deletions tests/warp/warp_test.go
@@ -417,7 +417,7 @@ func (w *warpTest) aggregateSignaturesViaAPI() {
numSigners, err := parsedWarpMessage.Signature.NumSigners()
require.NoError(err)
require.Len(warpValidators.Validators, numSigners)
parsedWarpMessage.Signature.Verify(&parsedWarpMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
err = parsedWarpMessage.Signature.Verify(&parsedWarpMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
require.NoError(err)
w.addressedCallSignedMessage = parsedWarpMessage

@@ -429,7 +429,7 @@ func (w *warpTest) aggregateSignaturesViaAPI() {
numSigners, err = parsedWarpBlockMessage.Signature.NumSigners()
require.NoError(err)
require.Len(warpValidators.Validators, numSigners)
parsedWarpBlockMessage.Signature.Verify(&parsedWarpBlockMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
err = parsedWarpBlockMessage.Signature.Verify(&parsedWarpBlockMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
require.NoError(err)
w.blockPayloadSignedMessage = parsedWarpBlockMessage
}
6 changes: 5 additions & 1 deletion triedb/firewood/database.go
@@ -556,7 +556,11 @@ func (db *Database) getProposalHash(parentRoot common.Hash, keys, values [][]byt
ffiHashTimer.Inc(time.Since(start).Milliseconds())

// We succesffuly created a proposal, so we must drop it after use.
defer p.Drop()
defer func() {
if err := p.Drop(); err != nil {
log.Error("firewood: error dropping proposal after hash computation", "parentRoot", parentRoot.Hex(), "error", err)
}
}()

root, err := p.Root()
if err != nil {
3 changes: 1 addition & 2 deletions warp/verifier_backend_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,7 @@ func TestAddressedCallSignatures(t *testing.T) {
require.NoError(t, err)
signature, err := snowCtx.WarpSigner.Sign(msg)
require.NoError(t, err)

backend.AddMessage(msg)
require.NoError(t, backend.AddMessage(msg))
return msg.Bytes(), signature
},
verifyStats: func(t *testing.T, stats *verifierStats) {