diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index faf922df01..0dabaf4df5 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -10,6 +10,7 @@ core/ @karalabe @holiman @rjl493456442
 eth/ @karalabe @holiman @rjl493456442
 eth/catalyst/ @gballet
 eth/tracers/ @s1na
+core/tracing/ @s1na
 graphql/ @s1na
 les/ @zsfelfoldi @rjl493456442
 light/ @zsfelfoldi @rjl493456442
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index e47a61bbec..781ee06d49 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -12,9 +12,9 @@ jobs:
     if: false # not supported on OffchainLabs fork, ci.yml is used instead
     runs-on: self-hosted
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
           go-version: 1.21.4
       - name: Run tests
diff --git a/.golangci.yml b/.golangci.yml
index 0343c4b4eb..46844d1e90 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -6,8 +6,6 @@ run:
   # default is true. Enables skipping of directories:
   #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
   skip-dirs-use-default: true
-  skip-files:
-    - core/genesis_alloc.go

 linters:
   disable-all: true
@@ -26,6 +24,8 @@ linters:
     - exportloopref
     - whitespace

+  ### linters we tried and will not be using:
+  ###
   # - structcheck # lots of false positives
   # - errcheck #lot of false positives
   # - contextcheck
@@ -40,6 +40,8 @@ linters-settings:
     simplify: true

 issues:
+  exclude-files:
+    - core/genesis_alloc.go
   exclude-rules:
     - path: crypto/bn256/cloudflare/optate.go
       linters:
diff --git a/.travis.yml b/.travis.yml
index 488ec1e7d2..2dc80f85ed 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -97,6 +97,7 @@ jobs:

     # These builders run the tests
     - stage: build
+      if: type = push
       os: linux
       arch: amd64
       dist: noble
@@ -105,6 +106,7 @@ jobs:
         - travis_wait 45 go run build/ci.go test $TEST_PACKAGES

     - stage: build
+      if: type = push
       os: linux
       dist: noble
       go: 1.21.x
@@ -146,5 +148,7 @@ jobs:
       os: linux
       dist: noble
       go: 1.22.x
+      env:
+        - racetests
       script:
-        - travis_wait 50 go run build/ci.go test -race $TEST_PACKAGES
+        - travis_wait 60 go run build/ci.go test -race $TEST_PACKAGES
diff --git a/Makefile b/Makefile
index 278ae63120..857cb8c978 100644
--- a/Makefile
+++ b/Makefile
@@ -2,31 +2,35 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.

-.PHONY: geth all test lint clean devtools help
+.PHONY: geth all test lint fmt clean devtools help

 GOBIN = ./build/bin
 GO ?= latest
 GORUN = go run

-#? geth: Build geth
+#? geth: Build geth.
 geth:
	$(GORUN) build/ci.go install ./cmd/geth
	@echo "Done building."
	@echo "Run \"$(GOBIN)/geth\" to launch geth."

-#? all: Build all packages and executables
+#? all: Build all packages and executables.
 all:
	$(GORUN) build/ci.go install

-#? test: Run the tests
+#? test: Run the tests.
 test: all
	$(GORUN) build/ci.go test

-#? lint: Run certain pre-selected linters
+#? lint: Run certain pre-selected linters.
 lint: ## Run linters.
	$(GORUN) build/ci.go lint

-#? clean: Clean go cache, built executables, and the auto generated folder
+#? fmt: Ensure consistent code formatting.
+fmt:
+	gofmt -s -w $(shell find . -name "*.go")
+
+#? clean: Clean go cache, built executables, and the auto generated folder.
 clean:
	go clean -cache
	rm -fr build/_workspace/pkg/ $(GOBIN)/*
@@ -34,7 +38,7 @@ clean:

 # The devtools target installs tools required for 'go generate'.
 # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
-#? devtools: Install recommended developer tools
+#? devtools: Install recommended developer tools.
 devtools:
	env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
	env GOBIN= go install github.com/fjl/gencodec@latest
@@ -45,5 +49,9 @@ devtools:

 #? help: Get more info on make commands.
 help: Makefile
-	@echo " Choose a command run in go-ethereum:"
+	@echo ''
+	@echo 'Usage:'
+	@echo '  make [target]'
+	@echo ''
+	@echo 'Targets:'
	@sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /'
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index 1a9f9a4714..6bc14f5bb6 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -326,6 +326,11 @@ func TestUpdatedKeyfileContents(t *testing.T) {

	// Create a temporary keystore to test with
	dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
+
+	// Create the directory
+	os.MkdirAll(dir, 0700)
+	defer os.RemoveAll(dir)
+
	ks := NewKeyStore(dir, LightScryptN, LightScryptP)

	list := ks.Accounts()
@@ -335,9 +340,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
	if !waitWatcherStart(ks) {
		t.Fatal("keystore watcher didn't start in time")
	}
-	// Create the directory and copy a key file into it.
-	os.MkdirAll(dir, 0700)
-	defer os.RemoveAll(dir)
+	// Copy a key file into it
	file := filepath.Join(dir, "aaa")

	// Place one of our testfiles in there
diff --git a/arbitrum/recordingdb.go b/arbitrum/recordingdb.go
index f7fd017520..c7a3f34727 100644
--- a/arbitrum/recordingdb.go
+++ b/arbitrum/recordingdb.go
@@ -32,17 +32,19 @@ type RecordingKV struct {
	inner         *triedb.Database
	diskDb        ethdb.KeyValueStore
	readDbEntries map[common.Hash][]byte
+	mutex         sync.Mutex
	enableBypass  bool
 }

 func newRecordingKV(inner *triedb.Database, diskDb ethdb.KeyValueStore) *RecordingKV {
-	return &RecordingKV{inner, diskDb, make(map[common.Hash][]byte), false}
+	return &RecordingKV{inner, diskDb, make(map[common.Hash][]byte), sync.Mutex{}, false}
 }

 func (db *RecordingKV) Has(key []byte) (bool, error) {
	return false, errors.New("recording KV doesn't support Has")
 }

+// Get may be called concurrently with other Get calls
 func (db *RecordingKV) Get(key []byte) ([]byte, error) {
	var hash common.Hash
	var res []byte
@@ -66,6 +68,8 @@ func (db *RecordingKV) Get(key []byte) ([]byte, error) {
	if crypto.Keccak256Hash(res) != hash {
		return nil, fmt.Errorf("recording KV attempted to access non-hash key %v", hash)
	}
+	db.mutex.Lock()
+	defer db.mutex.Unlock()
	db.readDbEntries[hash] = res
	return res, nil
 }
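The mutex added above is needed because Go maps are not safe for concurrent mutation: two Get calls racing on db.readDbEntries[hash] = res would crash the process with a "concurrent map writes" fault. A minimal standalone sketch of the same pattern (the recorder type here is illustrative, not part of this change):

package main

import (
	"fmt"
	"sync"
)

// recorder mimics the RecordingKV pattern: a plain map guarded by a mutex,
// so that concurrent readers can safely record what they fetched.
type recorder struct {
	mu      sync.Mutex
	entries map[string][]byte
}

func (r *recorder) record(key string, val []byte) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.entries[key] = val
}

func main() {
	r := &recorder{entries: make(map[string][]byte)}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) { // concurrent Get-style callers
			defer wg.Done()
			r.record(fmt.Sprintf("key-%d", i), []byte{byte(i)})
		}(i)
	}
	wg.Wait()
	fmt.Println(len(r.entries)) // 8, and no data race
}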
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index a73691ca05..1dfcf5b71a 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -209,7 +209,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash,
	if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
		return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
	}
-	var blobHashes []common.Hash
+	var blobHashes = make([]common.Hash, 0, len(txs))
	for _, tx := range txs {
		blobHashes = append(blobHashes, tx.BlobHashes()...)
	}
diff --git a/beacon/light/sync/head_sync_test.go b/beacon/light/sync/head_sync_test.go
index cd7dacf7fe..d095d6a446 100644
--- a/beacon/light/sync/head_sync_test.go
+++ b/beacon/light/sync/head_sync_test.go
@@ -91,7 +91,7 @@ func TestValidatedHead(t *testing.T) {
	ts.ServerEvent(EvNewOptimisticUpdate, testServer3, testOptUpdate4)
	// finality should be requested from both servers
	ts.Run(4, testServer1, ReqFinality{}, testServer3, ReqFinality{})
-	// future period annonced heads should be queued
+	// future period announced heads should be queued
	ht.ExpValidated(t, 4, nil)

	chain.SetNextSyncPeriod(2)
diff --git a/beacon/types/exec_payload.go b/beacon/types/exec_payload.go
index 4448f854ad..b159687dfc 100644
--- a/beacon/types/exec_payload.go
+++ b/beacon/types/exec_payload.go
@@ -65,7 +65,7 @@ func convertPayload[T payloadType](payload T, parentRoot *zrntcommon.Root) (*typ
	block := types.NewBlockWithHeader(&header).WithBody(types.Body{Transactions: transactions, Withdrawals: withdrawals})
	if hash := block.Hash(); hash != expectedHash {
-		return nil, fmt.Errorf("Sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash)
+		return nil, fmt.Errorf("sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash)
	}
	return block, nil
 }
diff --git a/build/checksums.txt b/build/checksums.txt
index da2988452a..94cac9738c 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -48,36 +48,37 @@ cab2af6951a6e2115824263f6df13ff069c47270f5788714fa1d776f7f60cb39  go1.22.3.windows-amd64.zip
 40b37f4b068fc759f3a0dd61176a0f7570a4ba48bed8561c31d3967a3583981a  go1.22.3.windows-arm.zip
 59b76ee22b9b1c3afbf7f50e3cb4edb954d6c0d25e5e029ab5483a6804d61e71  go1.22.3.windows-arm64.zip

-# version:golangci 1.55.2
+# version:golangci 1.59.0
 # https://github.com/golangci/golangci-lint/releases/
-# https://github.com/golangci/golangci-lint/releases/download/v1.55.2/
-632e96e6d5294fbbe7b2c410a49c8fa01c60712a0af85a567de85bcc1623ea21  golangci-lint-1.55.2-darwin-amd64.tar.gz
-234463f059249f82045824afdcdd5db5682d0593052f58f6a3039a0a1c3899f6  golangci-lint-1.55.2-darwin-arm64.tar.gz
-2bdd105e2d4e003a9058c33a22bb191a1e0f30fa0790acca0d8fbffac1d6247c  golangci-lint-1.55.2-freebsd-386.tar.gz
-e75056e8b082386676ce23eba455cf893931a792c0d87e1e3743c0aec33c7fb5  golangci-lint-1.55.2-freebsd-amd64.tar.gz
-5789b933facaf6136bd23f1d50add67b79bbcf8dfdfc9069a37f729395940a66  golangci-lint-1.55.2-freebsd-armv6.tar.gz
-7f21ab1008d05f32c954f99470fc86a83a059e530fe2add1d0b7d8ed4d8992a7  golangci-lint-1.55.2-freebsd-armv7.tar.gz
-33ab06139b9219a28251f10821da94423db30285cc2af97494cbb2a281927de9  golangci-lint-1.55.2-illumos-amd64.tar.gz
-57ce6f8ce3ad6ee45d7cc3d9a047545a851c2547637834a3fcb086c7b40b1e6b  golangci-lint-1.55.2-linux-386.tar.gz
-ca21c961a33be3bc15e4292dc40c98c8dcc5463a7b6768a3afc123761630c09c  golangci-lint-1.55.2-linux-amd64.tar.gz
-8eb0cee9b1dbf0eaa49871798c7f8a5b35f2960c52d776a5f31eb7d886b92746  golangci-lint-1.55.2-linux-arm64.tar.gz
-3195f3e0f37d353fd5bd415cabcd4e263f5c29d3d0ffb176c26ff3d2c75eb3bb  golangci-lint-1.55.2-linux-armv6.tar.gz
-c823ee36eb1a719e171de1f2f5ca3068033dce8d9817232fd10ed71fd6650406  golangci-lint-1.55.2-linux-armv7.tar.gz
-758a5d2a356dc494bd13ed4c0d4bf5a54a4dc91267ea5ecdd87b86c7ca0624e7  golangci-lint-1.55.2-linux-loong64.tar.gz
-2c7b9abdce7cae802a67d583cd7c6dca520bff6d0e17c8535a918e2f2b437aa0  golangci-lint-1.55.2-linux-mips64.tar.gz
-024e0a15b85352cc27271285526e16a4ab66d3e67afbbe446c9808c06cb8dbed  golangci-lint-1.55.2-linux-mips64le.tar.gz
-6b00f89ba5506c1de1efdd9fa17c54093013a294fefd8b9b31534db626a672ee  golangci-lint-1.55.2-linux-ppc64le.tar.gz
-0faa0d047d9bf7b703ed3ea65b6117043c93504f9ca1de25ae929d3901c73d4a  golangci-lint-1.55.2-linux-riscv64.tar.gz
-30dec9b22e7d5bb4e9d5ccea96da20f71cd7db3c8cf30b8ddc7cb9174c4d742a  golangci-lint-1.55.2-linux-s390x.tar.gz
-5a0ede48f79ad707902fdb29be8cd2abd8302dc122b65ebae3fdfc86751c7698  golangci-lint-1.55.2-netbsd-386.tar.gz
-95af20a2e617126dd5b08122ece7819101070e1582a961067ce8c41172f901ad  golangci-lint-1.55.2-netbsd-amd64.tar.gz
-94fb7dacb7527847cc95d7120904e19a2a0a81a0d50d61766c9e0251da72ab9d  golangci-lint-1.55.2-netbsd-armv6.tar.gz
-ca906bce5fee9619400e4a321c56476fe4a4efb6ac4fc989d340eb5563348873  golangci-lint-1.55.2-netbsd-armv7.tar.gz
-45b442f69fc8915c4500201c0247b7f3f69544dbc9165403a61f9095f2c57355  golangci-lint-1.55.2-windows-386.zip
-f57d434d231d43417dfa631587522f8c1991220b43c8ffadb9c7bd279508bf81  golangci-lint-1.55.2-windows-amd64.zip
-fd7dc8f4c6829ee6fafb252a4d81d2155cd35da7833665cbb25d53ce7cecd990  golangci-lint-1.55.2-windows-arm64.zip
-1892c3c24f9e7ef44b02f6750c703864b6dc350129f3ec39510300007b2376f1  golangci-lint-1.55.2-windows-armv6.zip
-a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a  golangci-lint-1.55.2-windows-armv7.zip
+# https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
+418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7  golangci-lint-1.59.0-darwin-amd64.tar.gz
+5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6  golangci-lint-1.59.0-darwin-arm64.tar.gz
+8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2  golangci-lint-1.59.0-freebsd-386.tar.gz
+658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f  golangci-lint-1.59.0-freebsd-amd64.tar.gz
+4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a  golangci-lint-1.59.0-freebsd-armv6.tar.gz
+ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3  golangci-lint-1.59.0-freebsd-armv7.tar.gz
+439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6  golangci-lint-1.59.0-illumos-amd64.tar.gz
+940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6  golangci-lint-1.59.0-linux-386.tar.gz
+3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4  golangci-lint-1.59.0-linux-amd64.tar.gz
+c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1  golangci-lint-1.59.0-linux-arm64.tar.gz
+93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0  golangci-lint-1.59.0-linux-armv6.tar.gz
+d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc  golangci-lint-1.59.0-linux-armv7.tar.gz
+047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c  golangci-lint-1.59.0-linux-loong64.tar.gz
+5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9  golangci-lint-1.59.0-linux-mips64.tar.gz
+71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b  golangci-lint-1.59.0-linux-mips64le.tar.gz
+6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00  golangci-lint-1.59.0-linux-ppc64le.tar.gz
+af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971  golangci-lint-1.59.0-linux-riscv64.tar.gz
+a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6  golangci-lint-1.59.0-linux-s390x.tar.gz
+68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2  golangci-lint-1.59.0-netbsd-386.tar.gz
+d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7  golangci-lint-1.59.0-netbsd-amd64.tar.gz
+83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c  golangci-lint-1.59.0-netbsd-arm64.tar.gz
+6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237  golangci-lint-1.59.0-netbsd-armv6.tar.gz
+11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503  golangci-lint-1.59.0-netbsd-armv7.tar.gz
+466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8  golangci-lint-1.59.0-windows-386.zip
+3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565  golangci-lint-1.59.0-windows-amd64.zip
+b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316  golangci-lint-1.59.0-windows-arm64.zip
+6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e  golangci-lint-1.59.0-windows-armv6.zip
+3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7  golangci-lint-1.59.0-windows-armv7.zip

 # This is the builder on PPA that will build Go itself (inception-y), don't modify!
 #
diff --git a/cmd/clef/main.go b/cmd/clef/main.go
index f9b00e4a12..88d4c99e78 100644
--- a/cmd/clef/main.go
+++ b/cmd/clef/main.go
@@ -552,7 +552,7 @@ func listWallets(c *cli.Context) error {
 // accountImport imports a raw hexadecimal private key via CLI.
 func accountImport(c *cli.Context) error {
	if c.Args().Len() != 1 {
-		return errors.New("<key> must be given as first argument.")
+		return errors.New("<key> must be given as first argument")
	}
	internalApi, ui, err := initInternalApi(c)
	if err != nil {
diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go
index 45bcdcd367..3b5400ca3a 100644
--- a/cmd/devp2p/discv4cmd.go
+++ b/cmd/devp2p/discv4cmd.go
@@ -20,6 +20,7 @@ import (
	"errors"
	"fmt"
	"net"
+	"net/http"
	"strconv"
	"strings"
	"time"
@@ -28,9 +29,11 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/internal/flags"
+	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/rpc"
	"github.com/urfave/cli/v2"
 )

@@ -45,6 +48,7 @@ var (
			discv4ResolveJSONCommand,
			discv4CrawlCommand,
			discv4TestCommand,
+			discv4ListenCommand,
		},
	}
	discv4PingCommand = &cli.Command{
@@ -75,6 +79,14 @@ var (
		Flags:     discoveryNodeFlags,
		ArgsUsage: "<nodes.json file>",
	}
+	discv4ListenCommand = &cli.Command{
+		Name:   "listen",
+		Usage:  "Runs a discovery node",
+		Action: discv4Listen,
+		Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
+			httpAddrFlag,
+		}),
+	}
	discv4CrawlCommand = &cli.Command{
		Name:   "crawl",
		Usage:  "Updates a nodes.json file with random nodes found in the DHT",
@@ -131,6 +143,10 @@ var (
		Usage:   "Enode of the remote node under test",
		EnvVars: []string{"REMOTE_ENODE"},
	}
+	httpAddrFlag = &cli.StringFlag{
+		Name:  "rpc",
+		Usage: "HTTP server listening address",
+	}
 )

 var discoveryNodeFlags = []cli.Flag{
@@ -154,6 +170,27 @@ func discv4Ping(ctx *cli.Context) error {
	return nil
 }

+func discv4Listen(ctx *cli.Context) error {
+	disc, _ := startV4(ctx)
+	defer disc.Close()
+
+	fmt.Println(disc.Self())
+
+	httpAddr := ctx.String(httpAddrFlag.Name)
+	if httpAddr == "" {
+		// Non-HTTP mode.
+		select {}
+	}
+
+	api := &discv4API{disc}
+	log.Info("Starting RPC API server", "addr", httpAddr)
+	srv := rpc.NewServer()
+	srv.RegisterName("discv4", api)
+	http.DefaultServeMux.Handle("/", srv)
+	httpsrv := http.Server{Addr: httpAddr, Handler: http.DefaultServeMux}
+	return httpsrv.ListenAndServe()
+}
+
 func discv4RequestRecord(ctx *cli.Context) error {
	n := getNodeArg(ctx)
	disc, _ := startV4(ctx)
@@ -362,3 +399,23 @@ func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
	}
	return nodes, nil
 }
+
+type discv4API struct {
+	host *discover.UDPv4
+}
+
+func (api *discv4API) LookupRandom(n int) (ns []*enode.Node) {
+	it := api.host.RandomNodes()
+	for len(ns) < n && it.Next() {
+		ns = append(ns, it.Node())
+	}
+	return ns
+}
+
+func (api *discv4API) Buckets() [][]discover.BucketNode {
+	return api.host.TableBuckets()
+}
+
+func (api *discv4API) Self() *enode.Node {
+	return api.host.Self()
+}
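The new listen subcommand serves the discv4API methods over JSON-RPC when --rpc is given. A sketch of a client against a locally running `devp2p discv4 listen --rpc 127.0.0.1:8545` (the address is an assumption for the example; method names follow the server's RegisterName("discv4", ...) namespace):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes `devp2p discv4 listen --rpc 127.0.0.1:8545` is running locally.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// RegisterName("discv4", api) exposes Self as discv4_self, etc.
	var self enode.Node
	if err := client.Call(&self, "discv4_self"); err != nil {
		panic(err)
	}
	fmt.Println("local node:", self.URLv4())

	// LookupRandom(n) walks RandomNodes() until n nodes were found.
	var nodes []*enode.Node
	if err := client.Call(&nodes, "discv4_lookupRandom", 5); err != nil {
		panic(err)
	}
	fmt.Println("found", len(nodes), "random nodes")
}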
diff --git a/cmd/devp2p/internal/v4test/framework.go b/cmd/devp2p/internal/v4test/framework.go
index 9286594181..df1f1f8aba 100644
--- a/cmd/devp2p/internal/v4test/framework.go
+++ b/cmd/devp2p/internal/v4test/framework.go
@@ -62,7 +62,7 @@ func newTestEnv(remote string, listen1, listen2 string) *testenv {
	if tcpPort = node.TCP(); tcpPort == 0 {
		tcpPort = 30303
	}
-	if udpPort = node.TCP(); udpPort == 0 {
+	if udpPort = node.UDP(); udpPort == 0 {
		udpPort = 30303
	}
	node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort)
@@ -110,7 +110,7 @@ func (te *testenv) localEndpoint(c net.PacketConn) v4wire.Endpoint {
 }

 func (te *testenv) remoteEndpoint() v4wire.Endpoint {
-	return v4wire.NewEndpoint(te.remoteAddr, 0)
+	return v4wire.NewEndpoint(te.remoteAddr.AddrPort(), 0)
 }

 func contains(ns []v4wire.Node, key v4wire.Pubkey) bool {
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index a46f8a0667..baf97f248b 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -217,7 +217,7 @@ func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
		return nil
	}
	if env.ParentBaseFee == nil || env.Number == 0 {
-		return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
+		return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'parentBaseFee' in env section"))
	}
	env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{
		Number: new(big.Int).SetUint64(env.Number - 1),
diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go
index 5a74491c3b..76ebc420ec 100644
--- a/cmd/evm/t8n_test.go
+++ b/cmd/evm/t8n_test.go
@@ -234,7 +234,7 @@ func TestT8n(t *testing.T) {
		{ // Test post-merge transition
			base: "./testdata/24",
			input: t8nInput{
-				"alloc.json", "txs.json", "env.json", "Merge", "",
+				"alloc.json", "txs.json", "env.json", "Paris", "",
			},
			output: t8nOutput{alloc: true, result: true},
			expOut: "exp.json",
@@ -242,7 +242,7 @@ func TestT8n(t *testing.T) {
		{ // Test post-merge transition where input is missing random
			base: "./testdata/24",
			input: t8nInput{
-				"alloc.json", "txs.json", "env-missingrandom.json", "Merge", "",
+				"alloc.json", "txs.json", "env-missingrandom.json", "Paris", "",
			},
			output:      t8nOutput{alloc: false, result: false},
			expExitCode: 3,
@@ -250,7 +250,7 @@ func TestT8n(t *testing.T) {
		{ // Test base fee calculation
			base: "./testdata/25",
			input: t8nInput{
-				"alloc.json", "txs.json", "env.json", "Merge", "",
+				"alloc.json", "txs.json", "env.json", "Paris", "",
			},
			output: t8nOutput{alloc: true, result: true},
			expOut: "exp.json",
@@ -378,7 +378,7 @@ func TestT8nTracing(t *testing.T) {
		{
			base: "./testdata/32",
			input: t8nInput{
-				"alloc.json", "txs.json", "env.json", "Merge", "",
+				"alloc.json", "txs.json", "env.json", "Paris", "",
			},
			extraArgs:      []string{"--trace", "--trace.callframes"},
			expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"},
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index 091c37e457..6e95d7522e 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -105,17 +105,17 @@ func TestAttachWelcome(t *testing.T) {
		"--http", "--http.port", httpPort, "--ws", "--ws.port", wsPort)
	t.Run("ipc", func(t *testing.T) {
-		waitForEndpoint(t, ipc, 3*time.Second)
+		waitForEndpoint(t, ipc, 4*time.Second)
		testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)
	})
	t.Run("http", func(t *testing.T) {
		endpoint := "http://127.0.0.1:" + httpPort
-		waitForEndpoint(t, endpoint, 3*time.Second)
+		waitForEndpoint(t, endpoint, 4*time.Second)
		testAttachWelcome(t, geth, endpoint, httpAPIs)
	})
	t.Run("ws", func(t *testing.T) {
		endpoint := "ws://127.0.0.1:" + wsPort
-		waitForEndpoint(t, endpoint, 3*time.Second)
+		waitForEndpoint(t, endpoint, 4*time.Second)
		testAttachWelcome(t, geth, endpoint, httpAPIs)
	})
	geth.Kill()
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index facfbff537..afdd13ea27 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -91,7 +91,7 @@ data, and verifies that all snapshot storage data has a corresponding account.
		},
		{
			Name:      "inspect-account",
-			Usage:     "Check all snapshot layers for the a specific account",
+			Usage:     "Check all snapshot layers for the specific account",
			ArgsUsage: "<address | hash>",
			Action:    checkAccount,
			Flags:     flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
diff --git a/cmd/geth/testdata/vcheck/vulnerabilities.json b/cmd/geth/testdata/vcheck/vulnerabilities.json
index bee0e66dd8..31a34de6be 100644
--- a/cmd/geth/testdata/vcheck/vulnerabilities.json
+++ b/cmd/geth/testdata/vcheck/vulnerabilities.json
@@ -166,5 +166,37 @@
     "severity": "Low",
     "CVE": "CVE-2022-29177",
     "check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16)-.*)$"
+  },
+  {
+    "name": "DoS via malicious p2p message",
+    "uid": "GETH-2023-01",
+    "summary": "A vulnerable node can be made to consume unbounded amounts of memory when handling specially crafted p2p messages sent from an attacker node.",
+    "description": "The p2p handler spawned a new goroutine to respond to ping requests. By flooding a node with ping requests, an unbounded number of goroutines can be created, leading to resource exhaustion and potentially crash due to OOM.",
+    "links": [
+      "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-ppjg-v974-84cm",
+      "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities"
+    ],
+    "introduced": "v1.10.0",
+    "fixed": "v1.12.1",
+    "published": "2023-09-06",
+    "severity": "High",
+    "CVE": "CVE-2023-40591",
+    "check": "(Geth\\/v1\\.(10|11)\\..*)|(Geth\\/v1\\.12\\.0-.*)$"
+  },
+  {
+    "name": "DoS via malicious p2p message",
+    "uid": "GETH-2024-01",
+    "summary": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node.",
+    "description": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node. Full details will be available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652)",
+    "links": [
+      "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652",
+      "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities"
+    ],
+    "introduced": "v1.10.0",
+    "fixed": "v1.13.15",
+    "published": "2024-05-06",
+    "severity": "High",
+    "CVE": "CVE-2024-32972",
+    "check": "(Geth\\/v1\\.(10|11|12)\\..*)|(Geth\\/v1\\.13\\.\\d-.*)|(Geth\\/v1\\.13\\.1(0|1|2|3|4)-.*)$"
   }
 ]
diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go
index ff3931356e..9eb37fb5a8 100644
--- a/cmd/geth/verkle.go
+++ b/cmd/geth/verkle.go
@@ -28,7 +28,7 @@ import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/internal/flags"
	"github.com/ethereum/go-ethereum/log"
-	"github.com/gballet/go-verkle"
+	"github.com/ethereum/go-verkle"
	"github.com/urfave/cli/v2"
 )
diff --git a/common/math/big_test.go b/common/math/big_test.go
index 803b5e1cc6..ee8f09e7b4 100644
--- a/common/math/big_test.go
+++ b/common/math/big_test.go
@@ -180,9 +180,9 @@ func BenchmarkByteAtOld(b *testing.B) {
 func TestReadBits(t *testing.T) {
	check := func(input string) {
		want, _ := hex.DecodeString(input)
-		int, _ := new(big.Int).SetString(input, 16)
+		n, _ := new(big.Int).SetString(input, 16)
		buf := make([]byte, len(want))
-		ReadBits(int, buf)
+		ReadBits(n, buf)
		if !bytes.Equal(buf, want) {
			t.Errorf("have: %x\nwant: %x", buf, want)
		}
diff --git a/common/math/integer.go b/common/math/integer.go
index da01c0a08e..080fba8fea 100644
--- a/common/math/integer.go
+++ b/common/math/integer.go
@@ -54,11 +54,11 @@ func (i *HexOrDecimal64) UnmarshalJSON(input []byte) error {

 // UnmarshalText implements encoding.TextUnmarshaler.
 func (i *HexOrDecimal64) UnmarshalText(input []byte) error {
-	int, ok := ParseUint64(string(input))
+	n, ok := ParseUint64(string(input))
	if !ok {
		return fmt.Errorf("invalid hex or decimal integer %q", input)
	}
-	*i = HexOrDecimal64(int)
+	*i = HexOrDecimal64(n)
	return nil
 }
diff --git a/core/blockchain.go b/core/blockchain.go
index 1524ace48b..45f248dd22 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1925,8 +1925,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
		}
		statedb.SetLogger(bc.logger)

-		// Enable prefetching to pull in trie node paths while processing transactions
-		statedb.StartPrefetcher("chain")
+		// If we are past Byzantium, enable prefetching to pull in trie node paths
+		// while processing transactions. Before Byzantium the prefetcher is mostly
+		// useless due to the intermediate root hashing after each transaction.
+		if bc.chainConfig.IsByzantium(block.Number()) {
+			statedb.StartPrefetcher("chain")
+		}
		activeState = statedb

		// If we have a followup block, run that against the current state to pre-cache
diff --git a/core/bloombits/scheduler.go b/core/bloombits/scheduler.go
index 6449c7465a..a523bc55ab 100644
--- a/core/bloombits/scheduler.go
+++ b/core/bloombits/scheduler.go
@@ -23,7 +23,7 @@ import (
 // request represents a bloom retrieval task to prioritize and pull from the local
 // database or remotely from the network.
 type request struct {
-	section uint64 // Section index to retrieve the a bit-vector from
+	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
 }
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
index f099609015..bf3bde756c 100644
--- a/core/chain_indexer_test.go
+++ b/core/chain_indexer_test.go
@@ -228,7 +228,7 @@ func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Heade
		b.t.Error("Unexpected call to Process")
		// Can't use Fatal since this is not the test's goroutine.
		// Returning error stops the chainIndexer's updateLoop
-		return errors.New("Unexpected call to Process")
+		return errors.New("unexpected call to Process")
	case b.processCh <- header.Number.Uint64():
	}
	return nil
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 841c5ef802..6df74753f5 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -32,7 +32,7 @@ import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/triedb"
-	"github.com/gballet/go-verkle"
+	"github.com/ethereum/go-verkle"
	"github.com/holiman/uint256"
 )
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index cb637ae789..e1c54d1e64 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -43,12 +43,11 @@ func TestGeneratePOSChain(t *testing.T) {
		bb      = common.Address{0xbb}
		funds   = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether))
		config  = *params.AllEthashProtocolChanges
-		asm4788 = common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
		gspec   = &Genesis{
			Config: &config,
			Alloc: types.GenesisAlloc{
				address: {Balance: funds},
-				params.BeaconRootsAddress: {Balance: common.Big0, Code: asm4788},
+				params.BeaconRootsAddress: {Code: params.BeaconRootsCode},
			},
			BaseFee:    big.NewInt(params.InitialBaseFee),
			Difficulty: common.Big1,
diff --git a/core/error.go b/core/error.go
index e6e6ba2f90..161538fe43 100644
--- a/core/error.go
+++ b/core/error.go
@@ -64,6 +64,11 @@ var (
	// than init code size limit.
	ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded")

+	// ErrInsufficientBalanceWitness is returned if the transaction sender has enough
+	// funds to cover the transfer, but not enough to pay for witness access/modification
+	// costs for the transaction
+	ErrInsufficientBalanceWitness = errors.New("insufficient funds to cover witness access costs for transaction")
+
	// ErrInsufficientFunds is returned if the total cost of executing a transaction
	// is higher than the balance of the user's account.
	ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
diff --git a/core/genesis.go b/core/genesis.go
index 4e59de9c80..c7e6f919c7 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -601,6 +601,8 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
			common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul
			common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing
			common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b
+			// Pre-deploy EIP-4788 system contract
+			params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode},
		},
	}
	if faucet != nil {
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 4fbabf00fd..382d7a38e5 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -304,7 +304,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
		},
	}

-	expected := common.Hex2Bytes("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b")
+	expected := common.FromHex("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b")
	got := genesis.ToBlock().Root().Bytes()
	if !bytes.Equal(got, expected) {
		t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
@@ -314,7 +314,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
	triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
	block := genesis.MustCommit(db, triedb)
	if !bytes.Equal(block.Root().Bytes(), expected) {
-		t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
+		t.Fatalf("invalid genesis state root, expected %x, got %x", expected, block.Root())
	}

	// Test that the trie is verkle
diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go
index 7fa59b8d21..6f8541f43b 100644
--- a/core/rawdb/freezer_resettable.go
+++ b/core/rawdb/freezer_resettable.go
@@ -33,10 +33,11 @@ type freezerOpenFunc = func() (*Freezer, error)
 // resettableFreezer is a wrapper of the freezer which makes the
 // freezer resettable.
 type resettableFreezer struct {
-	freezer *Freezer
-	opener  freezerOpenFunc
-	datadir string
-	lock    sync.RWMutex
+	readOnly bool
+	freezer  *Freezer
+	opener   freezerOpenFunc
+	datadir  string
+	lock     sync.RWMutex
 }

 // newResettableFreezer creates a resettable freezer, note freezer is
@@ -60,9 +61,10 @@ func newResettableFreezer(datadir string, namespace string, readonly bool, maxTa
		return nil, err
	}
	return &resettableFreezer{
-		freezer: freezer,
-		opener:  opener,
-		datadir: datadir,
+		readOnly: readonly,
+		freezer:  freezer,
+		opener:   opener,
+		datadir:  datadir,
	}, nil
 }

@@ -74,6 +76,9 @@ func (f *resettableFreezer) Reset() error {
	f.lock.Lock()
	defer f.lock.Unlock()

+	if f.readOnly {
+		return errReadOnly
+	}
	if err := f.freezer.Close(); err != nil {
		return err
	}
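The Reset guard above refuses to wipe a freezer that was opened read-only, returning the package's existing errReadOnly. A toy sketch of the same construction-time flag pattern (the store type and error value here are illustrative stand-ins, not the rawdb implementation):

package main

import (
	"errors"
	"fmt"
)

var errReadOnly = errors.New("read only")

// store captures a readOnly flag at construction time; every destructive
// operation checks it before touching the underlying data.
type store struct {
	readOnly bool
	data     map[string]string
}

func (s *store) Reset() error {
	if s.readOnly {
		return errReadOnly // refuse to wipe a store opened read-only
	}
	s.data = make(map[string]string)
	return nil
}

func main() {
	s := &store{readOnly: true, data: map[string]string{"k": "v"}}
	fmt.Println(s.Reset()) // read only
}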
diff --git a/core/state/access_events.go b/core/state/access_events.go
new file mode 100644
index 0000000000..4b6c7c7e69
--- /dev/null
+++ b/core/state/access_events.go
@@ -0,0 +1,320 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"maps"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/trie/utils"
+	"github.com/holiman/uint256"
+)
+
+// mode specifies how a tree location has been accessed
+// for the byte value:
+// * the first bit is set if the branch has been edited
+// * the second bit is set if the branch has been read
+type mode byte
+
+const (
+	AccessWitnessReadFlag  = mode(1)
+	AccessWitnessWriteFlag = mode(2)
+)
+
+var zeroTreeIndex uint256.Int
+
+// AccessEvents lists the locations of the state that are being accessed
+// during the production of a block.
+type AccessEvents struct {
+	branches map[branchAccessKey]mode
+	chunks   map[chunkAccessKey]mode
+
+	pointCache *utils.PointCache
+}
+
+func NewAccessEvents(pointCache *utils.PointCache) *AccessEvents {
+	return &AccessEvents{
+		branches:   make(map[branchAccessKey]mode),
+		chunks:     make(map[chunkAccessKey]mode),
+		pointCache: pointCache,
+	}
+}
+
+// Merge is used to merge the access events that were generated during the
+// execution of a tx, with the accumulation of all access events that were
+// generated during the execution of all txs preceding this one in a block.
+func (ae *AccessEvents) Merge(other *AccessEvents) {
+	for k := range other.branches {
+		ae.branches[k] |= other.branches[k]
+	}
+	for k, chunk := range other.chunks {
+		ae.chunks[k] |= chunk
+	}
+}
+
+// Keys returns, predictably, the list of keys that were touched during the
+// buildup of the access witness.
+func (ae *AccessEvents) Keys() [][]byte {
+	// TODO: consider if parallelizing this is worth it, probably depending on len(ae.chunks).
+	keys := make([][]byte, 0, len(ae.chunks))
+	for chunk := range ae.chunks {
+		basePoint := ae.pointCache.Get(chunk.addr[:])
+		key := utils.GetTreeKeyWithEvaluatedAddress(basePoint, &chunk.treeIndex, chunk.leafKey)
+		keys = append(keys, key)
+	}
+	return keys
+}
+
+func (ae *AccessEvents) Copy() *AccessEvents {
+	cpy := &AccessEvents{
+		branches:   maps.Clone(ae.branches),
+		chunks:     maps.Clone(ae.chunks),
+		pointCache: ae.pointCache,
+	}
+	return cpy
+}
+
+// AddAccount returns the gas to be charged for each of the currently cold
+// member fields of an account.
+func (ae *AccessEvents) AddAccount(addr common.Address, isWrite bool) uint64 {
+	var gas uint64
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite)
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite)
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite)
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, isWrite)
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite)
+	return gas
+}
+
+// MessageCallGas returns the gas to be charged for each of the currently
+// cold member fields of an account, that need to be touched when making a message
+// call to that account.
+func (ae *AccessEvents) MessageCallGas(destination common.Address) uint64 {
+	var gas uint64
+	gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.VersionLeafKey, false)
+	gas += ae.touchAddressAndChargeGas(destination, zeroTreeIndex, utils.CodeSizeLeafKey, false)
+	return gas
+}
+
+// ValueTransferGas returns the gas to be charged for each of the currently
+// cold balance member fields of the caller and the callee accounts.
+func (ae *AccessEvents) ValueTransferGas(callerAddr, targetAddr common.Address) uint64 {
+	var gas uint64
+	gas += ae.touchAddressAndChargeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey, true)
+	gas += ae.touchAddressAndChargeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey, true)
+	return gas
+}
+
+// ContractCreateInitGas returns the access gas costs for the initialization of
+// a contract creation.
+func (ae *AccessEvents) ContractCreateInitGas(addr common.Address, createSendsValue bool) uint64 {
+	var gas uint64
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, true)
+	gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, true)
+	if createSendsValue {
+		gas += ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, true)
+	}
+	return gas
+}
+
+// AddTxOrigin adds the member fields of the sender account to the access event list,
+// so that cold accesses are not charged, since they are covered by the 21000 gas.
+func (ae *AccessEvents) AddTxOrigin(originAddr common.Address) {
+	ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey, false)
+	ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey, true)
+	ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey, true)
+	ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey, false)
+	ae.touchAddressAndChargeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey, false)
+}
+
+// AddTxDestination adds the member fields of the destination account to the access event list,
+// so that cold accesses are not charged, since they are covered by the 21000 gas.
+func (ae *AccessEvents) AddTxDestination(addr common.Address, sendsValue bool) {
+	ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, false)
+	ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, sendsValue)
+	ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, false)
+	ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, false)
+	ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, false)
+}
+
+// SlotGas returns the amount of gas to be charged for a cold storage access.
+func (ae *AccessEvents) SlotGas(addr common.Address, slot common.Hash, isWrite bool) uint64 {
+	treeIndex, subIndex := utils.StorageIndex(slot.Bytes())
+	return ae.touchAddressAndChargeGas(addr, *treeIndex, subIndex, isWrite)
+}
+
+// touchAddressAndChargeGas adds any missing access event to the access event list, and returns the cold
+// access cost to be charged, if need be.
+func (ae *AccessEvents) touchAddressAndChargeGas(addr common.Address, treeIndex uint256.Int, subIndex byte, isWrite bool) uint64 {
+	stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := ae.touchAddress(addr, treeIndex, subIndex, isWrite)
+
+	var gas uint64
+	if stemRead {
+		gas += params.WitnessBranchReadCost
+	}
+	if selectorRead {
+		gas += params.WitnessChunkReadCost
+	}
+	if stemWrite {
+		gas += params.WitnessBranchWriteCost
+	}
+	if selectorWrite {
+		gas += params.WitnessChunkWriteCost
+	}
+	if selectorFill {
+		gas += params.WitnessChunkFillCost
+	}
+	return gas
+}
+
+// touchAddress adds any missing access event to the access event list.
+func (ae *AccessEvents) touchAddress(addr common.Address, treeIndex uint256.Int, subIndex byte, isWrite bool) (bool, bool, bool, bool, bool) {
+	branchKey := newBranchAccessKey(addr, treeIndex)
+	chunkKey := newChunkAccessKey(branchKey, subIndex)
+
+	// Read access.
+	var branchRead, chunkRead bool
+	if _, hasStem := ae.branches[branchKey]; !hasStem {
+		branchRead = true
+		ae.branches[branchKey] = AccessWitnessReadFlag
+	}
+	if _, hasSelector := ae.chunks[chunkKey]; !hasSelector {
+		chunkRead = true
+		ae.chunks[chunkKey] = AccessWitnessReadFlag
+	}
+
+	// Write access.
+	var branchWrite, chunkWrite, chunkFill bool
+	if isWrite {
+		if (ae.branches[branchKey] & AccessWitnessWriteFlag) == 0 {
+			branchWrite = true
+			ae.branches[branchKey] |= AccessWitnessWriteFlag
+		}
+
+		chunkValue := ae.chunks[chunkKey]
+		if (chunkValue & AccessWitnessWriteFlag) == 0 {
+			chunkWrite = true
+			ae.chunks[chunkKey] |= AccessWitnessWriteFlag
+		}
+		// TODO: charge chunk filling costs if the leaf was previously empty in the state
+	}
+	return branchRead, chunkRead, branchWrite, chunkWrite, chunkFill
+}
+
+type branchAccessKey struct {
+	addr      common.Address
+	treeIndex uint256.Int
+}
+
+func newBranchAccessKey(addr common.Address, treeIndex uint256.Int) branchAccessKey {
+	var sk branchAccessKey
+	sk.addr = addr
+	sk.treeIndex = treeIndex
+	return sk
+}
+
+type chunkAccessKey struct {
+	branchAccessKey
+	leafKey byte
+}
+
+func newChunkAccessKey(branchKey branchAccessKey, leafKey byte) chunkAccessKey {
+	var lk chunkAccessKey
+	lk.branchAccessKey = branchKey
+	lk.leafKey = leafKey
+	return lk
+}
+
+// CodeChunksRangeGas is a helper function to touch every chunk in a code range and charge witness gas costs
+func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC, size uint64, codeLen uint64, isWrite bool) uint64 {
+	// note that in the case where the copied code is outside the range of the
+	// contract code but touches the last leaf with contract code in it,
+	// we don't include the last leaf of code in the AccessWitness. The
+	// reason that we do not need the last leaf is the account's code size
+	// is already in the AccessWitness so a stateless verifier can see that
+	// the code from the last leaf is not needed.
+	if (codeLen == 0 && size == 0) || startPC > codeLen {
+		return 0
+	}
+
+	endPC := startPC + size
+	if endPC > codeLen {
+		endPC = codeLen
+	}
+	if endPC > 0 {
+		endPC -= 1 // endPC is the last bytecode that will be touched.
+	}
+
+	var statelessGasCharged uint64
+	for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ {
+		treeIndex := *uint256.NewInt((chunkNumber + 128) / 256)
+		subIndex := byte((chunkNumber + 128) % 256)
+		gas := ae.touchAddressAndChargeGas(contractAddr, treeIndex, subIndex, isWrite)
+		var overflow bool
+		statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, gas)
+		if overflow {
+			panic("overflow when adding gas")
+		}
+	}
+	return statelessGasCharged
+}
+
+// VersionGas adds the account's version to the accessed data, and returns the
+// amount of gas that it costs.
+// Note that an access in write mode implies an access in read mode, whereas an
+// access in read mode does not imply an access in write mode.
+func (ae *AccessEvents) VersionGas(addr common.Address, isWrite bool) uint64 {
+	return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.VersionLeafKey, isWrite)
+}
+
+// BalanceGas adds the account's balance to the accessed data, and returns the
+// amount of gas that it costs. The isWrite flag indicates whether the access is
+// in write mode. If false, the charged gas corresponds to an access in read mode.
+// Note that an access in write mode implies an access in read mode, whereas an access in
+// read mode does not imply an access in write mode.
+func (ae *AccessEvents) BalanceGas(addr common.Address, isWrite bool) uint64 {
+	return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.BalanceLeafKey, isWrite)
+}
+
+// NonceGas adds the account's nonce to the accessed data, and returns the
+// amount of gas that it costs. The isWrite flag indicates whether the access is
+// in write mode. If false, the charged gas corresponds to an access in read mode.
+// Note that an access in write mode implies an access in read mode, whereas an access in
+// read mode does not imply an access in write mode.
+func (ae *AccessEvents) NonceGas(addr common.Address, isWrite bool) uint64 {
+	return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.NonceLeafKey, isWrite)
+}
+
+// CodeSizeGas adds the account's code size to the accessed data, and returns the
+// amount of gas that it costs. The isWrite flag indicates whether the access is
+// in write mode. If false, the charged gas corresponds to an access in read mode.
+// Note that an access in write mode implies an access in read mode, whereas an access in
+// read mode does not imply an access in write mode.
+func (ae *AccessEvents) CodeSizeGas(addr common.Address, isWrite bool) uint64 {
+	return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey, isWrite)
+}
+
+// CodeHashGas adds the account's code hash to the accessed data, and returns the
+// amount of gas that it costs. The isWrite flag indicates whether the access is
+// in write mode. If false, the charged gas corresponds to an access in read mode.
+// Note that an access in write mode implies an access in read mode, whereas an access in
+// read mode does not imply an access in write mode.
+func (ae *AccessEvents) CodeHashGas(addr common.Address, isWrite bool) uint64 {
+	return ae.touchAddressAndChargeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey, isWrite)
+}
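The intended charging behavior of the new AccessEvents type, as exercised by the tests below: the first touch of an account field pays branch plus chunk read costs, later touches of the same location are free, and further leaves under an already-touched branch pay only the chunk cost. A small usage sketch (the point-cache size is arbitrary):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/trie/utils"
)

func main() {
	ae := state.NewAccessEvents(utils.NewPointCache(1024))
	addr := common.HexToAddress("0x000000000000000000000000000000000000dead")

	// Cold access: WitnessBranchReadCost + WitnessChunkReadCost.
	fmt.Println("cold:", ae.VersionGas(addr, false))
	// Warm access: the location is already in the witness, so no charge.
	fmt.Println("warm:", ae.VersionGas(addr, false))
	// Another leaf under the same account header: only the chunk cost,
	// since the branch was already paid for above.
	fmt.Println("same branch:", ae.BalanceGas(addr, false))
}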
diff --git a/core/state/access_events_test.go b/core/state/access_events_test.go
new file mode 100644
index 0000000000..705033fe0b
--- /dev/null
+++ b/core/state/access_events_test.go
@@ -0,0 +1,153 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/trie/utils"
+)
+
+var (
+	testAddr  [20]byte
+	testAddr2 [20]byte
+)
+
+func init() {
+	for i := byte(0); i < 20; i++ {
+		testAddr[i] = i
+		testAddr2[i] = 2 * i
+	}
+}
+
+func TestAccountHeaderGas(t *testing.T) {
+	ae := NewAccessEvents(utils.NewPointCache(1024))
+
+	// Check cold read cost
+	gas := ae.VersionGas(testAddr, false)
+	if gas != params.WitnessBranchReadCost+params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessChunkReadCost)
+	}
+
+	// Check warm read cost
+	gas = ae.VersionGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check cold read costs in the same group no longer incur the branch read cost
+	gas = ae.BalanceGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+	gas = ae.NonceGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+	gas = ae.CodeSizeGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+	gas = ae.CodeHashGas(testAddr, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+
+	// Check cold write cost
+	gas = ae.VersionGas(testAddr, true)
+	if gas != params.WitnessBranchWriteCost+params.WitnessChunkWriteCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessBranchWriteCost)
+	}
+
+	// Check warm write cost
+	gas = ae.VersionGas(testAddr, true)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check a write without a read charges both read and write costs
+	gas = ae.BalanceGas(testAddr2, true)
+	if gas != params.WitnessBranchReadCost+params.WitnessBranchWriteCost+params.WitnessChunkWriteCost+params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessBranchWriteCost+params.WitnessChunkWriteCost+params.WitnessChunkReadCost)
+	}
+
+	// Check that a write followed by a read charges nothing
+	gas = ae.BalanceGas(testAddr2, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check that reading a slot from the account header only charges the
+	// chunk read cost.
+	gas = ae.SlotGas(testAddr, common.Hash{}, false)
+	if gas != params.WitnessChunkReadCost {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessChunkReadCost)
+	}
+}
+
+// TestContractCreateInitGas checks that the gas cost of contract creation is correctly
+// calculated.
+func TestContractCreateInitGas(t *testing.T) {
+	ae := NewAccessEvents(utils.NewPointCache(1024))
+
+	var testAddr [20]byte
+	for i := byte(0); i < 20; i++ {
+		testAddr[i] = i
+	}
+
+	// Check cold read cost, without a value
+	gas := ae.ContractCreateInitGas(testAddr, false)
+	if gas != params.WitnessBranchWriteCost+params.WitnessBranchReadCost+params.WitnessChunkWriteCost*2+params.WitnessChunkReadCost*2 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchWriteCost+params.WitnessBranchReadCost+params.WitnessChunkWriteCost*3)
+	}
+
+	// Check warm read cost
+	gas = ae.ContractCreateInitGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+}
+
+// TestMessageCallGas checks that the gas cost of message calls is correctly
+// calculated.
+func TestMessageCallGas(t *testing.T) {
+	ae := NewAccessEvents(utils.NewPointCache(1024))
+
+	// Check cold read cost, without a value
+	gas := ae.MessageCallGas(testAddr)
+	if gas != params.WitnessBranchReadCost+params.WitnessChunkReadCost*2 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, params.WitnessBranchReadCost+params.WitnessChunkReadCost*2)
+	}
+
+	// Check that reading the version and code size of the same account does not incur the branch read cost
+	gas = ae.VersionGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+	gas = ae.CodeSizeGas(testAddr, false)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+
+	// Check warm read cost
+	gas = ae.MessageCallGas(testAddr)
+	if gas != 0 {
+		t.Fatalf("incorrect gas computed, got %d, want %d", gas, 0)
+	}
+}
diff --git a/core/state/database.go b/core/state/database.go
index 6ded7db3b6..13a314e682 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -20,7 +20,6 @@ import (
	"errors"
	"fmt"

-	"github.com/crate-crypto/go-ipa/banderwagon"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -43,11 +42,8 @@ const (
	// Cache size granted for caching clean code.
	codeCacheSize = 64 * 1024 * 1024

-	// commitmentSize is the size of commitment stored in cache.
-	commitmentSize = banderwagon.UncompressedSize
-
-	// Cache item granted for caching commitment results.
-	commitmentCacheItems = 64 * 1024 * 1024 / (commitmentSize + common.AddressLength)
+	// Number of address->curve point associations to keep.
+	pointCacheSize = 4096
 )

 // Database wraps access to tries and contract code.
@@ -76,6 +72,9 @@ type Database interface {
	// DiskDB returns the underlying key-value disk database.
	DiskDB() ethdb.KeyValueStore

+	// PointCache returns the cache holding points used in verkle tree key computation
+	PointCache() *utils.PointCache
+
	// TrieDB returns the underlying trie database for managing trie nodes.
	TrieDB() *triedb.Database
 }
@@ -148,6 +147,9 @@ type Trie interface {
	// nodes of the longest existing prefix of the key (at least the root), ending
	// with the node that proves the absence of the key.
	Prove(key []byte, proofDb ethdb.KeyValueWriter) error
+
+	// IsVerkle returns true if the trie is verkle-tree based
+	IsVerkle() bool
 }

 // NewDatabase creates a backing store for state. The returned database is safe for
@@ -173,6 +175,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database {
		codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
		codeCache:     lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
		triedb:        triedb.NewDatabase(db, config),
+		pointCache:    utils.NewPointCache(pointCacheSize),
	}
	return cdb
 }
@@ -191,6 +194,7 @@ func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database
		codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
		codeCache:     lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
		triedb:        triedb,
+		pointCache:    utils.NewPointCache(pointCacheSize),
	}
	return cdb
 }
@@ -211,6 +215,7 @@ type cachingDB struct {
	codeSizeCache *lru.Cache[common.Hash, int]
	codeCache     *lru.SizeConstrainedCache[common.Hash, []byte]
	triedb        *triedb.Database
+	pointCache    *utils.PointCache
 }

 func (db *cachingDB) WasmStore() ethdb.KeyValueStore {
@@ -228,7 +233,7 @@ func (db *cachingDB) WasmTargets() []ethdb.WasmTarget {
 // OpenTrie opens the main account trie at a specific root hash.
 func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
	if db.triedb.IsVerkle() {
-		return trie.NewVerkleTrie(root, db.triedb, utils.NewPointCache(commitmentCacheItems))
+		return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
	}
	tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
	if err != nil {
@@ -314,3 +319,8 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
 func (db *cachingDB) TrieDB() *triedb.Database {
	return db.triedb
 }
+
+// PointCache returns the cache of evaluated curve points.
+func (db *cachingDB) PointCache() *utils.PointCache {
+	return db.pointCache
+}
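The shared point cache memoizes the evaluated curve point (the commitment to an address) so that verkle tree key derivation does not redo the expensive evaluation on every lookup. A sketch of the derivation it serves, using only identifiers that appear in this diff (the cache size mirrors the new pointCacheSize constant):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/trie/utils"
	"github.com/holiman/uint256"
)

func main() {
	cache := utils.NewPointCache(4096)

	addr := make([]byte, 20)
	addr[19] = 1

	// The first Get evaluates the address commitment; later Gets for the
	// same address return the cached point.
	point := cache.Get(addr)

	// Derive the tree key of the account's version leaf from the cached point.
	var zero uint256.Int
	key := utils.GetTreeKeyWithEvaluatedAddress(point, &zero, utils.VersionLeafKey)
	fmt.Printf("verkle key: %x\n", key)
}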
diff --git a/core/state/journal.go b/core/state/journal.go
index 37775c602b..8cd3c84617 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -153,7 +153,8 @@ type (
	storageChange struct {
		account   *common.Address
		key       common.Hash
-		prevvalue *common.Hash
+		prevvalue common.Hash
+		origvalue common.Hash
	}
	codeChange struct {
		account *common.Address
@@ -300,7 +301,7 @@ func (ch codeChange) copy() journalEntry {
 }

 func (ch storageChange) revert(s *StateDB) {
-	s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue)
+	s.getStateObject(*ch.account).setState(ch.key, ch.prevvalue, ch.origvalue)
 }

 func (ch storageChange) dirtied() *common.Address {
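Carrying origvalue in the journal entry lets a revert decide whether the slot has returned to its block-start original, in which case setState drops the dirty marker entirely instead of recording a stale value. A toy model of that rule (illustrative types, not the real StateDB):

package main

import "fmt"

// slotState mimics stateObject's origin-aware setState: a slot only stays
// dirty while its current value differs from the block-start original.
type slotState struct {
	origin map[string]string
	dirty  map[string]string
}

func (s *slotState) setState(key, value string) {
	if value == s.origin[key] {
		delete(s.dirty, key) // reverted to original: not dirty anymore
		return
	}
	s.dirty[key] = value
}

func main() {
	s := &slotState{origin: map[string]string{"k": "A"}, dirty: map[string]string{}}
	s.setState("k", "B")
	fmt.Println(len(s.dirty)) // 1: modified
	s.setState("k", "A")
	fmt.Println(len(s.dirty)) // 0: back to origin, dirty marker dropped
}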
The trie will be opened if it's +// not loaded previously. An error will be returned if trie can't be loaded. +// +// If a new trie is opened, it will be cached within the state object to allow +// subsequent reads to expand the same trie instead of reloading from disk. func (s *stateObject) getTrie() (Trie, error) { if s.trie == nil { - // Try fetching from prefetcher first - if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no prefetcher - s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) - } - if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) - if err != nil { - return nil, err - } - s.trie = tr + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) + if err != nil { + return nil, err } + s.trie = tr } return s.trie, nil } -// GetState retrieves a value from the account storage trie. +// getPrefetchedTrie returns the associated trie, as populated by the prefetcher +// if it's available. +// +// Note, opposed to getTrie, this method will *NOT* blindly cache the resulting +// trie in the state object. The caller might want to do that, but it's cleaner +// to break the hidden interdependency between retrieving tries from the db or +// from the prefetcher. +func (s *stateObject) getPrefetchedTrie() Trie { + // If there's nothing to meaningfully return, let the user figure it out by + // pulling the trie from disk. + if s.data.Root == types.EmptyRootHash || s.db.prefetcher == nil { + return nil + } + // Attempt to retrieve the trie from the prefetcher + return s.db.prefetcher.trie(s.addrHash, s.data.Root) +} + +// GetState retrieves a value associated with the given storage key. func (s *stateObject) GetState(key common.Hash) common.Hash { value, _ := s.getState(key) return value } -// getState retrieves a value from the account storage trie and also returns if -// the slot is already dirty or not. -func (s *stateObject) getState(key common.Hash) (common.Hash, bool) { - // If we have a dirty value for this state entry, return it +// getState retrieves a value associated with the given storage key, along with +// its original value. +func (s *stateObject) getState(key common.Hash) (common.Hash, common.Hash) { + origin := s.GetCommittedState(key) value, dirty := s.dirtyStorage[key] if dirty { - return value, true + return value, origin } - // Otherwise return the entry's original value - return s.GetCommittedState(key), false + return origin, origin } -// GetCommittedState retrieves a value from the committed account storage trie. +// GetCommittedState retrieves the value associated with the specific key +// without any mutations caused in the current execution. func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // If we have a pending write or clean cached, return that if value, pending := s.pendingStorage[key]; pending { @@ -174,6 +192,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { // have been handles via pendingStorage above. 
// 2) we don't have new values, and can deliver empty response back if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed { + s.originStorage[key] = common.Hash{} // track the empty slot as origin value return common.Hash{} } // If no live objects are available, attempt to use snapshots @@ -220,57 +239,64 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { func (s *stateObject) SetState(key, value common.Hash) { // If the new value is the same as old, don't set. Otherwise, track only the // dirty changes, supporting reverting all of it back to no change. - prev, dirty := s.getState(key) + prev, origin := s.getState(key) if prev == value { return } - var prevvalue *common.Hash - if dirty { - prevvalue = &prev - } // New value is different, update and journal the change s.db.journal.append(storageChange{ account: &s.address, key: key, - prevvalue: prevvalue, + prevvalue: prev, + origvalue: origin, }) if s.db.logger != nil && s.db.logger.OnStorageChange != nil { s.db.logger.OnStorageChange(s.address, key, prev, value) } - s.setState(key, &value) + s.setState(key, value, origin) } -// setState updates a value in account dirty storage. If the value being set is -// nil (assuming journal revert), the dirtyness is removed. -func (s *stateObject) setState(key common.Hash, value *common.Hash) { - // If the first set is being reverted, undo the dirty marker - if value == nil { +// setState updates a value in account dirty storage. The dirtiness will be +// removed if the value being set equals to the original value. +func (s *stateObject) setState(key common.Hash, value common.Hash, origin common.Hash) { + // Storage slot is set back to its original value, undo the dirty marker + if value == origin { delete(s.dirtyStorage, key) return } - // Otherwise restore the previous value - s.dirtyStorage[key] = *value + s.dirtyStorage[key] = value } // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. -func (s *stateObject) finalise(prefetch bool) { +func (s *stateObject) finalise() { slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { - // If the slot is different from its original value, move it into the - // pending area to be committed at the end of the block (and prefetch - // the pathways). - if value != s.originStorage[key] { - s.pendingStorage[key] = value - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + if origin, exist := s.uncommittedStorage[key]; exist && origin == value { + // The slot is reverted to its original value, delete the entry + // to avoid thrashing the data structures. + delete(s.uncommittedStorage, key) + } else if exist { + // The slot is modified to another value and the slot has been + // tracked for commit, do nothing here. } else { - // Otherwise, the slot was reverted to its original value, remove it - // from the pending area to avoid thrashing the data strutures. - delete(s.pendingStorage, key) + // The slot is different from its original value and hasn't been + // tracked for commit yet. + s.uncommittedStorage[key] = s.GetCommittedState(key) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + } + // Aggregate the dirty storage slots into the pending area. It might + // be possible that the value of tracked slot here is same with the + // one in originStorage (e.g. 
the slot was modified in tx_a and then + // modified back in tx_b). We can't blindly remove it from pending + // map as the dirty slot might have been committed already (before the + // byzantium fork) and entry is necessary to modify the value back. + s.pendingStorage[key] = value + } + if s.db.prefetcher != nil && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { + if err := s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch); err != nil { + log.Error("Failed to prefetch slots", "addr", s.address, "slots", len(slotsToPrefetch), "err", err) } - } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { - s.db.prefetcher.prefetch(s.addrHash, s.data.Root, s.address, slotsToPrefetch) } if len(s.dirtyStorage) > 0 { s.dirtyStorage = make(Storage) @@ -287,29 +313,29 @@ // loading or updating of the trie, an error will be returned. Furthermore, // this function will return the mutated storage trie, or nil if there is no // storage change at all. +// +// It assumes all the dirty storage slots have been finalized before. func (s *stateObject) updateTrie() (Trie, error) { - // Make sure all dirty slots are finalized into the pending storage area - s.finalise(false) - // Short circuit if nothing changed, don't bother with hashing anything - if len(s.pendingStorage) == 0 { + if len(s.uncommittedStorage) == 0 { return s.trie, nil } - // The snapshot storage map for the object - var ( - storage map[common.Hash][]byte - origin map[common.Hash][]byte - ) - tr, err := s.getTrie() - if err != nil { - s.db.setError(err) - return nil, err + // Retrieve a prefetcher populated trie, or fall back to the database + tr := s.getPrefetchedTrie() + if tr != nil { + // Prefetcher returned a live trie, swap it out for the current one + s.trie = tr + } else { + // Fetcher not running or empty trie, fallback to the database trie + var err error + tr, err = s.getTrie() + if err != nil { + s.db.setError(err) + return nil, err + } } - // Insert all the pending storage updates into the trie - usedStorage := make([][]byte, 0, len(s.pendingStorage)) - - // Perform trie updates before deletions. This prevents resolution of unnecessary trie nodes - // in circumstances similar to the following: + // Perform trie updates before deletions. This prevents resolution of unnecessary trie nodes + // in circumstances similar to the following: // // Consider nodes `A` and `B` who share the same full node parent `P` and have no other siblings. // During the execution of a block: @@ -318,57 +344,32 @@ // If the deletion is handled first, then `P` would be left with only one child, thus collapsed // into a shortnode. This requires `B` to be resolved from disk. // Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
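// Editor's note (illustrative, not part of the diff): putting the maps above
// together, a single slot k with committed pre-state value A moves through
// the state object as follows:
//
//	SetState(k, B)  -> dirtyStorage[k] = B (journal records prev=A, origin=A)
//	finalise()      -> pendingStorage[k] = B, uncommittedStorage[k] = A, dirty map reset
//	updateTrie()    -> B written into the storage trie, uncommittedStorage emptied
//	commit()        -> originStorage[k] = B, pendingStorage reset (see commitStorage below)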
- var deletions []common.Hash - for key, value := range s.pendingStorage { + var ( + deletions []common.Hash + used = make([][]byte, 0, len(s.uncommittedStorage)) + ) + for key, origin := range s.uncommittedStorage { // Skip noop changes, persist actual changes - if value == s.originStorage[key] { + value, exist := s.pendingStorage[key] + if value == origin { + log.Error("Storage update was noop", "address", s.address, "slot", key) + continue + } + if !exist { + log.Error("Storage slot is not found in pending area", "addr", s.address, "slot", key) continue } - prev := s.originStorage[key] - s.originStorage[key] = value - - var encoded []byte // rlp-encoded value to be used by the snapshot if (value != common.Hash{}) { - // Encoding []byte cannot fail, ok to ignore the error. - trimmed := common.TrimLeftZeroes(value[:]) - encoded, _ = rlp.EncodeToBytes(trimmed) - if err := tr.UpdateStorage(s.address, key[:], trimmed); err != nil { + if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil { s.db.setError(err) return nil, err } - s.db.StorageUpdated += 1 + s.db.StorageUpdated.Add(1) } else { deletions = append(deletions, key) } - // Cache the mutated storage slots until commit - if storage == nil { - if storage = s.db.storages[s.addrHash]; storage == nil { - storage = make(map[common.Hash][]byte) - s.db.storages[s.addrHash] = storage - } - } - khash := crypto.HashData(s.db.hasher, key[:]) - storage[khash] = encoded // encoded will be nil if it's deleted - - // Cache the original value of mutated storage slots - if origin == nil { - if origin = s.db.storagesOrigin[s.address]; origin == nil { - origin = make(map[common.Hash][]byte) - s.db.storagesOrigin[s.address] = origin - } - } - // Track the original value of slot only if it's mutated first time - if _, ok := origin[khash]; !ok { - if prev == (common.Hash{}) { - origin[khash] = nil // nil if it was not present previously - } else { - // Encoding []byte cannot fail, ok to ignore the error. - b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:])) - origin[khash] = b - } - } // Cache the items for preloading - usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure + used = append(used, common.CopyBytes(key[:])) // Copy needed for closure } if s.db.Deterministic() { sort.Slice(deletions, func(i, j int) bool { return bytes.Compare(deletions[i][:], deletions[j][:]) < 0 }) @@ -378,17 +379,12 @@ func (s *stateObject) updateTrie() (Trie, error) { s.db.setError(err) return nil, err } - s.db.StorageDeleted += 1 - } - // If no slots were touched, issue a warning as we shouldn't have done all - // the above work in the first place - if len(usedStorage) == 0 { - log.Error("State object update was noop", "addr", s.address, "slots", len(s.pendingStorage)) + s.db.StorageDeleted.Add(1) } if s.db.prefetcher != nil { - s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) + s.db.prefetcher.used(s.addrHash, s.data.Root, used) } - s.pendingStorage = make(Storage) // reset pending map + s.uncommittedStorage = make(Storage) // empties the commit markers return tr, nil } @@ -404,30 +400,79 @@ func (s *stateObject) updateRoot() { s.data.Root = tr.Hash() } -// commit obtains a set of dirty storage trie nodes and updates the account data. -// The returned set can be nil if nothing to commit. This function assumes all -// storage mutations have already been flushed into trie by updateRoot.
+// commitStorage overwrites the clean storage with the storage changes and +// fulfills the storage diffs into the given accountUpdate struct. +func (s *stateObject) commitStorage(op *accountUpdate) { + var ( + buf = crypto.NewKeccakState() + encode = func(val common.Hash) []byte { + if val == (common.Hash{}) { + return nil + } + blob, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(val[:])) + return blob + } + ) + for key, val := range s.pendingStorage { + // Skip the noop storage changes, it might be possible the value + // of tracked slot is same in originStorage and pendingStorage + // map, e.g. the storage slot is modified in tx_a and then reset + // back in tx_b. + if val == s.originStorage[key] { + continue + } + hash := crypto.HashData(buf, key[:]) + if op.storages == nil { + op.storages = make(map[common.Hash][]byte) + } + op.storages[hash] = encode(val) + if op.storagesOrigin == nil { + op.storagesOrigin = make(map[common.Hash][]byte) + } + op.storagesOrigin[hash] = encode(s.originStorage[key]) + + // Overwrite the clean value of storage slots + s.originStorage[key] = val + } + s.pendingStorage = make(Storage) +} + +// commit obtains the account changes (metadata, storage slots, code) caused by +// state execution along with the dirty storage trie nodes. // // Note, commit may run concurrently across all the state objects. Do not assume // thread-safe access to the statedb. -func (s *stateObject) commit() (*trienode.NodeSet, error) { - // Short circuit if trie is not even loaded, don't bother with committing anything - if s.trie == nil { +func (s *stateObject) commit() (*accountUpdate, *trienode.NodeSet, error) { + // commit the account metadata changes + op := &accountUpdate{ + address: s.address, + data: types.SlimAccountRLP(s.data), + } + if s.origin != nil { + op.origin = types.SlimAccountRLP(*s.origin) + } + // commit the contract code if it's modified + if s.dirtyCode { + op.code = &contractCode{ + hash: common.BytesToHash(s.CodeHash()), + blob: s.code, + } + s.dirtyCode = false // reset the dirty flag + } + // Commit storage changes and the associated storage trie + s.commitStorage(op) + if len(op.storages) == 0 { + // nothing changed, don't bother to commit the trie s.origin = s.data.Copy() - return nil, nil + return op, nil, nil } - // The trie is currently in an open state and could potentially contain - // cached mutations. Call commit to acquire a set of nodes that have been - // modified, the set can be nil if nothing to commit. root, nodes, err := s.trie.Commit(false) if err != nil { - return nil, err + return nil, nil, err } s.data.Root = root - - // Update original account data after commit s.origin = s.data.Copy() - return nodes, nil + return op, nodes, nil } // AddBalance adds amount to s's balance. 
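As an aside, the encode helper above produces the "prefix-zero-trimmed RLP" form referenced throughout this diff. A minimal, self-contained sketch of that encoding (hypothetical value, for illustration only, not part of the patch):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A 32-byte slot holding the value 42: only the last byte is non-zero.
	val := common.HexToHash("0x2a")
	trimmed := common.TrimLeftZeroes(val[:]) // []byte{0x2a}
	blob, _ := rlp.EncodeToBytes(trimmed)    // encoding a byte slice cannot fail
	fmt.Printf("%x\n", blob)                 // prints "2a"
	// The zero hash is special-cased to nil by the helper above, so deleted
	// slots are stored as nil rather than as the RLP of an empty byte slice.
}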
@@ -470,18 +515,19 @@ func (s *stateObject) setBalance(amount *uint256.Int) { func (s *stateObject) deepCopy(db *StateDB) *stateObject { obj := &stateObject{ - db: db, - address: s.address, - addrHash: s.addrHash, - origin: s.origin, - data: s.data, - code: s.code, - originStorage: s.originStorage.Copy(), - pendingStorage: s.pendingStorage.Copy(), - dirtyStorage: s.dirtyStorage.Copy(), - dirtyCode: s.dirtyCode, - selfDestructed: s.selfDestructed, - newContract: s.newContract, + db: db, + address: s.address, + addrHash: s.addrHash, + origin: s.origin, + data: s.data, + code: s.code, + originStorage: s.originStorage.Copy(), + pendingStorage: s.pendingStorage.Copy(), + dirtyStorage: s.dirtyStorage.Copy(), + uncommittedStorage: s.uncommittedStorage.Copy(), + dirtyCode: s.dirtyCode, + selfDestructed: s.selfDestructed, + newContract: s.newContract, } if s.trie != nil { obj.trie = db.db.CopyTrie(s.trie) diff --git a/core/state/statedb.go b/core/state/statedb.go index ca9f778e0a..87c78e85dd 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,13 +18,14 @@ package state import ( - "bytes" + "errors" "fmt" "maps" "math/big" "slices" "sort" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -38,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" + "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" "golang.org/x/sync/errgroup" ) @@ -99,13 +101,6 @@ type StateDB struct { // It will be updated when the Commit is called. originalRoot common.Hash - // These maps hold the state changes (including the corresponding - // original value) that occurred in this **block**. - accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding - storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format - accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding - storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format - // This map holds 'live' objects, which will get modified while // processing a state transition. stateObjects map[common.Address]*stateObject @@ -170,12 +165,9 @@ type StateDB struct { TrieDBCommits time.Duration AccountUpdated int - StorageUpdated int + StorageUpdated atomic.Int64 AccountDeleted int - StorageDeleted int - - // Testing hooks - onCommit func(states *triestate.Set) // Hook invoked when commit is performed + StorageDeleted atomic.Int64 deterministic bool } @@ -199,10 +191,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) trie: tr, originalRoot: root, snaps: snaps, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), stateObjects: make(map[common.Address]*stateObject), stateObjectsDestruct: make(map[common.Address]*types.StateAccount), mutations: make(map[common.Address]*mutation), @@ -241,11 +229,25 @@ func (s *StateDB) SetLogger(l *tracing.Hooks) { // commit phase, most of the needed data is already hot. 
func (s *StateDB) StartPrefetcher(namespace string) { if s.prefetcher != nil { - s.prefetcher.close() + s.prefetcher.terminate(false) + s.prefetcher.report() s.prefetcher = nil } if s.snap != nil { s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) + + // With the switch to the Proof-of-Stake consensus algorithm, block production + // rewards are now handled at the consensus layer. Consequently, a block may + // have no state transitions if it contains no transactions and no withdrawals. + // In such cases, the account trie won't be scheduled for prefetching, leading + // to unnecessary error logs. + // + // To prevent this, the account trie is always scheduled for prefetching once + // the prefetcher is constructed. For more details, see: + // https://github.com/ethereum/go-ethereum/issues/29880 + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, nil); err != nil { + log.Error("Failed to prefetch account trie", "root", s.originalRoot, "err", err) + } } } @@ -253,7 +255,8 @@ func (s *StateDB) StartPrefetcher(namespace string) { // from the gathered metrics. func (s *StateDB) StopPrefetcher() { if s.prefetcher != nil { - s.prefetcher.close() + s.prefetcher.terminate(false) + s.prefetcher.report() s.prefetcher = nil } } @@ -373,7 +376,7 @@ func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash { return common.Hash{} } -// TxIndex returns the current transaction index set by Prepare. +// TxIndex returns the current transaction index set by SetTxContext. func (s *StateDB) TxIndex() int { return s.txIndex } @@ -402,7 +405,7 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { return common.Hash{} } -// GetState retrieves a value from the given account's storage trie. +// GetState retrieves the value associated with the specific key. func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -411,7 +414,8 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { return common.Hash{} } -// GetCommittedState retrieves a value from the given account's committed storage trie. +// GetCommittedState retrieves the value associated with the specific key +// without any mutations caused in the current execution. func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { @@ -588,9 +592,6 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common // updateStateObject writes the given object to the trie. func (s *StateDB) updateStateObject(obj *stateObject) { - // Track the amount of time wasted on updating the account from the trie - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - // Encode the account and update the account trie addr := obj.Address() if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { @@ -599,30 +600,10 @@ func (s *StateDB) updateStateObject(obj *stateObject) { if obj.dirtyCode { s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) } - // Cache the data until commit. Note, this update mechanism is not symmetric - // to the deletion, because whereas it is enough to track account updates - // at commit time, deletions need tracking at transaction boundary level to - // ensure we capture state clearing. 
- s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - - // Track the original value of mutated account, nil means it was not present. - // Skip if it has been tracked (because updateStateObject may be called - // multiple times in a block). - if _, ok := s.accountsOrigin[obj.address]; !ok { - if obj.origin == nil { - s.accountsOrigin[obj.address] = nil - } else { - s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin) - } - } } // deleteStateObject removes the given object from the state trie. func (s *StateDB) deleteStateObject(addr common.Address) { - // Track the amount of time wasted on deleting the account from the trie - defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) - - // Delete the account from the trie if err := s.trie.DeleteAccount(addr); err != nil { s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) } @@ -755,10 +736,6 @@ func (s *StateDB) Copy() *StateDB { trie: s.db.CopyTrie(s.trie), hasher: crypto.NewKeccakState(), originalRoot: s.originalRoot, - accounts: copySet(s.accounts), - storages: copy2DSet(s.storages), - accountsOrigin: copySet(s.accountsOrigin), - storagesOrigin: copy2DSet(s.storagesOrigin), stateObjects: make(map[common.Address]*stateObject, len(s.stateObjects)), stateObjectsDestruct: maps.Clone(s.stateObjectsDestruct), mutations: make(map[common.Address]*mutation, len(s.mutations)), @@ -819,12 +796,6 @@ func (s *StateDB) Copy() *StateDB { state.arbExtraData.activatedWasms[moduleHash] = asmMap } - // If there's a prefetcher running, make an inactive copy of it that can - // only access data but does not actively preload (since the user will not - // know that they need to explicitly terminate an active copy). - if s.prefetcher != nil { - state.prefetcher = s.prefetcher.copy() - } return state } @@ -890,15 +861,8 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { if _, ok := s.stateObjectsDestruct[obj.address]; !ok { s.stateObjectsDestruct[obj.address] = obj.origin } - // Note, we can't do this only at the end of a block because multiple - // transactions within the same block might self destruct and then - // resurrect an account; but the snapshotter needs both events. - delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) } else { - obj.finalise(true) // Prefetch slots in the background + obj.finalise() s.markUpdate(addr) } // At this point, also ship the address off to the precacher. The precacher @@ -907,7 +871,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure } if s.prefetcher != nil && len(addressesToPrefetch) > 0 { - s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch) + if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch); err != nil { + log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err) + } } // Invalidate journal because reverting across transactions is not allowed. 
s.clearJournalAndRefund() @@ -920,55 +886,52 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) - // If there was a trie prefetcher operating, it gets aborted and irrevocably - // modified after we start retrieving tries. Remove it from the statedb after - // this round of use. - // - // This is weird pre-byzantium since the first tx runs with a prefetcher and - // the remainder without, but pre-byzantium even the initial prefetcher is - // useless, so no sleep lost. - prefetcher := s.prefetcher + // If there was a trie prefetcher operating, terminate it async so that the + // individual storage tries can be updated as soon as the disk load finishes. if s.prefetcher != nil { + s.prefetcher.terminate(true) defer func() { - s.prefetcher.close() - s.prefetcher = nil + s.prefetcher.report() + s.prefetcher = nil // Pre-byzantium, unset any used up prefetcher }() } - // Although naively it makes sense to retrieve the account trie and then do - // the contract storage and account updates sequentially, that short circuits - // the account prefetcher. Instead, let's process all the storage updates - // first, giving the account prefetches just a few more milliseconds of time - // to pull useful data from disk. - start := time.Now() - if s.deterministic { - addressesToUpdate := make([]common.Address, 0, len(s.mutations)) - for addr := range s.mutations { - addressesToUpdate = append(addressesToUpdate, addr) - } - sort.Slice(addressesToUpdate, func(i, j int) bool { return bytes.Compare(addressesToUpdate[i][:], addressesToUpdate[j][:]) < 0 }) - for _, addr := range addressesToUpdate { - if obj := s.mutations[addr]; !obj.applied && !obj.isDelete() { - s.stateObjects[addr].updateRoot() - } - } - } else { - for addr, op := range s.mutations { - if op.applied { - continue - } - if op.isDelete() { - continue - } - s.stateObjects[addr].updateRoot() + // Process all storage updates concurrently. The state object update root + // method will internally call a blocking trie fetch from the prefetcher, + // so there's no need to explicitly wait for the prefetchers to finish. + var ( + start = time.Now() + workers errgroup.Group + ) + if s.db.TrieDB().IsVerkle() { + // Whilst MPT storage tries are independent, Verkle has one single trie + // for all the accounts and all the storage slots merged together. The + // former can thus be simply parallelized, but updating the latter will + // need concurrency support within the trie itself. That's a TODO for a + // later time. + workers.SetLimit(1) + } + for addr, op := range s.mutations { + if op.applied || op.isDelete() { + continue } + obj := s.stateObjects[addr] // closure for the task runner below + workers.Go(func() error { + obj.updateRoot() + return nil + }) } + workers.Wait() s.StorageUpdates += time.Since(start) // Now we're about to start to write changes to the trie. The trie is so far // _untouched_. We can check with the prefetcher, if it can give us a trie // which has the same root, but also has some content loaded into it. 
- if prefetcher != nil { - if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil { + start = time.Now() + + if s.prefetcher != nil { + if trie := s.prefetcher.trie(common.Hash{}, s.originalRoot); trie == nil { + log.Error("Failed to retrieve account pre-fetcher trie") + } else { + s.trie = trie } } @@ -1000,12 +963,17 @@ } usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure } + if s.deterministic { + sort.Slice(deletedAddrs, func(i, j int) bool { return deletedAddrs[i].Cmp(deletedAddrs[j]) < 0 }) + } for _, deletedAddr := range deletedAddrs { s.deleteStateObject(deletedAddr) s.AccountDeleted += 1 } - if prefetcher != nil { - prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) + s.AccountUpdates += time.Since(start) + + if s.prefetcher != nil { + s.prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) } // Track the amount of time wasted on hashing the account trie defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) @@ -1110,10 +1078,9 @@ } // deleteStorage is designed to delete the storage trie of a designated account. -// It could potentially be terminated if the storage size is excessively large, -// potentially leading to an out-of-memory panic. The function will make an attempt -// to utilize an efficient strategy if the associated state snapshot is reachable; -// otherwise, it will resort to a less-efficient approach. +// The function will make an attempt to utilize an efficient strategy if the +// associated state snapshot is reachable; otherwise, it will resort to a less +// efficient approach. func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) { var ( start = time.Now() @@ -1148,75 +1115,61 @@ } // handleDestruction processes all destruction markers and deletes the account -// and associated storage slots if necessary. There are four possible situations -// here: -// -// - the account was not existent and be marked as destructed -// -// - the account was not existent and be marked as destructed, -// however, it's resurrected later in the same block. +// and associated storage slots if necessary. There are four potential scenarios +// as follows: // -// - the account was existent and be marked as destructed -// -// - the account was existent and be marked as destructed, -// however it's resurrected later in the same block. +// (a) the account did not exist and is marked as destructed +// (b) the account did not exist and is marked as destructed, +// however, it's resurrected later in the same block. +// (c) the account existed and is marked as destructed +// (d) the account existed and is marked as destructed, +// however, it's resurrected later in the same block. // // In case (a), nothing needs be deleted, nil to nil transition can be ignored. -// // In case (b), nothing needs be deleted, nil is used as the original value for // newly created account and storages -// // In case (c), **original** account along with its storages should be deleted, // with their values be tracked as original value. -// // In case (d), **original** account along with its storages should be deleted, // with their values be tracked as original value.
-func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) error { - // Short circuit if geth is running with hash mode. This procedure can consume - // considerable time and storage deletion isn't supported in hash mode, thus - // preemptively avoiding unnecessary expenses. - if s.db.TrieDB().Scheme() == rawdb.HashScheme { - return nil - } +func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) { + var ( + nodes []*trienode.NodeSet + buf = crypto.NewKeccakState() + deletes = make(map[common.Hash]*accountDelete) + ) for addr, prev := range s.stateObjectsDestruct { - // The original account was non-existing, and it's marked as destructed - // in the scope of block. It can be case (a) or (b). - // - for (a), skip it without doing anything. - // - for (b), track account's original value as nil. It may overwrite - // the data cached in s.accountsOrigin set by 'updateStateObject'. - addrHash := crypto.Keccak256Hash(addr[:]) + // The account was non-existent, and it's marked as destructed in the scope + // of block. It can be either case (a) or (b) and will be interpreted as + // null->null state transition. + // - for (a), skip it without doing anything + // - for (b), the resurrected account with nil as original will be handled afterwards if prev == nil { - if _, ok := s.accounts[addrHash]; ok { - s.accountsOrigin[addr] = nil // case (b) - } continue } - // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'. - s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d) + // The account was existent, it can be either case (c) or (d). + addrHash := crypto.HashData(buf, addr.Bytes()) + op := &accountDelete{ + address: addr, + origin: types.SlimAccountRLP(*prev), + } + deletes[addrHash] = op - // Short circuit if the storage was empty. + // Short circuit if the origin storage was empty. if prev.Root == types.EmptyRootHash { continue } - // Remove storage slots belong to the account. + // Remove storage slots belonging to the account. slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) if err != nil { - return fmt.Errorf("failed to delete storage, err: %w", err) - } - if s.storagesOrigin[addr] == nil { - s.storagesOrigin[addr] = slots - } else { - // It can overwrite the data in s.storagesOrigin[addrHash] set by - // 'object.updateTrie'. - for key, val := range slots { - s.storagesOrigin[addr][key] = val - } - } - if err := nodes.Merge(set); err != nil { - return err + return nil, nil, fmt.Errorf("failed to delete storage, err: %w", err) } + op.storagesOrigin = slots + + // Aggregate the associated trie node changes. + nodes = append(nodes, set) } - return nil + return deletes, nodes, nil } // GetTrie returns the account trie. @@ -1224,21 +1177,15 @@ func (s *StateDB) GetTrie() Trie { return s.trie } -// Commit writes the state to the underlying in-memory trie database. -// Once the state is committed, tries cached in stateDB (including account -// trie, storage tries) will no longer be functional. A new state instance -// must be created with new root and updated database for accessing post- -// commit states. -// -// The associated block number of the state transition is also provided -// for more chain context. -func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { +// commit gathers the state mutations accumulated along with the associated +// trie changes, resetting all internal flags with the new state as the base. 
+func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) { if s.arbExtraData.arbTxFilter { - return common.Hash{}, ErrArbTxFilter + return nil, ErrArbTxFilter } // Short circuit in case any database failure occurred earlier. if s.dbErr != nil { - return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) + return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) } // Finalize any pending changes and merge everything into the tries s.IntermediateRoot(deleteEmptyObjects) @@ -1249,20 +1196,56 @@ accountTrieNodesDeleted int storageTrieNodesUpdated int storageTrieNodesDeleted int - nodes = trienode.NewMergedNodeSet() - wasmCodeWriter = s.db.WasmStore().NewBatch() + + lock sync.Mutex // protect two maps below + nodes = trienode.NewMergedNodeSet() // aggregated trie nodes + updates = make(map[common.Hash]*accountUpdate, len(s.mutations)) // aggregated account updates + + // merge aggregates the dirty trie nodes into the global set. + // + // Given that some accounts may be destroyed and then recreated within + // the same block, it's possible that a node set with the same owner + // may already exist. In such cases, these two sets are combined, with + // the later one overwriting the previous one if any nodes are modified + // or deleted in both sets. + // + // merge runs concurrently across all the state objects and account trie. + merge = func(set *trienode.NodeSet) error { + if set == nil { + return nil + } + lock.Lock() + defer lock.Unlock() + + updates, deletes := set.Size() + if set.Owner == (common.Hash{}) { + accountTrieNodesUpdated += updates + accountTrieNodesDeleted += deletes + } else { + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deletes + } + return nodes.Merge(set) + } ) - // Handle all state deletions first - if err := s.handleDestruction(nodes); err != nil { - return common.Hash{}, err + // Given that some accounts could be destroyed and then recreated within + // the same block, account deletions must be processed first. This ensures + // that the storage trie nodes deleted during destruction and recreated + // during subsequent resurrection can be combined correctly. + deletes, delNodes, err := s.handleDestruction() + if err != nil { + return nil, err + } + for _, set := range delNodes { + if err := merge(set); err != nil { + return nil, err + } } // Handle all state updates afterwards, concurrently to one another to shave // off some milliseconds from the commit operation. Also accumulate the code // writes to run in parallel with the computations.
- start := time.Now() var ( - code = s.db.DiskDB().NewBatch() - lock sync.Mutex + start = time.Now() root common.Hash workers errgroup.Group ) @@ -1283,15 +1266,8 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er } root = newroot - // Merge the dirty nodes of account trie into global set - lock.Lock() - defer lock.Unlock() - - if set != nil { - if err = nodes.Merge(set); err != nil { - return err - } - accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() + if err := merge(set); err != nil { + return err } s.AccountCommits = time.Since(start) return nil @@ -1309,128 +1285,128 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er } // Write any contract code associated with the state object obj := s.stateObjects[addr] - if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(code, common.BytesToHash(obj.CodeHash()), obj.code) - obj.dirtyCode = false + if obj == nil { + return nil, errors.New("missing state object") } // Run the storage updates concurrently to one another workers.Go(func() error { // Write any storage changes in the state object to its storage trie - set, err := obj.commit() + update, set, err := obj.commit() if err != nil { return err } - // Merge the dirty nodes of storage trie into global set. It is possible - // that the account was destructed and then resurrected in the same block. - // In this case, the node set is shared by both accounts. - lock.Lock() - defer lock.Unlock() - - if set != nil { - if err = nodes.Merge(set); err != nil { - return err - } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted + if err := merge(set); err != nil { + return err } + lock.Lock() + updates[obj.addrHash] = update s.StorageCommits = time.Since(start) // overwrite with the longest storage commit runtime + lock.Unlock() return nil }) } - // Schedule the code commits to run concurrently too. This shouldn't really - // take much since we don't often commit code, but since it's disk access, - // it's always yolo. 
- workers.Go(func() error { - if code.ValueSize() > 0 { - if err := code.Write(); err != nil { - log.Crit("Failed to commit dirty codes", "error", err) - } - } - return nil - }) - - // Arbitrum: write Stylus programs to disk - for moduleHash, asmMap := range s.arbExtraData.activatedWasms { - rawdb.WriteActivation(wasmCodeWriter, moduleHash, asmMap) - } - if len(s.arbExtraData.activatedWasms) > 0 { - s.arbExtraData.activatedWasms = make(map[common.Hash]ActivatedWasm) - } - - workers.Go(func() error { - if wasmCodeWriter.ValueSize() > 0 { - if err := wasmCodeWriter.Write(); err != nil { - log.Crit("Failed to commit dirty stylus codes", "error", err) - } - } - return nil - }) // Wait for everything to finish and update the metrics if err := workers.Wait(); err != nil { - return common.Hash{}, err + return nil, err } accountUpdatedMeter.Mark(int64(s.AccountUpdated)) - storageUpdatedMeter.Mark(int64(s.StorageUpdated)) + storageUpdatedMeter.Mark(s.StorageUpdated.Load()) accountDeletedMeter.Mark(int64(s.AccountDeleted)) - storageDeletedMeter.Mark(int64(s.StorageDeleted)) + storageDeletedMeter.Mark(s.StorageDeleted.Load()) accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) s.AccountUpdated, s.AccountDeleted = 0, 0 - s.StorageUpdated, s.StorageDeleted = 0, 0 + s.StorageUpdated.Store(0) + s.StorageDeleted.Store(0) - // If snapshotting is enabled, update the snapshot tree with this new version - if s.snap != nil { - start = time.Now() - // Only update if there's a state transition (skip empty Clique blocks) - if parent := s.snap.Root(); parent != root { - if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil { - log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) + // Clear all internal flags and update state root at the end. + s.mutations = make(map[common.Address]*mutation) + s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) + + origin := s.originalRoot + s.originalRoot = root + return newStateUpdate(origin, root, deletes, updates, nodes, s.arbExtraData.activatedWasms), nil +} + +// commitAndFlush is a wrapper of commit which also commits the state mutations +// to the configured data stores. 
+func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) { + ret, err := s.commit(deleteEmptyObjects) + if err != nil { + return nil, err + } + // Commit dirty contract code if any exists + if db := s.db.DiskDB(); db != nil && len(ret.codes) > 0 { + batch := db.NewBatch() + for _, code := range ret.codes { + rawdb.WriteCode(batch, code.hash, code.blob) + } + if err := batch.Write(); err != nil { + return nil, err + } + } + + if db := s.db.WasmStore(); db != nil && len(ret.activatedWasms) > 0 { + batch := db.NewBatch() + // Arbitrum: write Stylus programs to disk + for moduleHash, asmMap := range ret.activatedWasms { + rawdb.WriteActivation(batch, moduleHash, asmMap) + } + s.arbExtraData.activatedWasms = make(map[common.Hash]ActivatedWasm) + if err := batch.Write(); err != nil { + return nil, err + } + } + + if !ret.empty() { + // If snapshotting is enabled, update the snapshot tree with this new version + if s.snap != nil { + s.snap = nil + + start := time.Now() + if err := s.snaps.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil { + log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err) } - // Keep TriesInMemory diff layers in the memory, persistent layer is 129th. + // Keep 128 diff layers in the memory, persistent layer is 129th. // - head layer is paired with HEAD state // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, DefaultTriesInMemory); err != nil { - log.Warn("Failed to cap snapshot tree", "root", root, "layers", DefaultTriesInMemory, "err", err) + if err := s.snaps.Cap(ret.root, DefaultTriesInMemory); err != nil { + log.Warn("Failed to cap snapshot tree", "root", ret.root, "layers", DefaultTriesInMemory, "err", err) } + s.SnapshotCommits += time.Since(start) + } + // If trie database is enabled, commit the state update as a new layer + if db := s.db.TrieDB(); db != nil { + start := time.Now() + set := triestate.New(ret.accountsOrigin, ret.storagesOrigin) + if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, set); err != nil { + return nil, err + } + s.TrieDBCommits += time.Since(start) } - s.SnapshotCommits += time.Since(start) - s.snap = nil } - s.arbExtraData.unexpectedBalanceDelta.Set(new(big.Int)) + return ret, err +} - if root == (common.Hash{}) { - root = types.EmptyRootHash - } - origin := s.originalRoot - if origin == (common.Hash{}) { - origin = types.EmptyRootHash - } - if root != origin { - start = time.Now() - set := triestate.New(s.accountsOrigin, s.storagesOrigin) - if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil { - return common.Hash{}, err - } - s.originalRoot = root - s.TrieDBCommits += time.Since(start) - - if s.onCommit != nil { - s.onCommit(set) - } +// Commit writes the state mutations into the configured data stores. +// +// Once the state is committed, tries cached in stateDB (including account +// trie, storage tries) will no longer be functional. A new state instance +// must be created with new root and updated database for accessing post- +// commit states. +// +// The associated block number of the state transition is also provided +// for more chain context. 
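// An illustrative caller-side sketch (editorial, not part of this diff; the
// Commit signature is as below, the block/config accessors are the usual geth
// ones and hypothetical in this context):
//
//	root, err := statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
//	if err != nil {
//		return err
//	}
//	// All tries cached in statedb are now stale; reopen at the new root.
//	statedb, err = state.New(root, db, snaps)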
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { + ret, err := s.commitAndFlush(block, deleteEmptyObjects) + if err != nil { + return common.Hash{}, err } - // Clear all internal flags at the end of commit operation. - s.accounts = make(map[common.Hash][]byte) - s.storages = make(map[common.Hash]map[common.Hash][]byte) - s.accountsOrigin = make(map[common.Address][]byte) - s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - s.mutations = make(map[common.Address]*mutation) - s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) - return root, nil + return ret.root, nil } // Prepare handles the preparatory steps for executing a state transition with. @@ -1447,7 +1423,10 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // - Add coinbase to access list (EIP-3651) // - Reset transient storage (EIP-1153) func (s *StateDB) Prepare(rules params.Rules, sender, coinbase common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { - if rules.IsBerlin { + if rules.IsEIP2929 && rules.IsEIP4762 { + panic("eip2929 and eip4762 are both activated") + } + if rules.IsEIP2929 { // Clear out any leftover from previous executions al := newAccessList() s.accessList = al @@ -1509,41 +1488,9 @@ func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addre return s.accessList.Contains(addr, slot) } -// convertAccountSet converts a provided account set from address keyed to hash keyed. -func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { - ret := make(map[common.Hash]struct{}, len(set)) - for addr := range set { - obj, exist := s.stateObjects[addr] - if !exist { - ret[crypto.Keccak256Hash(addr[:])] = struct{}{} - } else { - ret[obj.addrHash] = struct{}{} - } - } - return ret -} - -// copySet returns a deep-copied set. -func copySet[k comparable](set map[k][]byte) map[k][]byte { - copied := make(map[k][]byte, len(set)) - for key, val := range set { - copied[key] = common.CopyBytes(val) - } - return copied -} - -// copy2DSet returns a two-dimensional deep-copied set. -func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte { - copied := make(map[k]map[common.Hash][]byte, len(set)) - for addr, subset := range set { - copied[addr] = make(map[common.Hash][]byte, len(subset)) - for key, val := range subset { - copied[addr][key] = common.CopyBytes(val) - } - } - return copied -} - +// markDelete is invoked when an account is deleted but the deletion is +// not yet committed. 
The pending mutation is cached and will be applied +// all together func (s *StateDB) markDelete(addr common.Address) { if _, ok := s.mutations[addr]; !ok { s.mutations[addr] = &mutation{} @@ -1559,3 +1506,7 @@ func (s *StateDB) markUpdate(addr common.Address) { s.mutations[addr].applied = false s.mutations[addr].typ = update } + +func (s *StateDB) PointCache() *utils.PointCache { + return s.db.PointCache() +} diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index ca676160e0..b6969b11d2 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -36,7 +36,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/trie/triestate" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/pathdb" "github.com/holiman/uint256" @@ -180,9 +179,21 @@ func (test *stateTest) run() bool { roots []common.Hash accountList []map[common.Address][]byte storageList []map[common.Address]map[common.Hash][]byte - onCommit = func(states *triestate.Set) { - accountList = append(accountList, copySet(states.Accounts)) - storageList = append(storageList, copy2DSet(states.Storages)) + copyUpdate = func(update *stateUpdate) { + accounts := make(map[common.Address][]byte, len(update.accountsOrigin)) + for key, val := range update.accountsOrigin { + accounts[key] = common.CopyBytes(val) + } + accountList = append(accountList, accounts) + + storages := make(map[common.Address]map[common.Hash][]byte, len(update.storagesOrigin)) + for addr, subset := range update.storagesOrigin { + storages[addr] = make(map[common.Hash][]byte, len(subset)) + for key, val := range subset { + storages[addr][key] = common.CopyBytes(val) + } + } + storageList = append(storageList, storages) } disk = rawdb.NewMemoryDatabase() tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults}) @@ -210,8 +221,6 @@ func (test *stateTest) run() bool { if err != nil { panic(err) } - state.onCommit = onCommit - for i, action := range actions { if i%test.chunk == 0 && i != 0 { if byzantium { @@ -227,14 +236,15 @@ func (test *stateTest) run() bool { } else { state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary } - nroot, err := state.Commit(0, true) // call commit at the block boundary + ret, err := state.commitAndFlush(0, true) // call commit at the block boundary if err != nil { panic(err) } - if nroot == root { - return true // filter out non-change state transition + if ret.empty() { + return true } - roots = append(roots, nroot) + copyUpdate(ret) + roots = append(roots, ret.root) } for i := 0; i < len(test.actions); i++ { root := types.EmptyRootHash diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index e32257abf2..1350b9b090 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -1287,3 +1287,47 @@ func TestDeleteStorage(t *testing.T) { t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes) } } + +func TestStorageDirtiness(t *testing.T) { + var ( + disk = rawdb.NewMemoryDatabase() + tdb = triedb.NewDatabase(disk, nil) + db = NewDatabaseWithNodeDB(disk, tdb) + state, _ = New(types.EmptyRootHash, db, nil) + addr = common.HexToAddress("0x1") + checkDirty = func(key common.Hash, value common.Hash, dirty bool) { + obj := state.getStateObject(addr) + v, exist := obj.dirtyStorage[key] + if exist != dirty { + t.Fatalf("Unexpected dirty marker, want: %t, got: %t", dirty, exist) + } 
+ if v != value { + t.Fatalf("Unexpected storage slot, want: %x, got: %x", value, v) + } + } + ) + state.CreateAccount(addr) + + // the storage change is noop, no dirty marker + state.SetState(addr, common.Hash{0x1}, common.Hash{}) + checkDirty(common.Hash{0x1}, common.Hash{}, false) + + // the storage change is valid, dirty marker is expected + snap := state.Snapshot() + state.SetState(addr, common.Hash{0x1}, common.Hash{0x1}) + checkDirty(common.Hash{0x1}, common.Hash{0x1}, true) + + // the storage change is reverted, dirtiness should be revoked + state.RevertToSnapshot(snap) + checkDirty(common.Hash{0x1}, common.Hash{}, false) + + // the storage is reset back to its original value, dirtiness should be revoked + state.SetState(addr, common.Hash{0x1}, common.Hash{0x1}) + snap = state.Snapshot() + state.SetState(addr, common.Hash{0x1}, common.Hash{}) + checkDirty(common.Hash{0x1}, common.Hash{}, false) + + // the storage change is reverted, dirty value should be set back + state.RevertToSnapshot(snap) + checkDirty(common.Hash{0x1}, common.Hash{0x1}, true) +} diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go new file mode 100644 index 0000000000..ca92a1ed96 --- /dev/null +++ b/core/state/stateupdate.go @@ -0,0 +1,135 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package state + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +// contractCode represents a contract code with associated metadata. +type contractCode struct { + hash common.Hash // hash is the cryptographic hash of the contract code. + blob []byte // blob is the binary representation of the contract code. +} + +// accountDelete represents an operation for deleting an Ethereum account. +type accountDelete struct { + address common.Address // address is the unique account identifier + origin []byte // origin is the original value of account data in slim-RLP encoding. + storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format. +} + +// accountUpdate represents an operation for updating an Ethereum account. +type accountUpdate struct { + address common.Address // address is the unique account identifier + data []byte // data is the slim-RLP encoded account data. + origin []byte // origin is the original value of account data in slim-RLP encoding. + code *contractCode // code represents mutated contract code; nil means it's not modified. + storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format. + storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
+} + +// stateUpdate represents the difference between two states resulting from state +// execution. It contains information about mutated contract codes, accounts, +// and storage slots, along with their original values. +type stateUpdate struct { + originRoot common.Hash // hash of the state before applying mutation + root common.Hash // hash of the state after applying mutation + destructs map[common.Hash]struct{} // destructs contains the list of destructed accounts + accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding + accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding + storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format + storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format + codes map[common.Address]contractCode // codes contains the set of dirty codes + activatedWasms map[common.Hash]ActivatedWasm // newly activated WASMs + nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes +} + +// empty returns a flag indicating the state transition is empty or not. +func (sc *stateUpdate) empty() bool { + return sc.originRoot == sc.root +} + +// newStateUpdate constructs a state update object, representing the differences +// between two states by performing state execution. It aggregates the given +// account deletions and account updates to form a comprehensive state update. +func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet, activatedWasms map[common.Hash]ActivatedWasm) *stateUpdate { + var ( + destructs = make(map[common.Hash]struct{}) + accounts = make(map[common.Hash][]byte) + accountsOrigin = make(map[common.Address][]byte) + storages = make(map[common.Hash]map[common.Hash][]byte) + storagesOrigin = make(map[common.Address]map[common.Hash][]byte) + codes = make(map[common.Address]contractCode) + ) + // Due to the fact that some accounts could be destructed and resurrected + // within the same block, the deletions must be aggregated first. + for addrHash, op := range deletes { + addr := op.address + destructs[addrHash] = struct{}{} + accountsOrigin[addr] = op.origin + if len(op.storagesOrigin) > 0 { + storagesOrigin[addr] = op.storagesOrigin + } + } + // Aggregate account updates then. + for addrHash, op := range updates { + // Aggregate dirty contract codes if they are available. + addr := op.address + if op.code != nil { + codes[addr] = *op.code + } + // Aggregate the account changes. The original account value will only + // be tracked if it's not present yet. + accounts[addrHash] = op.data + if _, found := accountsOrigin[addr]; !found { + accountsOrigin[addr] = op.origin + } + // Aggregate the storage changes. The original storage slot value will + // only be tracked if it's not present yet. 
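// Editorial example (illustrative, not part of the diff): why existing origin
// entries must win in these merges. If an account holding slot k=A is
// destructed and then recreated within the block, the delete op has already
// recorded origin[k]=A above; the recreated account's update op sees an empty
// committed value for k, so its origin must not overwrite the true pre-block
// value A.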
+ if len(op.storages) > 0 { + storages[addrHash] = op.storages + } + if len(op.storagesOrigin) > 0 { + origin := storagesOrigin[addr] + if origin == nil { + storagesOrigin[addr] = op.storagesOrigin + continue + } + for key, slot := range op.storagesOrigin { + if _, found := origin[key]; !found { + origin[key] = slot + } + } + storagesOrigin[addr] = origin + } + } + return &stateUpdate{ + originRoot: types.TrieRootHash(originRoot), + root: types.TrieRootHash(root), + destructs: destructs, + accounts: accounts, + accountsOrigin: accountsOrigin, + storages: storages, + storagesOrigin: storagesOrigin, + codes: codes, + activatedWasms: activatedWasms, + nodes: nodes, + } +} diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index c2a49417d4..ce94ab5139 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -17,6 +17,7 @@ package state import ( + "errors" "sync" "github.com/ethereum/go-ethereum/common" @@ -27,6 +28,10 @@ import ( var ( // triePrefetchMetricsPrefix is the prefix under which to publish the metrics. triePrefetchMetricsPrefix = "trie/prefetch/" + + // errTerminated is returned if a fetcher is attempted to be operated after it + // has already terminated. + errTerminated = errors.New("fetcher is already terminated") ) // triePrefetcher is an active prefetcher, which receives accounts or storage @@ -37,160 +42,126 @@ var ( type triePrefetcher struct { db Database // Database to fetch trie nodes through root common.Hash // Root hash of the account trie for metrics - fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies. fetchers map[string]*subfetcher // Subfetchers for each trie + term chan struct{} // Channel to signal interruption deliveryMissMeter metrics.Meter accountLoadMeter metrics.Meter accountDupMeter metrics.Meter - accountSkipMeter metrics.Meter accountWasteMeter metrics.Meter storageLoadMeter metrics.Meter storageDupMeter metrics.Meter - storageSkipMeter metrics.Meter storageWasteMeter metrics.Meter } func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { prefix := triePrefetchMetricsPrefix + namespace - p := &triePrefetcher{ + return &triePrefetcher{ db: db, root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map + term: make(chan struct{}), deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), - accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), - storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), } - return p } -// close iterates over all the subfetchers, aborts any that were left spinning -// and reports the stats to the metrics subsystem. -func (p *triePrefetcher) close() { +// terminate iterates over all the subfetchers and issues a termination request +// to all of them. Depending on the async parameter, the method will either block +// until all subfetchers spin down, or return immediately. 
+func (p *triePrefetcher) terminate(async bool) {
+	// Short circuit if the fetcher is already closed
+	select {
+	case <-p.term:
+		return
+	default:
+	}
+	// Terminate all sub-fetchers, sync or async, depending on the request
 	for _, fetcher := range p.fetchers {
-		fetcher.abort() // safe to do multiple times
-
-		if metrics.Enabled {
-			if fetcher.root == p.root {
-				p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
-				p.accountDupMeter.Mark(int64(fetcher.dups))
-				p.accountSkipMeter.Mark(int64(len(fetcher.tasks)))
-
-				for _, key := range fetcher.used {
-					delete(fetcher.seen, string(key))
-				}
-				p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
-			} else {
-				p.storageLoadMeter.Mark(int64(len(fetcher.seen)))
-				p.storageDupMeter.Mark(int64(fetcher.dups))
-				p.storageSkipMeter.Mark(int64(len(fetcher.tasks)))
-
-				for _, key := range fetcher.used {
-					delete(fetcher.seen, string(key))
-				}
-				p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
-			}
-		}
+		fetcher.terminate(async)
 	}
-	// Clear out all fetchers (will crash on a second call, deliberate)
-	p.fetchers = nil
+	close(p.term)
 }
 
-// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data
-// already loaded will be copied over, but no goroutines will be started. This
-// is mostly used in the miner which creates a copy of its actively mutated
-// state to be sealed while it may further mutate the state.
-func (p *triePrefetcher) copy() *triePrefetcher {
-	copy := &triePrefetcher{
-		db:      p.db,
-		root:    p.root,
-		fetches: make(map[string]Trie), // Active prefetchers use the fetches map
-
-		deliveryMissMeter: p.deliveryMissMeter,
-		accountLoadMeter:  p.accountLoadMeter,
-		accountDupMeter:   p.accountDupMeter,
-		accountSkipMeter:  p.accountSkipMeter,
-		accountWasteMeter: p.accountWasteMeter,
-		storageLoadMeter:  p.storageLoadMeter,
-		storageDupMeter:   p.storageDupMeter,
-		storageSkipMeter:  p.storageSkipMeter,
-		storageWasteMeter: p.storageWasteMeter,
+// report aggregates the pre-fetching and usage metrics and reports them.
+func (p *triePrefetcher) report() {
+	if !metrics.Enabled {
+		return
 	}
-	// If the prefetcher is already a copy, duplicate the data
-	if p.fetches != nil {
-		for root, fetch := range p.fetches {
-			if fetch == nil {
-				continue
+	for _, fetcher := range p.fetchers {
+		fetcher.wait() // ensure the fetcher's idle before poking in its internals
+
+		if fetcher.root == p.root {
+			p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
+			p.accountDupMeter.Mark(int64(fetcher.dups))
+			for _, key := range fetcher.used {
+				delete(fetcher.seen, string(key))
 			}
-			copy.fetches[root] = p.db.CopyTrie(fetch)
+			p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
+		} else {
+			p.storageLoadMeter.Mark(int64(len(fetcher.seen)))
+			p.storageDupMeter.Mark(int64(fetcher.dups))
+			for _, key := range fetcher.used {
+				delete(fetcher.seen, string(key))
+			}
+			p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
 		}
-		return copy
-	}
-	// Otherwise we're copying an active fetcher, retrieve the current states
-	for id, fetcher := range p.fetchers {
-		copy.fetches[id] = fetcher.peek()
 	}
-	return copy
 }
 
-// prefetch schedules a batch of trie items to prefetch.
-func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) {
-	// If the prefetcher is an inactive one, bail out
-	if p.fetches != nil {
-		return
+// prefetch schedules a batch of trie items to prefetch. After the prefetcher is
+// closed, all the following tasks scheduled will not be executed and an error
+// will be returned.
+//
+// prefetch is called from two locations:
+//
+//  1. Finalize of the state-objects storage roots. This happens at the end
+//     of every transaction, meaning that if several transactions touch
+//     upon the same contract, the parameters invoking this method may be
+//     repeated.
+//  2. Finalize of the main account trie. This happens only once per block.
+func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, addr common.Address, keys [][]byte) error {
+	// Ensure the subfetcher is still alive
+	select {
+	case <-p.term:
+		return errTerminated
+	default:
 	}
-	// Active fetcher, schedule the retrievals
 	id := p.trieID(owner, root)
 	fetcher := p.fetchers[id]
 	if fetcher == nil {
 		fetcher = newSubfetcher(p.db, p.root, owner, root, addr)
 		p.fetchers[id] = fetcher
 	}
-	fetcher.schedule(keys)
+	return fetcher.schedule(keys)
 }
 
-// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
-// have it.
+// trie returns the trie matching the root hash, blocking until the fetcher of
+// the given trie terminates. If no fetcher exists for the request, nil will be
+// returned.
 func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
-	// If the prefetcher is inactive, return from existing deep copies
-	id := p.trieID(owner, root)
-	if p.fetches != nil {
-		trie := p.fetches[id]
-		if trie == nil {
-			p.deliveryMissMeter.Mark(1)
-			return nil
-		}
-		return p.db.CopyTrie(trie)
-	}
-	// Otherwise the prefetcher is active, bail if no trie was prefetched for this root
-	fetcher := p.fetchers[id]
+	// Bail if no trie was prefetched for this root
+	fetcher := p.fetchers[p.trieID(owner, root)]
 	if fetcher == nil {
+		log.Error("Prefetcher failed to load trie", "owner", owner, "root", root)
 		p.deliveryMissMeter.Mark(1)
 		return nil
 	}
-	// Interrupt the prefetcher if it's by any chance still running and return
-	// a copy of any pre-loaded trie.
-	fetcher.abort() // safe to do multiple times
-
-	trie := fetcher.peek()
-	if trie == nil {
-		p.deliveryMissMeter.Mark(1)
-		return nil
-	}
-	return trie
+	// Subfetcher exists, retrieve its trie
+	return fetcher.peek()
 }
 
 // used marks a batch of state items used to allow creating statistics as to
-// how useful or wasteful the prefetcher is.
+// how useful or wasteful the fetcher is.
 func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
 	if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
+		fetcher.wait() // ensure the fetcher's idle before poking in its internals
 		fetcher.used = used
 	}
 }
@@ -218,10 +189,9 @@ type subfetcher struct {
 	tasks [][]byte   // Items queued up for retrieval
 	lock  sync.Mutex // Lock protecting the task queue
 
-	wake chan struct{}  // Wake channel if a new task is scheduled
-	stop chan struct{}  // Channel to interrupt processing
-	term chan struct{}  // Channel to signal interruption
-	copy chan chan Trie // Channel to request a copy of the current trie
+	wake chan struct{} // Wake channel if a new task is scheduled
+	stop chan struct{} // Channel to interrupt processing
+	term chan struct{} // Channel to signal interruption
 
 	seen map[string]struct{} // Tracks the entries already loaded
 	dups int                 // Number of duplicate preload tasks
@@ -240,7 +210,6 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo
 		wake: make(chan struct{}, 1),
 		stop: make(chan struct{}),
 		term: make(chan struct{}),
-		copy: make(chan chan Trie),
 		seen: make(map[string]struct{}),
 	}
 	go sf.loop()
@@ -248,50 +217,61 @@ func newSubfetcher(db Database, state common.Hash, owner common.Hash, root commo
 }
 
 // schedule adds a batch of trie keys to the queue to prefetch.
-func (sf *subfetcher) schedule(keys [][]byte) {
+func (sf *subfetcher) schedule(keys [][]byte) error {
+	// Ensure the subfetcher is still alive
+	select {
+	case <-sf.term:
+		return errTerminated
+	default:
+	}
 	// Append the tasks to the current queue
 	sf.lock.Lock()
 	sf.tasks = append(sf.tasks, keys...)
 	sf.lock.Unlock()
-	// Notify the prefetcher, it's fine if it's already terminated
+	// Notify the background thread to execute scheduled tasks
 	select {
 	case sf.wake <- struct{}{}:
+		// Wake signal sent
 	default:
+		// Wake signal not sent as a previous one is already queued
 	}
+	return nil
 }
 
-// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it
-// is currently.
-func (sf *subfetcher) peek() Trie {
-	ch := make(chan Trie)
-	select {
-	case sf.copy <- ch:
-		// Subfetcher still alive, return copy from it
-		return <-ch
+// wait blocks until the subfetcher terminates. This method is used to block on
+// an async termination before accessing internal fields from the fetcher.
+func (sf *subfetcher) wait() {
+	<-sf.term
+}
 
-	case <-sf.term:
-		// Subfetcher already terminated, return a copy directly
-		if sf.trie == nil {
-			return nil
-		}
-		return sf.db.CopyTrie(sf.trie)
-	}
+// peek retrieves the fetcher's trie, populated with any pre-fetched data. The
+// returned trie will be a shallow copy, so modifying it will break subsequent
+// peeks for the original data. The method will block until all the scheduled
+// data has been loaded and the fetcher terminated.
+func (sf *subfetcher) peek() Trie {
+	// Block until the fetcher terminates, then retrieve the trie
+	sf.wait()
+	return sf.trie
 }
 
-// abort interrupts the subfetcher immediately. It is safe to call abort multiple
-// times but it is not thread safe.
-func (sf *subfetcher) abort() {
+// terminate requests the subfetcher to stop accepting new tasks and spin down
+// as soon as everything is loaded. Depending on the async parameter, the method
+// will either block until all disk loads finish or return immediately.
+func (sf *subfetcher) terminate(async bool) {
 	select {
 	case <-sf.stop:
 	default:
 		close(sf.stop)
 	}
+	if async {
+		return
+	}
 	<-sf.term
 }
 
-// loop waits for new tasks to be scheduled and keeps loading them until it runs
-// out of tasks or its underlying trie is retrieved for committing.
+// loop loads newly-scheduled trie tasks as they are received, stopping when
+// requested.
 func (sf *subfetcher) loop() {
 	// No matter how the loop stops, signal anyone waiting that it's terminated
 	defer close(sf.term)
@@ -305,8 +285,6 @@ func (sf *subfetcher) loop() {
 		}
 		sf.trie = trie
 	} else {
-		// The trie argument can be nil as verkle doesn't support prefetching
-		// yet. TODO FIX IT(rjl493456442), otherwise code will panic here.
 		trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil)
 		if err != nil {
 			log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
@@ -318,48 +296,38 @@ func (sf *subfetcher) loop() {
 	for {
 		select {
 		case <-sf.wake:
-			// Subfetcher was woken up, retrieve any tasks to avoid spinning the lock
+			// Execute all remaining tasks in a single run
 			sf.lock.Lock()
 			tasks := sf.tasks
 			sf.tasks = nil
 			sf.lock.Unlock()
 
-			// Prefetch any tasks until the loop is interrupted
-			for i, task := range tasks {
-				select {
-				case <-sf.stop:
-					// If termination is requested, add any leftover back and return
-					sf.lock.Lock()
-					sf.tasks = append(sf.tasks, tasks[i:]...)
-					sf.lock.Unlock()
-					return
-
-				case ch := <-sf.copy:
-					// Somebody wants a copy of the current trie, grant them
-					ch <- sf.db.CopyTrie(sf.trie)
-
-				default:
-					// No termination request yet, prefetch the next entry
-					if _, ok := sf.seen[string(task)]; ok {
-						sf.dups++
-					} else {
-						if len(task) == common.AddressLength {
-							sf.trie.GetAccount(common.BytesToAddress(task))
-						} else {
-							sf.trie.GetStorage(sf.addr, task)
-						}
-						sf.seen[string(task)] = struct{}{}
-					}
+			for _, task := range tasks {
+				if _, ok := sf.seen[string(task)]; ok {
+					sf.dups++
+					continue
+				}
+				if len(task) == common.AddressLength {
+					sf.trie.GetAccount(common.BytesToAddress(task))
+				} else {
+					sf.trie.GetStorage(sf.addr, task)
 				}
+				sf.seen[string(task)] = struct{}{}
 			}
 
-		case ch := <-sf.copy:
-			// Somebody wants a copy of the current trie, grant them
-			ch <- sf.db.CopyTrie(sf.trie)
-
 		case <-sf.stop:
-			// Termination is requested, abort and leave remaining tasks
-			return
+			// Termination is requested, abort if no more tasks are pending. If
+			// there are some, exhaust them first.
+			sf.lock.Lock()
+			done := sf.tasks == nil
+			sf.lock.Unlock()
+
+			if done {
+				return
+			}
+			// Some tasks are pending, loop and pick them up (the wake branch
+			// will be selected eventually; since stop remains closed, this
+			// branch will run again afterwards).
} } } diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go index a616adf98f..478407dfbb 100644 --- a/core/state/trie_prefetcher_test.go +++ b/core/state/trie_prefetcher_test.go @@ -19,7 +19,6 @@ package state import ( "math/big" "testing" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -46,68 +45,20 @@ func filledStateDB() *StateDB { return state } -func TestCopyAndClose(t *testing.T) { +func TestUseAfterTerminate(t *testing.T) { db := filledStateDB() prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - time.Sleep(1 * time.Second) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - b := prefetcher.trie(common.Hash{}, db.originalRoot) - cpy := prefetcher.copy() - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - c := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - cpy2 := cpy.copy() - cpy2.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - d := cpy2.trie(common.Hash{}, db.originalRoot) - cpy.close() - cpy2.close() - if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() { - t.Fatalf("Invalid trie, hashes should be equal: %v %v %v %v", a.Hash(), b.Hash(), c.Hash(), d.Hash()) - } -} -func TestUseAfterClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - a := prefetcher.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - b := prefetcher.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } - if b != nil { - t.Fatal("Trie after close should return nil") + if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err != nil { + t.Errorf("Prefetch failed before terminate: %v", err) } -} + prefetcher.terminate(false) -func TestCopyClose(t *testing.T) { - db := filledStateDB() - prefetcher := newTriePrefetcher(db.db, db.originalRoot, "") - skey := common.HexToHash("aaa") - prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}) - cpy := prefetcher.copy() - a := prefetcher.trie(common.Hash{}, db.originalRoot) - b := cpy.trie(common.Hash{}, db.originalRoot) - prefetcher.close() - c := prefetcher.trie(common.Hash{}, db.originalRoot) - d := cpy.trie(common.Hash{}, db.originalRoot) - if a == nil { - t.Fatal("Prefetching before close should not return nil") - } - if b == nil { - t.Fatal("Copy trie should return nil") - } - if c != nil { - t.Fatal("Trie after close should return nil") + if err := prefetcher.prefetch(common.Hash{}, db.originalRoot, common.Address{}, [][]byte{skey.Bytes()}); err == nil { + t.Errorf("Prefetch succeeded after terminate: %v", err) } - if d == nil { - t.Fatal("Copy trie should not return nil") + if tr := prefetcher.trie(common.Hash{}, db.originalRoot); tr == nil { + t.Errorf("Prefetcher returned nil trie after terminate") } } diff --git 
a/core/state_processor_test.go b/core/state_processor_test.go index c5642eeffa..7d73a86fe9 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -483,7 +483,7 @@ func TestProcessVerkle(t *testing.T) { txCost1 := params.TxGas txCost2 := params.TxGas contractCreationCost := intrinsicContractCreationGas + uint64(2039 /* execution costs */) - codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(293644 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(57444 /* execution costs */) blockGasUsagesExpected := []uint64{ txCost1*2 + txCost2, txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, diff --git a/core/state_transition.go b/core/state_transition.go index 8c01b2c285..183b4fb6de 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -73,7 +73,7 @@ func (result *ExecutionResult) Revert() []byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. -func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860 bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028, isEIP3860 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 if isContractCreation && isHomestead { @@ -274,8 +274,8 @@ func (st *StateTransition) buyGas() error { if st.msg.GasFeeCap != nil { balanceCheck.SetUint64(st.msg.GasLimit) balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) - balanceCheck.Add(balanceCheck, st.msg.Value) } + balanceCheck.Add(balanceCheck, st.msg.Value) if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time, st.evm.Context.ArbOSVersion) { if blobGas := st.blobGasUsed(); blobGas > 0 { // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap @@ -467,6 +467,13 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { if err != nil { return nil, err } + if rules.IsEIP4762 { + st.evm.AccessEvents.AddTxOrigin(msg.From) + + if targetAddr := msg.To; targetAddr != nil { + st.evm.AccessEvents.AddTxDestination(*targetAddr, msg.Value.Sign() != 0) + } + } // Check clause 6 value, overflow := uint256.FromBig(msg.Value) @@ -525,6 +532,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { fee.Mul(fee, effectiveTipU256) st.state.AddBalance(tipReceipient, fee, tracing.BalanceIncreaseRewardTransactionFee) tipAmount = fee.ToBig() + // add the coinbase to the witness iff the fee is greater than 0 + if rules.IsEIP4762 && fee.Sign() != 0 { + st.evm.AccessEvents.BalanceGas(st.evm.Context.Coinbase, true) + } } // Arbitrum: record the tip diff --git a/core/tracing/gen_balance_change_reason_stringer.go b/core/tracing/gen_balance_change_reason_stringer.go new file mode 100644 index 0000000000..d3a515a12d --- /dev/null +++ b/core/tracing/gen_balance_change_reason_stringer.go @@ -0,0 +1,37 @@ +// Code generated by "stringer -type=BalanceChangeReason -output gen_balance_change_reason_stringer.go"; DO NOT EDIT. + +package tracing + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[BalanceChangeUnspecified-0] + _ = x[BalanceIncreaseRewardMineUncle-1] + _ = x[BalanceIncreaseRewardMineBlock-2] + _ = x[BalanceIncreaseWithdrawal-3] + _ = x[BalanceIncreaseGenesisBalance-4] + _ = x[BalanceIncreaseRewardTransactionFee-5] + _ = x[BalanceDecreaseGasBuy-6] + _ = x[BalanceIncreaseGasReturn-7] + _ = x[BalanceIncreaseDaoContract-8] + _ = x[BalanceDecreaseDaoAccount-9] + _ = x[BalanceChangeTransfer-10] + _ = x[BalanceChangeTouchAccount-11] + _ = x[BalanceIncreaseSelfdestruct-12] + _ = x[BalanceDecreaseSelfdestruct-13] + _ = x[BalanceDecreaseSelfdestructBurn-14] +} + +const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurn" + +var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400} + +func (i BalanceChangeReason) String() string { + if i >= BalanceChangeReason(len(_BalanceChangeReason_index)-1) { + return "BalanceChangeReason(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BalanceChangeReason_name[_BalanceChangeReason_index[i]:_BalanceChangeReason_index[i+1]] +} diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go index f95de98b94..5731abca10 100644 --- a/core/tracing/hooks.go +++ b/core/tracing/hooks.go @@ -215,6 +215,8 @@ type Hooks struct { // for tracing and reporting. type BalanceChangeReason byte +//go:generate stringer -type=BalanceChangeReason -output gen_balance_change_reason_stringer.go + const ( BalanceChangeUnspecified BalanceChangeReason = 0 @@ -314,6 +316,12 @@ const ( GasChangeCallStorageColdAccess GasChangeReason = 13 // GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert. GasChangeCallFailedExecution GasChangeReason = 14 + // GasChangeWitnessContractInit is the amount charged for adding to the witness during the contract creation initialization step + GasChangeWitnessContractInit GasChangeReason = 15 + // GasChangeWitnessContractCreation is the amount charged for adding to the witness during the contract creation finalization step + GasChangeWitnessContractCreation GasChangeReason = 16 + // GasChangeWitnessCodeChunk is the amount charged for touching one or more contract code chunks + GasChangeWitnessCodeChunk GasChangeReason = 17 // GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as // it will be "manually" tracked by a direct emit of the gas change event. diff --git a/core/types/transaction.go b/core/types/transaction.go index 6e8ad56dea..3e8885f955 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -639,7 +639,7 @@ func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) { } } -// TxDifference returns a new set which is the difference between a and b. +// TxDifference returns a new set of transactions that are present in a but not in b. func TxDifference(a, b Transactions) Transactions { keep := make(Transactions, 0, len(a)) @@ -657,7 +657,7 @@ func TxDifference(a, b Transactions) Transactions { return keep } -// HashDifference returns a new set which is the difference between a and b. 
+// HashDifference returns a new set of hashes that are present in a but not in b. func HashDifference(a, b []common.Hash) []common.Hash { keep := make([]common.Hash, 0, len(a)) diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 361b977611..5dbf367073 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -379,7 +379,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } if orig.AccessList() != nil { if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) { - return errors.New("access list wrong!") + return errors.New("access list wrong") } } return nil diff --git a/core/vm/common.go b/core/vm/common.go index 90ba4a4ad1..ba75950e37 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -63,6 +63,18 @@ func getData(data []byte, start uint64, size uint64) []byte { return common.RightPadBytes(data[start:end], int(size)) } +func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyPadded []byte, actualStart uint64, sizeNonPadded uint64) { + length := uint64(len(data)) + if start > length { + start = length + } + end := start + size + if end > length { + end = length + } + return common.RightPadBytes(data[start:end], int(size)), start, end - start +} + // toWordSize returns the ceiled word size required for memory expansion. func toWordSize(size uint64) uint64 { if size > math.MaxUint64-31 { diff --git a/core/vm/contract.go b/core/vm/contract.go index 8608c3f62b..c0e223200b 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -60,6 +60,9 @@ type Contract struct { CodeAddr *common.Address Input []byte + // is the execution frame represented by this object a contract deployment + IsDeployment bool + Gas uint64 value *uint256.Int } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 907dbb245c..daafc3b557 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -144,6 +144,8 @@ var PrecompiledContractsPrague = map[common.Address]PrecompiledContract{ var PrecompiledContractsBLS = PrecompiledContractsPrague +var PrecompiledContractsVerkle = PrecompiledContractsPrague + var ( PrecompiledAddressesPrague []common.Address PrecompiledAddressesCancun []common.Address diff --git a/core/vm/eips.go b/core/vm/eips.go index c3e337a67f..e9788da83c 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -19,9 +19,11 @@ package vm import ( "errors" "fmt" + "math" "sort" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" ) @@ -38,6 +40,7 @@ var activators = map[int]func(*JumpTable){ 1884: enable1884, 1344: enable1344, 1153: enable1153, + 4762: enable4762, } // EnableEIP enables the given EIP on the config. 
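For orientation before the next hunk: it registers 4762 in the activators map and swaps the affected opcodes over to witness-based gas. A hypothetical in-package test (not part of this diff, and leaning on the unexported Cancun table the same way newVerkleInstructionSet does via enable4762) illustrates what that registration enables:

package vm

import "testing"

// TestEnable4762Sketch is an illustrative sketch: once 4762 is wired into the
// activators map, it can be applied to a base instruction set via EnableEIP.
func TestEnable4762Sketch(t *testing.T) {
	jt := newCancunInstructionSet()
	if err := EnableEIP(4762, &jt); err != nil {
		t.Fatalf("EIP-4762 should be activatable: %v", err)
	}
	// SLOAD now carries a witness-based dynamic gas function instead of the
	// plain EIP-2929 cost.
	if jt[SLOAD].dynamicGas == nil {
		t.Fatal("expected a dynamic gas function on SLOAD")
	}
}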
@@ -323,3 +326,214 @@ func enable6780(jt *JumpTable) { maxStack: maxStack(1, 0), } } + +func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + stack = scope.Stack + a = stack.pop() + memOffset = stack.pop() + codeOffset = stack.pop() + length = stack.pop() + ) + uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow() + if overflow { + uint64CodeOffset = math.MaxUint64 + } + addr := common.Address(a.Bytes20()) + code := interpreter.evm.StateDB.GetCode(addr) + contract := &Contract{ + Code: code, + self: AccountRef(addr), + } + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) + statelessGas := interpreter.evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), false) + if !scope.Contract.UseGas(statelessGas, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) + + return nil, nil +} + +// opPush1EIP4762 handles the special case of PUSH1 opcode for EIP-4762, which +// need not worry about the adjusted bound logic when adding the PUSHDATA to +// the list of access events. +func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + codeLen = uint64(len(scope.Contract.Code)) + integer = new(uint256.Int) + ) + *pc += 1 + if *pc < codeLen { + scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) + + if !scope.Contract.IsDeployment && *pc%31 == 0 { + // touch next chunk if PUSH1 is at the boundary. if so, *pc has + // advanced past this boundary. + contractAddr := scope.Contract.Address() + statelessGas := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false) + if !scope.Contract.UseGas(statelessGas, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } + } else { + scope.Stack.push(integer.Clear()) + } + return nil, nil +} + +func makePushEIP4762(size uint64, pushByteSize int) executionFunc { + return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + var ( + codeLen = len(scope.Contract.Code) + start = min(codeLen, int(*pc+1)) + end = min(codeLen, start+pushByteSize) + ) + scope.Stack.push(new(uint256.Int).SetBytes( + common.RightPadBytes( + scope.Contract.Code[start:end], + pushByteSize, + )), + ) + + if !scope.Contract.IsDeployment { + contractAddr := scope.Contract.Address() + statelessGas := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false) + if !scope.Contract.UseGas(statelessGas, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified) { + scope.Contract.Gas = 0 + return nil, ErrOutOfGas + } + } + + *pc += size + return nil, nil + } +} + +func enable4762(jt *JumpTable) { + jt[SSTORE] = &operation{ + dynamicGas: gasSStore4762, + execute: opSstore, + minStack: minStack(2, 0), + maxStack: maxStack(2, 0), + } + jt[SLOAD] = &operation{ + dynamicGas: gasSLoad4762, + execute: opSload, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[BALANCE] = &operation{ + execute: opBalance, + dynamicGas: gasBalance4762, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[EXTCODESIZE] = &operation{ + execute: opExtCodeSize, + dynamicGas: 
gasExtCodeSize4762, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[EXTCODEHASH] = &operation{ + execute: opExtCodeHash, + dynamicGas: gasExtCodeHash4762, + minStack: minStack(1, 1), + maxStack: maxStack(1, 1), + } + + jt[EXTCODECOPY] = &operation{ + execute: opExtCodeCopyEIP4762, + dynamicGas: gasExtCodeCopyEIP4762, + minStack: minStack(4, 0), + maxStack: maxStack(4, 0), + memorySize: memoryExtCodeCopy, + } + + jt[CODECOPY] = &operation{ + execute: opCodeCopy, + constantGas: GasFastestStep, + dynamicGas: gasCodeCopyEip4762, + minStack: minStack(3, 0), + maxStack: maxStack(3, 0), + memorySize: memoryCodeCopy, + } + + jt[SELFDESTRUCT] = &operation{ + execute: opSelfdestruct6780, + dynamicGas: gasSelfdestructEIP4762, + constantGas: params.SelfdestructGasEIP150, + minStack: minStack(1, 0), + maxStack: maxStack(1, 0), + } + + jt[CREATE] = &operation{ + execute: opCreate, + constantGas: params.CreateNGasEip4762, + dynamicGas: gasCreateEip3860, + minStack: minStack(3, 1), + maxStack: maxStack(3, 1), + memorySize: memoryCreate, + } + + jt[CREATE2] = &operation{ + execute: opCreate2, + constantGas: params.CreateNGasEip4762, + dynamicGas: gasCreate2Eip3860, + minStack: minStack(4, 1), + maxStack: maxStack(4, 1), + memorySize: memoryCreate2, + } + + jt[CALL] = &operation{ + execute: opCall, + dynamicGas: gasCallEIP4762, + minStack: minStack(7, 1), + maxStack: maxStack(7, 1), + memorySize: memoryCall, + } + + jt[CALLCODE] = &operation{ + execute: opCallCode, + dynamicGas: gasCallCodeEIP4762, + minStack: minStack(7, 1), + maxStack: maxStack(7, 1), + memorySize: memoryCall, + } + + jt[STATICCALL] = &operation{ + execute: opStaticCall, + dynamicGas: gasStaticCallEIP4762, + minStack: minStack(6, 1), + maxStack: maxStack(6, 1), + memorySize: memoryStaticCall, + } + + jt[DELEGATECALL] = &operation{ + execute: opDelegateCall, + dynamicGas: gasDelegateCallEIP4762, + minStack: minStack(6, 1), + maxStack: maxStack(6, 1), + memorySize: memoryDelegateCall, + } + + jt[PUSH1] = &operation{ + execute: opPush1EIP4762, + constantGas: GasFastestStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } + for i := 1; i < 32; i++ { + jt[PUSH1+OpCode(i)] = &operation{ + execute: makePushEIP4762(uint64(i+1), i+1), + constantGas: GasFastestStep, + minStack: minStack(0, 1), + maxStack: maxStack(0, 1), + } + } +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 7205908a96..444bb52b39 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -48,6 +48,8 @@ func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { precompiles = PrecompiledContractsArbOS30 case evm.chainRules.IsArbitrum: precompiles = PrecompiledContractsArbitrum + case evm.chainRules.IsVerkle: + precompiles = PrecompiledContractsVerkle case evm.chainRules.IsPrague: precompiles = PrecompiledContractsPrague case evm.chainRules.IsCancun: @@ -95,10 +97,11 @@ type BlockContext struct { // All fields can change between transactions. 
 type TxContext struct {
 	// Message information
-	Origin     common.Address // Provides information for ORIGIN
-	GasPrice   *big.Int       // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set)
-	BlobHashes []common.Hash  // Provides information for BLOBHASH
-	BlobFeeCap *big.Int       // Is used to zero the blobbasefee if NoBaseFee is set
+	Origin       common.Address      // Provides information for ORIGIN
+	GasPrice     *big.Int            // Provides information for GASPRICE (and is used to zero the basefee if NoBaseFee is set)
+	BlobHashes   []common.Hash       // Provides information for BLOBHASH
+	BlobFeeCap   *big.Int            // Is used to zero the blobbasefee if NoBaseFee is set
+	AccessEvents *state.AccessEvents // Capture all state accesses for this tx
 }
 
 // EVM is the Ethereum Virtual Machine base object and provides
@@ -170,6 +173,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
 // Reset resets the EVM with a new transaction context.
 // This is not threadsafe and should only be done very cautiously.
 func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) {
+	if evm.chainRules.IsEIP4762 {
+		txCtx.AccessEvents = state.NewAccessEvents(statedb.PointCache())
+	}
 	evm.TxContext = txCtx
 	evm.StateDB = statedb
 }
@@ -214,6 +220,16 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 	p, isPrecompile := evm.precompile(addr)
 
 	if !evm.StateDB.Exist(addr) {
+		if !isPrecompile && evm.chainRules.IsEIP4762 {
+			// add proof of absence to witness
+			wgas := evm.AccessEvents.AddAccount(addr, false)
+			if gas < wgas {
+				evm.StateDB.RevertToSnapshot(snapshot)
+				return nil, 0, ErrOutOfGas
+			}
+			gas -= wgas
+		}
+
 		if !isPrecompile && evm.chainRules.IsEIP158 && value.IsZero() {
 			// Calling a non-existing account, don't do anything.
 			return nil, gas, nil
@@ -494,7 +510,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 
 	// We add this to the access list _before_ taking a snapshot. Even if the
 	// creation fails, the access-list change should not be rolled back.
-	if evm.chainRules.IsBerlin {
+	if evm.chainRules.IsEIP2929 {
 		evm.StateDB.AddAddressToAccessList(address)
 	}
 	// Ensure there's no existing contract already at the designated address.
@@ -534,8 +550,18 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	// The contract is a scoped environment for this execution context only.
 	contract := NewContract(caller, AccountRef(address), value, gas)
 	contract.SetCodeOptionalHash(&address, codeAndHash)
+	contract.IsDeployment = true
 
-	ret, err = evm.interpreter.Run(contract, nil, false)
+	// Charge the contract creation init gas in verkle mode
+	if evm.chainRules.IsEIP4762 {
+		if !contract.UseGas(evm.AccessEvents.ContractCreateInitGas(address, value.Sign() != 0), evm.Config.Tracer, tracing.GasChangeWitnessContractInit) {
+			err = ErrOutOfGas
+		}
+	}
+
+	if err == nil {
+		ret, err = evm.interpreter.Run(contract, nil, false)
+	}
 
 	// Check whether the max code size has been exceeded, assign err if the case.
 	if err == nil && evm.chainRules.IsEIP158 && len(ret) > int(evm.chainConfig.MaxCodeSize()) {
@@ -557,11 +583,24 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	// be stored due to not enough gas set an error and let it be handled
 	// by the error checking condition below.
if err == nil { - createDataGas := uint64(len(ret)) * params.CreateDataGas - if contract.UseGas(createDataGas, evm.Config.Tracer, tracing.GasChangeCallCodeStorage) { - evm.StateDB.SetCode(address, ret) + if !evm.chainRules.IsEIP4762 { + createDataGas := uint64(len(ret)) * params.CreateDataGas + if !contract.UseGas(createDataGas, evm.Config.Tracer, tracing.GasChangeCallCodeStorage) { + err = ErrCodeStoreOutOfGas + } } else { - err = ErrCodeStoreOutOfGas + // Contract creation completed, touch the missing fields in the contract + if !contract.UseGas(evm.AccessEvents.AddAccount(address, true), evm.Config.Tracer, tracing.GasChangeWitnessContractCreation) { + err = ErrCodeStoreOutOfGas + } + + if err == nil && len(ret) > 0 && !contract.UseGas(evm.AccessEvents.CodeChunksRangeGas(address, 0, uint64(len(ret)), uint64(len(ret)), true), evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk) { + err = ErrCodeStoreOutOfGas + } + } + + if err == nil { + evm.StateDB.SetCode(address, ret) } } diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 9d055bc959..a0b9b67f35 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -383,7 +383,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize } else if !evm.StateDB.Exist(address) { gas += params.CallNewAccountGas } - if transfersValue { + if transfersValue && !evm.chainRules.IsEIP4762 { gas += params.CallValueTransferGas } memoryGas, err := memoryGasCost(mem, memorySize) @@ -394,7 +394,14 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { return 0, ErrGasUintOverflow } - + if evm.chainRules.IsEIP4762 { + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.AccessEvents.ValueTransferGas(contract.Address(), address)) + if overflow { + return 0, ErrGasUintOverflow + } + } + } evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0)) if err != nil { return 0, err @@ -402,6 +409,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + return gas, nil } @@ -414,12 +422,22 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory gas uint64 overflow bool ) - if stack.Back(2).Sign() != 0 { + if stack.Back(2).Sign() != 0 && !evm.chainRules.IsEIP4762 { gas += params.CallValueTransferGas } if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsEIP4762 { + address := common.Address(stack.Back(1).Bytes20()) + transfersValue := !stack.Back(2).IsZero() + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.AccessEvents.ValueTransferGas(contract.Address(), address)) + if overflow { + return 0, ErrGasUintOverflow + } + } + } evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0)) if err != nil { return 0, err diff --git a/core/vm/instructions.go b/core/vm/instructions.go index a63426f217..6b2421909c 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -361,9 +361,9 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ if overflow { uint64CodeOffset = math.MaxUint64 } + codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) - return nil, nil } @@ -439,6 +439,7 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, 
scope *ScopeContext) ( num.Clear() return nil, nil } + upper, err := interpreter.evm.ProcessingHook.L1BlockNumber(interpreter.evm.Context) if err != nil { return nil, err @@ -599,6 +600,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 } + // reuse size int for stackvalue stackvalue := size @@ -639,6 +641,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation2) @@ -653,7 +656,6 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] stackvalue.SetBytes(addr.Bytes()) } scope.Stack.push(&stackvalue) - scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded) if suberr == ErrExecutionReverted { @@ -923,6 +925,7 @@ func makePush(size uint64, pushByteSize int) executionFunc { pushByteSize, )), ) + *pc += size return nil, nil } diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 8653864d11..e17e913aa3 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -643,7 +643,7 @@ func BenchmarkOpKeccak256(bench *testing.B) { } } -func TestCreate2Addreses(t *testing.T) { +func TestCreate2Addresses(t *testing.T) { type testcase struct { origin string salt string diff --git a/core/vm/interface.go b/core/vm/interface.go index c04aec6f8c..f7a3bb204f 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -105,6 +106,10 @@ type StateDB interface { // AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform // even if the feature/fork is not active yet AddSlotToAccessList(addr common.Address, slot common.Hash) + + // PointCache returns the point cache used in computations + PointCache() *utils.PointCache + Prepare(rules params.Rules, sender, coinbase common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) RevertToSnapshot(int) diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index bc7f44bfe7..01bcef3786 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -100,6 +100,9 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. var table *JumpTable switch { + case evm.chainRules.IsVerkle: + // TODO replace with proper instruction set when fork is specified + table = &verkleInstructionSet case evm.chainRules.IsCancun: table = &cancunInstructionSet case evm.chainRules.IsShanghai: @@ -229,6 +232,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } + + if in.evm.chainRules.IsEIP4762 && !contract.IsDeployment { + // if the PC ends up in a new "chunk" of verkleized code, charge the + // associated costs. 
+		contractAddr := contract.Address()
+		contract.Gas -= in.evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false)
+	}
+
 	// Get the operation from the jump table and validate the stack to ensure there are
 	// enough stack items available to perform the operation.
 	op = contract.GetOp(pc)
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 65716f9442..5624f47ba7 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -57,6 +57,7 @@ var (
 	mergeInstructionSet    = newMergeInstructionSet()
 	shanghaiInstructionSet = newShanghaiInstructionSet()
 	cancunInstructionSet   = newCancunInstructionSet()
+	verkleInstructionSet   = newVerkleInstructionSet()
 )
 
 // JumpTable contains the EVM opcodes supported at a given fork.
@@ -80,6 +81,12 @@ func validate(jt JumpTable) JumpTable {
 	return jt
 }
 
+func newVerkleInstructionSet() JumpTable {
+	instructionSet := newCancunInstructionSet()
+	enable4762(&instructionSet)
+	return validate(instructionSet)
+}
+
 func newCancunInstructionSet() JumpTable {
 	instructionSet := newShanghaiInstructionSet()
 	enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
diff --git a/core/vm/operations_verkle.go b/core/vm/operations_verkle.go
new file mode 100644
index 0000000000..73eb05974d
--- /dev/null
+++ b/core/vm/operations_verkle.go
@@ -0,0 +1,159 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package vm + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/params" +) + +func gasSStore4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas := evm.AccessEvents.SlotGas(contract.Address(), stack.peek().Bytes32(), true) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasSLoad4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas := evm.AccessEvents.SlotGas(contract.Address(), stack.peek().Bytes32(), false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasBalance4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + gas := evm.AccessEvents.BalanceGas(address, false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasExtCodeSize4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + if _, isPrecompile := evm.precompile(address); isPrecompile { + return 0, nil + } + gas := evm.AccessEvents.VersionGas(address, false) + gas += evm.AccessEvents.CodeSizeGas(address, false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func gasExtCodeHash4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + address := stack.peek().Bytes20() + if _, isPrecompile := evm.precompile(address); isPrecompile { + return 0, nil + } + gas := evm.AccessEvents.CodeHashGas(address, false) + if gas == 0 { + gas = params.WarmStorageReadCostEIP2929 + } + return gas, nil +} + +func makeCallVariantGasEIP4762(oldCalculator gasFunc) gasFunc { + return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas, err := oldCalculator(evm, contract, stack, mem, memorySize) + if err != nil { + return 0, err + } + if _, isPrecompile := evm.precompile(contract.Address()); isPrecompile { + return gas, nil + } + witnessGas := evm.AccessEvents.MessageCallGas(contract.Address()) + if witnessGas == 0 { + witnessGas = params.WarmStorageReadCostEIP2929 + } + return witnessGas + gas, nil + } +} + +var ( + gasCallEIP4762 = makeCallVariantGasEIP4762(gasCall) + gasCallCodeEIP4762 = makeCallVariantGasEIP4762(gasCallCode) + gasStaticCallEIP4762 = makeCallVariantGasEIP4762(gasStaticCall) + gasDelegateCallEIP4762 = makeCallVariantGasEIP4762(gasDelegateCall) +) + +func gasSelfdestructEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + beneficiaryAddr := common.Address(stack.peek().Bytes20()) + if _, isPrecompile := evm.precompile(beneficiaryAddr); isPrecompile { + return 0, nil + } + contractAddr := contract.Address() + statelessGas := evm.AccessEvents.VersionGas(contractAddr, false) + statelessGas += evm.AccessEvents.CodeSizeGas(contractAddr, false) + statelessGas += evm.AccessEvents.BalanceGas(contractAddr, false) + if contractAddr != beneficiaryAddr { + statelessGas += evm.AccessEvents.BalanceGas(beneficiaryAddr, false) + } + // Charge write costs if it transfers value + if evm.StateDB.GetBalance(contractAddr).Sign() != 0 { + statelessGas += evm.AccessEvents.BalanceGas(contractAddr, true) + if contractAddr != beneficiaryAddr { + statelessGas += evm.AccessEvents.BalanceGas(beneficiaryAddr, true) 
+		}
+	}
+	return statelessGas, nil
+}
+
+func gasCodeCopyEip4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	gas, err := gasCodeCopy(evm, contract, stack, mem, memorySize)
+	if err != nil {
+		return 0, err
+	}
+	var (
+		codeOffset = stack.Back(1)
+		length     = stack.Back(2)
+	)
+	uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
+	if overflow {
+		uint64CodeOffset = math.MaxUint64
+	}
+	_, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(contract.Code, uint64CodeOffset, length.Uint64())
+	if !contract.IsDeployment {
+		gas += evm.AccessEvents.CodeChunksRangeGas(contract.Address(), copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), false)
+	}
+	return gas, nil
+}
+
+func gasExtCodeCopyEIP4762(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+	// memory expansion first (dynamic part of pre-2929 implementation)
+	gas, err := gasExtCodeCopy(evm, contract, stack, mem, memorySize)
+	if err != nil {
+		return 0, err
+	}
+	addr := common.Address(stack.peek().Bytes20())
+	wgas := evm.AccessEvents.VersionGas(addr, false)
+	wgas += evm.AccessEvents.CodeSizeGas(addr, false)
+	if wgas == 0 {
+		wgas = params.WarmStorageReadCostEIP2929
+	}
+	var overflow bool
+	// We charge (cold-warm), since 'warm' is already charged as constantGas
+	if gas, overflow = math.SafeAdd(gas, wgas); overflow {
+		return 0, ErrGasUintOverflow
+	}
+	return gas, nil
+}
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 0839115266..9ab6caa762 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -57,24 +57,33 @@ type Config struct {
 // sets defaults on the config
 func setDefaults(cfg *Config) {
 	if cfg.ChainConfig == nil {
+		var (
+			shanghaiTime = uint64(0)
+			cancunTime   = uint64(0)
+		)
 		cfg.ChainConfig = &params.ChainConfig{
-			ChainID:             big.NewInt(1),
-			HomesteadBlock:      new(big.Int),
-			DAOForkBlock:        new(big.Int),
-			DAOForkSupport:      false,
-			EIP150Block:         new(big.Int),
-			EIP155Block:         new(big.Int),
-			EIP158Block:         new(big.Int),
-			ByzantiumBlock:      new(big.Int),
-			ConstantinopleBlock: new(big.Int),
-			PetersburgBlock:     new(big.Int),
-			IstanbulBlock:       new(big.Int),
-			MuirGlacierBlock:    new(big.Int),
-			BerlinBlock:         new(big.Int),
-			LondonBlock:         new(big.Int),
-		}
+			ChainID:                       big.NewInt(1),
+			HomesteadBlock:                new(big.Int),
+			DAOForkBlock:                  new(big.Int),
+			DAOForkSupport:                false,
+			EIP150Block:                   new(big.Int),
+			EIP155Block:                   new(big.Int),
+			EIP158Block:                   new(big.Int),
+			ByzantiumBlock:                new(big.Int),
+			ConstantinopleBlock:           new(big.Int),
+			PetersburgBlock:               new(big.Int),
+			IstanbulBlock:                 new(big.Int),
+			MuirGlacierBlock:              new(big.Int),
+			BerlinBlock:                   new(big.Int),
+			LondonBlock:                   new(big.Int),
+			ArrowGlacierBlock:             nil,
+			GrayGlacierBlock:              nil,
+			TerminalTotalDifficulty:       big.NewInt(0),
+			TerminalTotalDifficultyPassed: true,
+			MergeNetsplitBlock:            nil,
+			ShanghaiTime:                  &shanghaiTime,
+			CancunTime:                    &cancunTime}
 	}
-
 	if cfg.Difficulty == nil {
 		cfg.Difficulty = new(big.Int)
 	}
@@ -101,6 +110,10 @@ func setDefaults(cfg *Config) {
 	if cfg.BlobBaseFee == nil {
 		cfg.BlobBaseFee = big.NewInt(params.BlobTxMinBlobGasprice)
 	}
+	// Merge indicators
+	if t := cfg.ChainConfig.ShanghaiTime; cfg.ChainConfig.TerminalTotalDifficultyPassed || (t != nil && *t == 0) {
+		cfg.Random = &(common.Hash{})
+	}
 }
 
 // Execute executes the code using the input as call data during the execution.
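One convention in operations_verkle.go above is easy to miss: the AccessEvents methods return only the additional witness gas, and a return of zero means the location is already in the witness, so the op falls back to the EIP-2929 warm-read cost. A minimal sketch of that rule in isolation (witnessOrWarm is our name for illustration, not a function in the diff):

package vm

import "github.com/ethereum/go-ethereum/params"

// witnessOrWarm illustrates the convention used by gasSLoad4762 and friends:
// a zero witness charge means the location was already touched during this
// transaction, so only the warm-read cost applies.
func witnessOrWarm(witnessGas uint64) uint64 {
	if witnessGas == 0 {
		return params.WarmStorageReadCostEIP2929
	}
	return witnessGas
}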
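The setDefaults rework above also changes what a bare runtime.Execute call runs under: a nil config now gets a merged chain, Shanghai and Cancun active from time zero, and a non-nil Random. A hedged sketch using only the public runtime API; with the old defaults, the PUSH0 below would have been rejected as an invalid opcode:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// Return 32 zero bytes; PUSH0 is only valid on Shanghai-or-later rules,
	// which the reworked setDefaults now enables for a nil config.
	code := []byte{
		byte(vm.PUSH0),  // value 0
		byte(vm.PUSH0),  // memory offset 0
		byte(vm.MSTORE), // mem[0:32] = 0
		byte(vm.PUSH1), 32, // return size
		byte(vm.PUSH0), // return offset
		byte(vm.RETURN),
	}
	ret, _, err := runtime.Execute(code, nil, nil)
	fmt.Println(len(ret), err) // expect: 32 <nil>
}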
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 45228e78c4..04abc5480e 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -105,7 +105,7 @@ func TestExecute(t *testing.T) { func TestCall(t *testing.T) { state, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - address := common.HexToAddress("0x0a") + address := common.HexToAddress("0xaa") state.SetCode(address, []byte{ byte(vm.PUSH1), 10, byte(vm.PUSH1), 0, @@ -725,7 +725,7 @@ func TestRuntimeJSTracer(t *testing.T) { byte(vm.CREATE), byte(vm.POP), }, - results: []string{`"1,1,952855,6,12"`, `"1,1,952855,6,0"`}, + results: []string{`"1,1,952853,6,12"`, `"1,1,952853,6,0"`}, }, { // CREATE2 @@ -741,7 +741,7 @@ func TestRuntimeJSTracer(t *testing.T) { byte(vm.CREATE2), byte(vm.POP), }, - results: []string{`"1,1,952846,6,13"`, `"1,1,952846,6,0"`}, + results: []string{`"1,1,952844,6,13"`, `"1,1,952844,6,0"`}, }, { // CALL diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go index 9b26ab2928..85ba885d6f 100644 --- a/crypto/secp256k1/curve.go +++ b/crypto/secp256k1/curve.go @@ -79,52 +79,52 @@ type BitCurve struct { BitSize int // the size of the underlying field } -func (BitCurve *BitCurve) Params() *elliptic.CurveParams { +func (bitCurve *BitCurve) Params() *elliptic.CurveParams { return &elliptic.CurveParams{ - P: BitCurve.P, - N: BitCurve.N, - B: BitCurve.B, - Gx: BitCurve.Gx, - Gy: BitCurve.Gy, - BitSize: BitCurve.BitSize, + P: bitCurve.P, + N: bitCurve.N, + B: bitCurve.B, + Gx: bitCurve.Gx, + Gy: bitCurve.Gy, + BitSize: bitCurve.BitSize, } } // IsOnCurve returns true if the given (x,y) lies on the BitCurve. -func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { +func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { // y² = x³ + b y2 := new(big.Int).Mul(y, y) //y² - y2.Mod(y2, BitCurve.P) //y²%P + y2.Mod(y2, bitCurve.P) //y²%P x3 := new(big.Int).Mul(x, x) //x² x3.Mul(x3, x) //x³ - x3.Add(x3, BitCurve.B) //x³+B - x3.Mod(x3, BitCurve.P) //(x³+B)%P + x3.Add(x3, bitCurve.B) //x³+B + x3.Mod(x3, bitCurve.P) //(x³+B)%P return x3.Cmp(y2) == 0 } // affineFromJacobian reverses the Jacobian transform. See the comment at the // top of the file. -func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { +func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { if z.Sign() == 0 { return new(big.Int), new(big.Int) } - zinv := new(big.Int).ModInverse(z, BitCurve.P) + zinv := new(big.Int).ModInverse(z, bitCurve.P) zinvsq := new(big.Int).Mul(zinv, zinv) xOut = new(big.Int).Mul(x, zinvsq) - xOut.Mod(xOut, BitCurve.P) + xOut.Mod(xOut, bitCurve.P) zinvsq.Mul(zinvsq, zinv) yOut = new(big.Int).Mul(y, zinvsq) - yOut.Mod(yOut, BitCurve.P) + yOut.Mod(yOut, bitCurve.P) return } // Add returns the sum of (x1,y1) and (x2,y2) -func (BitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { // If one point is at infinity, return the other point. // Adding the point at infinity to any point will preserve the other point. 
if x1.Sign() == 0 && y1.Sign() == 0 { @@ -135,27 +135,27 @@ func (BitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { } z := new(big.Int).SetInt64(1) if x1.Cmp(x2) == 0 && y1.Cmp(y2) == 0 { - return BitCurve.affineFromJacobian(BitCurve.doubleJacobian(x1, y1, z)) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z)) } - return BitCurve.affineFromJacobian(BitCurve.addJacobian(x1, y1, z, x2, y2, z)) + return bitCurve.affineFromJacobian(bitCurve.addJacobian(x1, y1, z, x2, y2, z)) } // addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and // (x2, y2, z2) and returns their sum, also in Jacobian form. -func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { +func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl z1z1 := new(big.Int).Mul(z1, z1) - z1z1.Mod(z1z1, BitCurve.P) + z1z1.Mod(z1z1, bitCurve.P) z2z2 := new(big.Int).Mul(z2, z2) - z2z2.Mod(z2z2, BitCurve.P) + z2z2.Mod(z2z2, bitCurve.P) u1 := new(big.Int).Mul(x1, z2z2) - u1.Mod(u1, BitCurve.P) + u1.Mod(u1, bitCurve.P) u2 := new(big.Int).Mul(x2, z1z1) - u2.Mod(u2, BitCurve.P) + u2.Mod(u2, bitCurve.P) h := new(big.Int).Sub(u2, u1) if h.Sign() == -1 { - h.Add(h, BitCurve.P) + h.Add(h, bitCurve.P) } i := new(big.Int).Lsh(h, 1) i.Mul(i, i) @@ -163,13 +163,13 @@ func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int s1 := new(big.Int).Mul(y1, z2) s1.Mul(s1, z2z2) - s1.Mod(s1, BitCurve.P) + s1.Mod(s1, bitCurve.P) s2 := new(big.Int).Mul(y2, z1) s2.Mul(s2, z1z1) - s2.Mod(s2, BitCurve.P) + s2.Mod(s2, bitCurve.P) r := new(big.Int).Sub(s2, s1) if r.Sign() == -1 { - r.Add(r, BitCurve.P) + r.Add(r, bitCurve.P) } r.Lsh(r, 1) v := new(big.Int).Mul(u1, i) @@ -179,7 +179,7 @@ func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int x3.Sub(x3, j) x3.Sub(x3, v) x3.Sub(x3, v) - x3.Mod(x3, BitCurve.P) + x3.Mod(x3, bitCurve.P) y3 := new(big.Int).Set(r) v.Sub(v, x3) @@ -187,33 +187,33 @@ func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int s1.Mul(s1, j) s1.Lsh(s1, 1) y3.Sub(y3, s1) - y3.Mod(y3, BitCurve.P) + y3.Mod(y3, bitCurve.P) z3 := new(big.Int).Add(z1, z2) z3.Mul(z3, z3) z3.Sub(z3, z1z1) if z3.Sign() == -1 { - z3.Add(z3, BitCurve.P) + z3.Add(z3, bitCurve.P) } z3.Sub(z3, z2z2) if z3.Sign() == -1 { - z3.Add(z3, BitCurve.P) + z3.Add(z3, bitCurve.P) } z3.Mul(z3, h) - z3.Mod(z3, BitCurve.P) + z3.Mod(z3, bitCurve.P) return x3, y3, z3 } // Double returns 2*(x,y) -func (BitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { z1 := new(big.Int).SetInt64(1) - return BitCurve.affineFromJacobian(BitCurve.doubleJacobian(x1, y1, z1)) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) } // doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and // returns its double, also in Jacobian form. 
-func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { +func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l a := new(big.Int).Mul(x, x) //X1² @@ -231,30 +231,30 @@ func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D x3.Sub(f, x3) //F-2*D - x3.Mod(x3, BitCurve.P) + x3.Mod(x3, bitCurve.P) y3 := new(big.Int).Sub(d, x3) //D-X3 y3.Mul(e, y3) //E*(D-X3) y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C - y3.Mod(y3, BitCurve.P) + y3.Mod(y3, bitCurve.P) z3 := new(big.Int).Mul(y, z) //Y1*Z1 z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 - z3.Mod(z3, BitCurve.P) + z3.Mod(z3, bitCurve.P) return x3, y3, z3 } // ScalarBaseMult returns k*G, where G is the base point of the group and k is // an integer in big-endian form. -func (BitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { - return BitCurve.ScalarMult(BitCurve.Gx, BitCurve.Gy, k) +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) } // Marshal converts a point into the form specified in section 4.3.6 of ANSI // X9.62. -func (BitCurve *BitCurve) Marshal(x, y *big.Int) []byte { - byteLen := (BitCurve.BitSize + 7) >> 3 +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 ret := make([]byte, 1+2*byteLen) ret[0] = 4 // uncompressed point flag readBits(x, ret[1:1+byteLen]) @@ -264,8 +264,8 @@ func (BitCurve *BitCurve) Marshal(x, y *big.Int) []byte { // Unmarshal converts a point, serialised by Marshal, into an x, y pair. On // error, x = nil. -func (BitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { - byteLen := (BitCurve.BitSize + 7) >> 3 +func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 if len(data) != 1+2*byteLen { return } diff --git a/crypto/secp256k1/scalar_mult_cgo.go b/crypto/secp256k1/scalar_mult_cgo.go index bdf8eeede7..d11c11faf8 100644 --- a/crypto/secp256k1/scalar_mult_cgo.go +++ b/crypto/secp256k1/scalar_mult_cgo.go @@ -21,7 +21,7 @@ extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, const unsigned */ import "C" -func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { // Ensure scalar is exactly 32 bytes. We pad always, even if // scalar is 32 bytes long, to avoid a timing side channel. if len(scalar) > 32 { diff --git a/crypto/secp256k1/scalar_mult_nocgo.go b/crypto/secp256k1/scalar_mult_nocgo.go index 22f53ac6ae..feb13a8dfd 100644 --- a/crypto/secp256k1/scalar_mult_nocgo.go +++ b/crypto/secp256k1/scalar_mult_nocgo.go @@ -9,6 +9,6 @@ package secp256k1 import "math/big" -func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { panic("ScalarMult is not available when secp256k1 is built without cgo") } diff --git a/eth/backend.go b/eth/backend.go index 15cb0996a0..c164a5c0ef 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -19,7 +19,6 @@ package eth import ( "encoding/json" - "errors" "fmt" "math/big" "runtime" @@ -106,9 +105,6 @@ type Ethereum struct { // whose lifecycle will be managed by the provided node. 
func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // Ensure configuration values are compatible and sane - if config.SyncMode == downloader.LightSync { - return nil, errors.New("can't run eth.Ethereum in light sync mode, light mode has been deprecated") - } if !config.SyncMode.IsValid() { return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode) } @@ -214,7 +210,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } t, err := tracers.LiveDirectory.New(config.VMTrace, traceConfig) if err != nil { - return nil, fmt.Errorf("Failed to create tracer %s: %v", config.VMTrace, err) + return nil, fmt.Errorf("failed to create tracer %s: %v", config.VMTrace, err) } vmConfig.Tracer = t } diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 39021fb6b8..dedca6e435 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -979,11 +979,11 @@ func TestSimultaneousNewBlock(t *testing.T) { defer wg.Done() if newResp, err := api.NewPayloadV1(*execData); err != nil { errMu.Lock() - testErr = fmt.Errorf("Failed to insert block: %w", err) + testErr = fmt.Errorf("failed to insert block: %w", err) errMu.Unlock() } else if newResp.Status != "VALID" { errMu.Lock() - testErr = fmt.Errorf("Failed to insert block: %v", newResp.Status) + testErr = fmt.Errorf("failed to insert block: %v", newResp.Status) errMu.Unlock() } }() @@ -1018,7 +1018,7 @@ func TestSimultaneousNewBlock(t *testing.T) { defer wg.Done() if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { errMu.Lock() - testErr = fmt.Errorf("Failed to insert block: %w", err) + testErr = fmt.Errorf("failed to insert block: %w", err) errMu.Unlock() } }() diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go index fecd83f276..2d6569e422 100644 --- a/eth/catalyst/simulated_beacon.go +++ b/eth/catalyst/simulated_beacon.go @@ -279,9 +279,12 @@ func (c *SimulatedBeacon) Rollback() { // Fork sets the head to the provided hash. func (c *SimulatedBeacon) Fork(parentHash common.Hash) error { + // Ensure no pending transactions. + c.eth.TxPool().Sync() if len(c.eth.TxPool().Pending(txpool.PendingFilter{})) != 0 { return errors.New("pending block dirty") } + parent := c.eth.BlockChain().GetBlockByHash(parentHash) if parent == nil { return errors.New("parent not found") diff --git a/eth/downloader/api.go b/eth/downloader/api.go index 90c36afbb5..ac175672a0 100644 --- a/eth/downloader/api.go +++ b/eth/downloader/api.go @@ -129,7 +129,7 @@ func (api *DownloaderAPI) eventLoop() { } } -// Syncing provides information when this nodes starts synchronising with the Ethereum network and when it's finished. +// Syncing provides information when this node starts synchronising with the Ethereum network and when it's finished. 
func (api *DownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go index 8088f16af9..57c6eee40a 100644 --- a/eth/downloader/beaconsync.go +++ b/eth/downloader/beaconsync.go @@ -202,7 +202,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { case SnapSync: chainHead = d.blockchain.CurrentSnapBlock() default: - chainHead = d.lightchain.CurrentHeader() + panic("unknown sync mode") } number := chainHead.Number.Uint64() @@ -222,7 +222,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { case SnapSync: linked = d.blockchain.HasFastBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) default: - linked = d.blockchain.HasHeader(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) + panic("unknown sync mode") } if !linked { // This is a programming error. The chain backfiller was called with a @@ -257,7 +257,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) { case SnapSync: known = d.blockchain.HasFastBlock(h.Hash(), n) default: - known = d.lightchain.HasHeader(h.Hash(), n) + panic("unknown sync mode") } if !known { end = check diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index bb083260e4..d147414859 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -67,7 +67,6 @@ var ( errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") errNoPivotHeader = errors.New("pivot header is not found") - ErrMergeTransition = errors.New("legacy sync reached the merge") ) // peerDropFn is a callback type for dropping a peer detected as malicious. @@ -98,7 +97,6 @@ type Downloader struct { syncStatsChainHeight uint64 // Highest block number known when syncing started syncStatsLock sync.RWMutex // Lock protecting the sync stats fields - lightchain LightChain blockchain BlockChain // Callbacks @@ -143,8 +141,8 @@ type Downloader struct { syncLogTime time.Time // Time instance when status was last reported } -// LightChain encapsulates functions required to synchronise a light chain. -type LightChain interface { +// BlockChain encapsulates functions required to sync a (full or snap) blockchain. +type BlockChain interface { // HasHeader verifies a header's presence in the local chain. HasHeader(common.Hash, uint64) bool @@ -162,11 +160,6 @@ type LightChain interface { // SetHead rewinds the local chain to a new head. SetHead(uint64) error -} - -// BlockChain encapsulates functions required to sync a (full or snap) blockchain. -type BlockChain interface { - LightChain // HasBlock verifies a block's presence in the local chain. HasBlock(common.Hash, uint64) bool @@ -201,17 +194,13 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
-func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { - if lightchain == nil { - lightchain = chain - } +func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, dropPeer peerDropFn, success func()) *Downloader { dl := &Downloader{ stateDB: stateDb, mux: mux, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), peers: newPeerSet(), blockchain: chain, - lightchain: lightchain, dropPeer: dropPeer, headerProcCh: make(chan *headerTask, 1), quitCh: make(chan struct{}), @@ -240,15 +229,13 @@ func (d *Downloader) Progress() ethereum.SyncProgress { current := uint64(0) mode := d.getMode() - switch { - case d.blockchain != nil && mode == FullSync: + switch mode { + case FullSync: current = d.blockchain.CurrentBlock().Number.Uint64() - case d.blockchain != nil && mode == SnapSync: + case SnapSync: current = d.blockchain.CurrentSnapBlock().Number.Uint64() - case d.lightchain != nil: - current = d.lightchain.CurrentHeader().Number.Uint64() default: - log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) + log.Error("Unknown downloader mode", "mode", mode) } progress, pending := d.SnapSyncer.Progress() @@ -402,7 +389,7 @@ func (d *Downloader) syncToHead() (err error) { if err != nil { d.mux.Post(FailedEvent{err}) } else { - latest := d.lightchain.CurrentHeader() + latest := d.blockchain.CurrentHeader() d.mux.Post(DoneEvent{latest}) } }() @@ -520,7 +507,7 @@ func (d *Downloader) syncToHead() (err error) { } // Rewind the ancient store and blockchain if reorg happens. if origin+1 < frozen { - if err := d.lightchain.SetHead(origin); err != nil { + if err := d.blockchain.SetHead(origin); err != nil { return err } log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin) @@ -690,34 +677,32 @@ func (d *Downloader) processHeaders(origin uint64) error { chunkHashes := hashes[:limit] // In case of header only syncing, validate the chunk immediately - if mode == SnapSync || mode == LightSync { + if mode == SnapSync { // Although the received headers might be all valid, a legacy // PoW/PoA sync must not accept post-merge headers. Make sure // that any transition is rejected at this point. 
if len(chunkHeaders) > 0 { - if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { + if n, err := d.blockchain.InsertHeaderChain(chunkHeaders); err != nil { log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } } } - // Unless we're doing light chains, schedule the headers for associated content retrieval - if mode == FullSync || mode == SnapSync { - // If we've reached the allowed number of pending headers, stall a bit - for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { - timer.Reset(time.Second) - select { - case <-d.cancelCh: - return errCanceled - case <-timer.C: - } - } - // Otherwise insert the headers for content retrieval - inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) - if len(inserts) != len(chunkHeaders) { - return fmt.Errorf("%w: stale headers", errBadPeer) + // If we've reached the allowed number of pending headers, stall a bit + for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { + timer.Reset(time.Second) + select { + case <-d.cancelCh: + return errCanceled + case <-timer.C: } } + // Otherwise insert the headers for content retrieval + inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) + if len(inserts) != len(chunkHeaders) { + return fmt.Errorf("%w: stale headers", errBadPeer) + } + headers = headers[limit:] hashes = hashes[limit:] origin += uint64(limit) @@ -1056,7 +1041,7 @@ func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Hea headers []*types.Header ) for { - parent := d.lightchain.GetHeaderByHash(current.ParentHash) + parent := d.blockchain.GetHeaderByHash(current.ParentHash) if parent == nil { break // The chain is not continuous, or the chain is exhausted } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index b19fec105e..b201770999 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -76,7 +76,7 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester { chain: chain, peers: make(map[string]*downloadTesterPeer), } - tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) + tester.downloader = New(db, new(event.TypeMux), tester.chain, tester.dropPeer, success) return tester } @@ -384,9 +384,6 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { t.Helper() headers, blocks, receipts := length, length, length - if tester.downloader.getMode() == LightSync { - blocks, receipts = 1, 1 - } if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers { t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) } @@ -398,9 +395,8 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { } } -func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } -func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } -func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } +func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } +func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) @@ 
-505,9 +501,8 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } -func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } -func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } +func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } +func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } func testCancel(t *testing.T, protocol uint, mode SyncMode) { complete := make(chan struct{}) @@ -538,9 +533,8 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } -func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } -func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } +func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } +func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { complete := make(chan struct{}) @@ -578,9 +572,8 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. -func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } -func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } -func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } +func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } +func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) @@ -619,7 +612,7 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Validate the number of block bodies that should have been requested bodiesNeeded, receiptsNeeded := 0, 0 for _, block := range chain.blocks[1:] { - if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { + if len(block.Transactions()) > 0 || len(block.Uncles()) > 0 { bodiesNeeded++ } } @@ -694,9 +687,8 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. 
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } +func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } +func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { success := make(chan struct{}) @@ -734,17 +726,7 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { t.Fatalf("failed to beacon-sync chain: %v", err) } - var startingBlock uint64 - if mode == LightSync { - // in light-sync mode: - // * the starting block is 0 on the second sync cycle because blocks - // are never downloaded. - // * The current/highest blocks reported in the progress reflect the - // current/highest header. - startingBlock = 0 - } else { - startingBlock = uint64(len(chain.blocks)/2 - 1) - } + startingBlock := uint64(len(chain.blocks)/2 - 1) select { case <-success: diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index d388b9ee4d..9d8e1f313c 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -23,13 +23,12 @@ import "fmt" type SyncMode uint32 const ( - FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks - SnapSync // Download the chain and the state via compact snapshots - LightSync // Download only the headers and terminate afterwards + FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks + SnapSync // Download the chain and the state via compact snapshots ) func (mode SyncMode) IsValid() bool { - return mode >= FullSync && mode <= LightSync + return mode == FullSync || mode == SnapSync } // String implements the stringer interface. 
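Editor's note: with LightSync removed, SyncMode's text round-trip accepts only "full" and "snap", as the marshalling hunks just below show. A small sketch of the resulting behaviour, using only the exported downloader.SyncMode API visible in this diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
)

func main() {
	for _, name := range []string{"full", "snap", "light"} {
		var mode downloader.SyncMode
		if err := mode.UnmarshalText([]byte(name)); err != nil {
			// "light" now lands here: unknown sync mode "light", want "full" or "snap"
			fmt.Printf("%s: %v\n", name, err)
			continue
		}
		fmt.Printf("%s -> %s (valid=%v)\n", name, mode, mode.IsValid())
	}
}
```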
@@ -39,8 +38,6 @@ func (mode SyncMode) String() string { return "full" case SnapSync: return "snap" - case LightSync: - return "light" default: return "unknown" } @@ -52,8 +49,6 @@ func (mode SyncMode) MarshalText() ([]byte, error) { return []byte("full"), nil case SnapSync: return []byte("snap"), nil - case LightSync: - return []byte("light"), nil default: return nil, fmt.Errorf("unknown sync mode %d", mode) } @@ -65,10 +60,8 @@ func (mode *SyncMode) UnmarshalText(text []byte) error { *mode = FullSync case "snap": *mode = SnapSync - case "light": - *mode = LightSync default: - return fmt.Errorf(`unknown sync mode %q, want "full", "snap" or "light"`, text) + return fmt.Errorf(`unknown sync mode %q, want "full" or "snap"`, text) } return nil } diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go index 3693ab095f..4aa97cf1f7 100644 --- a/eth/downloader/skeleton_test.go +++ b/eth/downloader/skeleton_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -376,20 +377,9 @@ func TestSkeletonSyncInit(t *testing.T) { skeleton.Terminate() // Ensure the correct resulting sync status - var progress skeletonProgress - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - - if len(progress.Subchains) != len(tt.newstate) { - t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate)) - continue - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.newstate[j].Head { - t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head) - } - if progress.Subchains[j].Tail != tt.newstate[j].Tail { - t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail) - } + expect := skeletonExpect{state: tt.newstate} + if err := checkSkeletonProgress(db, false, nil, expect); err != nil { + t.Errorf("test %d: %v", i, err) } } } @@ -493,28 +483,36 @@ func TestSkeletonSyncExtend(t *testing.T) { skeleton.Terminate() // Ensure the correct resulting sync status - var progress skeletonProgress - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - - if len(progress.Subchains) != len(tt.newstate) { - t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate)) - continue - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.newstate[j].Head { - t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head) - } - if progress.Subchains[j].Tail != tt.newstate[j].Tail { - t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail) - } + expect := skeletonExpect{state: tt.newstate} + if err := checkSkeletonProgress(db, false, nil, expect); err != nil { + t.Errorf("test %d: %v", i, err) } } } +type skeletonExpect struct { + state []*subchain // Expected sync state after the post-init event + serve uint64 // Expected number of header retrievals after initial cycle + drop uint64 // Expected number of peers dropped after initial cycle +} + +type skeletonTest struct { + fill bool // Whether to run a real backfiller in this test case + unpredictable bool // Whether to ignore drops/serves due to 
uncertain packet assignments + + head *types.Header // New head header to announce to reorg to + peers []*skeletonTestPeer // Initial peer set to start the sync with + mid skeletonExpect + + newHead *types.Header // New header to anoint on top of the old one + newPeer *skeletonTestPeer // New peer to join the skeleton syncer + end skeletonExpect +} + // Tests that the skeleton sync correctly retrieves headers from one or more // peers without duplicates or other strange side effects. func TestSkeletonSyncRetrievals(t *testing.T) { - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + //log.SetDefault(log.NewLogger(log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false)))) // Since skeleton headers don't need to be meaningful, beyond a parent hash // progression, create a long fake chain to test with. @@ -537,22 +535,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) { Extra: []byte("B"), // force a different hash }) } - tests := []struct { - fill bool // Whether to run a real backfiller in this test case - unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments - - head *types.Header // New head header to announce to reorg to - peers []*skeletonTestPeer // Initial peer set to start the sync with - midstate []*subchain // Expected sync state after initial cycle - midserve uint64 // Expected number of header retrievals after initial cycle - middrop uint64 // Expected number of peers dropped after initial cycle - - newHead *types.Header // New header to anoint on top of the old one - newPeer *skeletonTestPeer // New peer to join the skeleton syncer - endstate []*subchain // Expected sync state after the post-init event - endserve uint64 // Expected number of header retrievals after the post-init event - enddrop uint64 // Expected number of peers dropped after the post-init event - }{ + tests := []skeletonTest{ // Completely empty database with only the genesis set. The sync is expected // to create a single subchain with the requested head. No peers however, so // the sync should be stuck without any progression. @@ -560,12 +543,16 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // When a new peer is added, it should detect the join and fill the headers // to the genesis block. { - head: chain[len(chain)-1], - midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}}, + head: chain[len(chain)-1], + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}}, + }, - newPeer: newSkeletonTestPeer("test-peer", chain), - endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - endserve: uint64(len(chain) - 2), // len - head - genesis + newPeer: newSkeletonTestPeer("test-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, }, // Completely empty database with only the genesis set. The sync is expected // to create a single subchain with the requested head. With one valid peer, @@ -573,14 +560,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { // // Adding a second peer should not have any effect. 
{ - head: chain[len(chain)-1], - peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)}, - midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - midserve: uint64(len(chain) - 2), // len - head - genesis - - newPeer: newSkeletonTestPeer("test-peer-2", chain), - endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - endserve: uint64(len(chain) - 2), // len - head - genesis + head: chain[len(chain)-1], + peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)}, + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, + + newPeer: newSkeletonTestPeer("test-peer-2", chain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, }, // Completely empty database with only the genesis set. The sync is expected // to create a single subchain with the requested head. With many valid peers, @@ -594,12 +585,16 @@ func TestSkeletonSyncRetrievals(t *testing.T) { newSkeletonTestPeer("test-peer-2", chain), newSkeletonTestPeer("test-peer-3", chain), }, - midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - midserve: uint64(len(chain) - 2), // len - head - genesis + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, - newPeer: newSkeletonTestPeer("test-peer-4", chain), - endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, - endserve: uint64(len(chain) - 2), // len - head - genesis + newPeer: newSkeletonTestPeer("test-peer-4", chain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}}, + serve: uint64(len(chain) - 2), // len - head - genesis + }, }, // This test checks if a peer tries to withhold a header - *on* the sync // boundary - instead of sending the requested amount. The malicious short @@ -611,14 +606,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 3, // len - head - genesis - missing - middrop: 1, // penalize shortened header deliveries + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 3, // len - head - genesis - missing + drop: 1, // penalize shortened header deliveries + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to withhold a header - *off* the sync // boundary - instead of sending the requested amount. 
The malicious short @@ -630,14 +629,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 3, // len - head - genesis - missing - middrop: 1, // penalize shortened header deliveries + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 3, // len - head - genesis - missing + drop: 1, // penalize shortened header deliveries + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to duplicate a header - *on* the sync // boundary - instead of sending the correct sequence. The malicious duped @@ -649,14 +652,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // penalize invalid header sequences + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // penalize invalid header sequences + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to duplicate a header - *off* the sync // boundary - instead of sending the correct sequence. 
The malicious duped @@ -668,14 +675,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { peers: []*skeletonTestPeer{ newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // penalize invalid header sequences + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // penalize invalid header sequences + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to inject a different header - *on* // the sync boundary - instead of sending the correct sequence. The bad @@ -698,14 +709,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { ), ), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync? + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync? + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test checks if a peer tries to inject a different header - *off* // the sync boundary - instead of sending the correct sequence. 
The bad @@ -728,14 +743,18 @@ func TestSkeletonSyncRetrievals(t *testing.T) { ), ), }, - midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, - midserve: requestHeaders + 101 - 2, // len - head - genesis - middrop: 1, // different set of headers, drop + mid: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 100}}, + serve: requestHeaders + 101 - 2, // len - head - genesis + drop: 1, // different set of headers, drop + }, - newPeer: newSkeletonTestPeer("good-peer", chain), - endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, - endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis - enddrop: 1, // no new drops + newPeer: newSkeletonTestPeer("good-peer", chain), + end: skeletonExpect{ + state: []*subchain{{Head: requestHeaders + 100, Tail: 1}}, + serve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis + drop: 1, // no new drops + }, }, // This test reproduces a bug caught during review (kudos to @holiman) // where a subchain is merged with a previously interrupted one, causing @@ -765,12 +784,16 @@ func TestSkeletonSyncRetrievals(t *testing.T) { return nil // Fallback to default behavior, just delayed }), }, - midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}}, - midserve: 2*requestHeaders - 1, // len - head - genesis + mid: skeletonExpect{ + state: []*subchain{{Head: 2 * requestHeaders, Tail: 1}}, + serve: 2*requestHeaders - 1, // len - head - genesis + }, - newHead: chain[2*requestHeaders+2], - endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}}, - endserve: 4 * requestHeaders, + newHead: chain[2*requestHeaders+2], + end: skeletonExpect{ + state: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}}, + serve: 4 * requestHeaders, + }, }, // This test reproduces a bug caught by (@rjl493456442) where a skeleton // header goes missing, causing the sync to get stuck and/or panic. @@ -792,13 +815,17 @@ func TestSkeletonSyncRetrievals(t *testing.T) { fill: true, unpredictable: true, // We have good and bad peer too, bad may be dropped, test too short for certainty - head: chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2 - peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)}, - midstate: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}}, + head: chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2 + peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)}, + mid: skeletonExpect{ + state: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}}, + }, - newHead: sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4 - newPeer: newSkeletonTestPeer("test-peer-newchain", sidechain), - endstate: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}}, + newHead: sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4 + newPeer: newSkeletonTestPeer("test-peer-newchain", sidechain), + end: skeletonExpect{ + state: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}}, + }, }, } for i, tt := range tests { @@ -861,115 +888,83 @@ func TestSkeletonSyncRetrievals(t *testing.T) { skeleton := newSkeleton(db, peerset, drop, filler) skeleton.Sync(tt.head, nil, true) - var progress skeletonProgress // Wait a bit (bleah) for the initial sync loop to go to idle. This might // be either a finish or a never-start hence why there's no event to hook. 
- check := func() error { - if len(progress.Subchains) != len(tt.midstate) { - return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate)) - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.midstate[j].Head { - return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head) - } - if progress.Subchains[j].Tail != tt.midstate[j].Tail { - return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail) - } - } - return nil - } - waitStart := time.Now() for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 { time.Sleep(waitTime) - // Check the post-init end state if it matches the required results - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - if err := check(); err == nil { + if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err == nil { break } } - if err := check(); err != nil { - t.Error(err) + if err := checkSkeletonProgress(db, tt.unpredictable, tt.peers, tt.mid); err != nil { + t.Errorf("test %d, mid: %v", i, err) continue } - if !tt.unpredictable { - var served uint64 - for _, peer := range tt.peers { - served += peer.served.Load() - } - if served != tt.midserve { - t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve) - } - var drops uint64 - for _, peer := range tt.peers { - drops += peer.dropped.Load() - } - if drops != tt.middrop { - t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) - } - } + // Apply the post-init events if there's any - if tt.newHead != nil { - skeleton.Sync(tt.newHead, nil, true) - } + endpeers := tt.peers if tt.newPeer != nil { if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH68, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { t.Errorf("test %d: failed to register new peer: %v", i, err) } + time.Sleep(time.Millisecond * 50) // given time for peer registration + endpeers = append(tt.peers, tt.newPeer) + } + if tt.newHead != nil { + skeleton.Sync(tt.newHead, nil, true) } + // Wait a bit (bleah) for the second sync loop to go to idle. This might // be either a finish or a never-start hence why there's no event to hook. 
- check = func() error { - if len(progress.Subchains) != len(tt.endstate) { - return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate)) - } - for j := 0; j < len(progress.Subchains); j++ { - if progress.Subchains[j].Head != tt.endstate[j].Head { - return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head) - } - if progress.Subchains[j].Tail != tt.endstate[j].Tail { - return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail) - } - } - return nil - } waitStart = time.Now() for waitTime := 20 * time.Millisecond; time.Since(waitStart) < 2*time.Second; waitTime = waitTime * 2 { time.Sleep(waitTime) - // Check the post-init end state if it matches the required results - json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) - if err := check(); err == nil { + if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err == nil { break } } - if err := check(); err != nil { - t.Error(err) + if err := checkSkeletonProgress(db, tt.unpredictable, endpeers, tt.end); err != nil { + t.Errorf("test %d, end: %v", i, err) continue } // Check that the peers served no more headers than we actually needed - if !tt.unpredictable { - served := uint64(0) - for _, peer := range tt.peers { - served += peer.served.Load() - } - if tt.newPeer != nil { - served += tt.newPeer.served.Load() - } - if served != tt.endserve { - t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve) - } - drops := uint64(0) - for _, peer := range tt.peers { - drops += peer.dropped.Load() - } - if tt.newPeer != nil { - drops += tt.newPeer.dropped.Load() - } - if drops != tt.enddrop { - t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop) - } - } // Clean up any leftover skeleton sync resources skeleton.Terminate() } } + +func checkSkeletonProgress(db ethdb.KeyValueReader, unpredictable bool, peers []*skeletonTestPeer, expected skeletonExpect) error { + var progress skeletonProgress + // Check the post-init end state if it matches the required results + json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress) + + if len(progress.Subchains) != len(expected.state) { + return fmt.Errorf("subchain count mismatch: have %d, want %d", len(progress.Subchains), len(expected.state)) + } + for j := 0; j < len(progress.Subchains); j++ { + if progress.Subchains[j].Head != expected.state[j].Head { + return fmt.Errorf("subchain %d head mismatch: have %d, want %d", j, progress.Subchains[j].Head, expected.state[j].Head) + } + if progress.Subchains[j].Tail != expected.state[j].Tail { + return fmt.Errorf("subchain %d tail mismatch: have %d, want %d", j, progress.Subchains[j].Tail, expected.state[j].Tail) + } + } + if !unpredictable { + var served uint64 + for _, peer := range peers { + served += peer.served.Load() + } + if served != expected.serve { + return fmt.Errorf("served headers mismatch: have %d, want %d", served, expected.serve) + } + var drops uint64 + for _, peer := range peers { + drops += peer.dropped.Load() + } + if drops != expected.drop { + return fmt.Errorf("dropped peers mismatch: have %d, want %d", drops, expected.drop) + } + } + return nil +} diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index d039bcb401..1e625e21c0 100644 --- a/eth/gasprice/feehistory.go +++ 
b/eth/gasprice/feehistory.go @@ -44,7 +44,8 @@ const ( // maxBlockFetchers is the max number of goroutines to spin up to pull blocks // for the fee history calculation (mostly relevant for LES). maxBlockFetchers = 4 - maxQueryLimit = 100 + // maxQueryLimit is the max number of requested percentiles. + maxQueryLimit = 100 ) // blockFees represents a single block for processing diff --git a/eth/handler.go b/eth/handler.go index 143ac2a8a5..d5117584c0 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -180,7 +180,7 @@ func newHandler(config *handlerConfig) (*handler, error) { return nil, errors.New("snap sync not supported with snapshots disabled") } // Construct the downloader (long sync) - h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) + h.downloader = downloader.New(config.Database, h.eventMux, h.chain, h.removePeer, h.enableSyncedFeatures) fetchTx := func(peer string, hashes []common.Hash) error { p := h.peers.peer(peer) diff --git a/eth/protocols/snap/progress_test.go b/eth/protocols/snap/progress_test.go index 9d923bd2f5..1d9a6b8474 100644 --- a/eth/protocols/snap/progress_test.go +++ b/eth/protocols/snap/progress_test.go @@ -80,7 +80,7 @@ func makeLegacyProgress() legacyProgress { Next: common.Hash{}, Last: common.Hash{0x77}, SubTasks: map[common.Hash][]*legacyStorageTask{ - common.Hash{0x1}: { + {0x1}: { { Next: common.Hash{}, Last: common.Hash{0xff}, diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index ffda718700..88d7d34dcc 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -103,7 +103,7 @@ var ( // to allow concurrent retrievals. accountConcurrency = 16 - // storageConcurrency is the number of chunks to split the a large contract + // storageConcurrency is the number of chunks to split a large contract // storage trie into to allow concurrent retrievals. storageConcurrency = 16 ) @@ -2358,7 +2358,7 @@ func (s *Syncer) commitHealer(force bool) { } batch := s.db.NewBatch() if err := s.healer.scheduler.Commit(batch); err != nil { - log.Error("Failed to commit healing data", "err", err) + log.Crit("Failed to commit healing data", "err", err) } if err := batch.Write(); err != nil { log.Crit("Failed to persist healing data", "err", err) @@ -3250,9 +3250,9 @@ func (t *healRequestSort) Merge() []TrieNodePathSet { // sortByAccountPath takes hashes and paths, and sorts them. After that, it generates // the TrieNodePaths and merges paths which belongs to the same account path. 
func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) { - var syncPaths []trie.SyncPath - for _, path := range paths { - syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path))) + syncPaths := make([]trie.SyncPath, len(paths)) + for i, path := range paths { + syncPaths[i] = trie.NewSyncPath([]byte(path)) } n := &healRequestSort{paths, hashes, syncPaths} sort.Sort(n) diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 94307f3f06..95788b38f3 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -22,7 +22,6 @@ import ( "encoding/json" "errors" "fmt" - "math/big" "os" "runtime" "sync" @@ -809,9 +808,13 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block // Execute the transaction and flush any traces to disk vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) statedb.SetTxContext(tx.Hash(), i) - vmConf.Tracer.OnTxStart(vmenv.GetVMContext(), tx, msg.From) + if vmConf.Tracer.OnTxStart != nil { + vmConf.Tracer.OnTxStart(vmenv.GetVMContext(), tx, msg.From) + } vmRet, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.GasLimit)) - vmConf.Tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err) + if vmConf.Tracer.OnTxEnd != nil { + vmConf.Tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err) + } if writer != nil { writer.Flush() } @@ -986,7 +989,8 @@ func (api *API) traceTx(ctx context.Context, tx *types.Transaction, message *cor return nil, err } } - vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: big.NewInt(0)}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}) + // The actual TxContext will be created as part of ApplyTransactionWithEVM. + vmenv := vm.NewEVM(vmctx, vm.TxContext{GasPrice: message.GasPrice, BlobFeeCap: message.BlobGasFeeCap}, statedb, api.backend.ChainConfig(), vm.Config{Tracer: tracer.Hooks, NoBaseFee: true}) statedb.SetLogger(tracer.Hooks) // Define a meaningful timeout of a single transaction trace diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 4455fe6196..1ba5602b63 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -32,6 +32,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -316,7 +317,7 @@ func TestTraceCall(t *testing.T) { config: &TraceCallConfig{TxIndex: uintPtr(1)}, expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr), }, - // After the target transaction, should be succeed + // After the target transaction, should be succeeded { blockNumber: rpc.BlockNumber(genBlocks - 1), call: ethapi.TransactionArgs{ @@ -999,3 +1000,90 @@ func TestTraceChain(t *testing.T) { } } } + +// newTestMergedBackend creates a post-merge chain +func newTestMergedBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { + backend := &testBackend{ + chainConfig: gspec.Config, + engine: beacon.NewFaker(), + chaindb: rawdb.NewMemoryDatabase(), + } + // Generate blocks for testing + _, blocks, _ := core.GenerateChainWithGenesis(gspec, backend.engine, n, generator) + + // Import the canonical chain + cacheConfig := 
&core.CacheConfig{
+		TrieCleanLimit:    256,
+		TrieDirtyLimit:    256,
+		TrieTimeLimit:     5 * time.Minute,
+		SnapshotLimit:     0,
+		TrieDirtyDisabled: true, // Archive mode
+	}
+	chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, nil, gspec, nil, backend.engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("failed to create tester chain: %v", err)
+	}
+	if n, err := chain.InsertChain(blocks); err != nil {
+		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+	}
+	backend.chain = chain
+	return backend
+}
+
+func TestTraceBlockWithBasefee(t *testing.T) {
+	t.Parallel()
+	accounts := newAccounts(1)
+	target := common.HexToAddress("0x1111111111111111111111111111111111111111")
+	genesis := &core.Genesis{
+		Config: params.AllDevChainProtocolChanges,
+		Alloc: types.GenesisAlloc{
+			accounts[0].addr: {Balance: big.NewInt(1 * params.Ether)},
+			target: {Nonce: 1, Code: []byte{
+				byte(vm.BASEFEE), byte(vm.STOP),
+			}},
+		},
+	}
+	genBlocks := 1
+	signer := types.HomesteadSigner{}
+	var txHash common.Hash
+	var baseFee = new(big.Int)
+	backend := newTestMergedBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {
+		tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
+			Nonce:    uint64(i),
+			To:       &target,
+			Value:    big.NewInt(0),
+			Gas:      5 * params.TxGas,
+			GasPrice: b.BaseFee(),
+			Data:     nil}),
+			signer, accounts[0].key)
+		b.AddTx(tx)
+		txHash = tx.Hash()
+		baseFee.Set(b.BaseFee())
+	})
+	defer backend.chain.Stop()
+	api := NewAPI(backend)
+
+	var testSuite = []struct {
+		blockNumber rpc.BlockNumber
+		config      *TraceConfig
+		want        string
+	}{
+		// Trace head block
+		{
+			blockNumber: rpc.BlockNumber(genBlocks),
+			want:        fmt.Sprintf(`[{"txHash":"%#x","result":{"gas":21002,"failed":false,"returnValue":"","structLogs":[{"pc":0,"op":"BASEFEE","gas":84000,"gasCost":2,"depth":1,"stack":[]},{"pc":1,"op":"STOP","gas":83998,"gasCost":0,"depth":1,"stack":["%#x"]}]}}]`, txHash, baseFee),
+		},
+	}
+	for i, tc := range testSuite {
+		result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config)
+		if err != nil {
+			t.Errorf("test %d, want no error, have %v", i, err)
+			continue
+		}
+		have, _ := json.Marshal(result)
+		want := tc.want
+		if string(have) != want {
+			t.Errorf("test %d, result mismatch\nhave: %v\nwant: %v\n", i, string(have), want)
+		}
+	}
+}
diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go
new file mode 100644
index 0000000000..716ad5cd42
--- /dev/null
+++ b/eth/tracers/internal/tracetest/supply_test.go
@@ -0,0 +1,613 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package tracetest + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "math/big" + "os" + "path" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" + + // Force-load live packages, to trigger registration + _ "github.com/ethereum/go-ethereum/eth/tracers/live" +) + +type supplyInfoIssuance struct { + GenesisAlloc *hexutil.Big `json:"genesisAlloc,omitempty"` + Reward *hexutil.Big `json:"reward,omitempty"` + Withdrawals *hexutil.Big `json:"withdrawals,omitempty"` +} + +type supplyInfoBurn struct { + EIP1559 *hexutil.Big `json:"1559,omitempty"` + Blob *hexutil.Big `json:"blob,omitempty"` + Misc *hexutil.Big `json:"misc,omitempty"` +} + +type supplyInfo struct { + Issuance *supplyInfoIssuance `json:"issuance,omitempty"` + Burn *supplyInfoBurn `json:"burn,omitempty"` + + // Block info + Number uint64 `json:"blockNumber"` + Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash"` +} + +func emptyBlockGenerationFunc(b *core.BlockGen) {} + +func TestSupplyOmittedFields(t *testing.T) { + var ( + config = *params.MergedTestChainConfig + gspec = &core.Genesis{ + Config: &config, + } + ) + + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) + + out, _, err := testSupplyTracer(t, gspec, func(b *core.BlockGen) { + b.SetPoS() + }) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + expected := supplyInfo{ + Number: 0, + Hash: common.HexToHash("0x52f276d96f0afaaf2c3cb358868bdc2779c4b0cb8de3e7e5302e247c0b66a703"), + ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + } + actual := out[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func TestSupplyGenesisAlloc(t *testing.T) { + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + + config = *params.AllEthashProtocolChanges + + gspec = &core.Genesis{ + Config: &config, + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + addr2: {Balance: eth1}, + }, + } + ) + + expected := supplyInfo{ + Issuance: &supplyInfoIssuance{ + GenesisAlloc: (*hexutil.Big)(new(big.Int).Mul(common.Big2, big.NewInt(params.Ether))), + }, + Number: 0, + Hash: common.HexToHash("0xbcc9466e9fc6a8b56f4b29ca353a421ff8b51a0c1a58ca4743b427605b08f2ca"), + ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + } + + out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + actual := out[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func TestSupplyRewards(t *testing.T) { + var ( + config = *params.AllEthashProtocolChanges + + gspec = &core.Genesis{ + Config: &config, + } + ) + + expected := supplyInfo{ + Issuance: 
&supplyInfoIssuance{ + Reward: (*hexutil.Big)(new(big.Int).Mul(common.Big2, big.NewInt(params.Ether))), + }, + Number: 1, + Hash: common.HexToHash("0xcbb08370505be503dafedc4e96d139ea27aba3cbc580148568b8a307b3f51052"), + ParentHash: common.HexToHash("0xadeda0a83e337b6c073e3f0e9a17531a04009b397a9588c093b628f21b8bc5a3"), + } + + out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + actual := out[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func TestSupplyEip1559Burn(t *testing.T) { + var ( + config = *params.AllEthashProtocolChanges + + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + // A sender who makes transactions and has some eth1 + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + gwei5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + + gspec = &core.Genesis{ + Config: &config, + BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + }, + } + ) + + signer := types.LatestSigner(gspec.Config) + + eip1559BlockGenerationFunc := func(b *core.BlockGen) { + txdata := &types.DynamicFeeTx{ + ChainID: gspec.Config.ChainID, + Nonce: 0, + To: &aa, + Gas: 21000, + GasFeeCap: gwei5, + GasTipCap: big.NewInt(2), + } + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + } + + out, chain, err := testSupplyTracer(t, gspec, eip1559BlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + var ( + head = chain.CurrentBlock() + reward = new(big.Int).Mul(common.Big2, big.NewInt(params.Ether)) + burn = new(big.Int).Mul(big.NewInt(21000), head.BaseFee) + expected = supplyInfo{ + Issuance: &supplyInfoIssuance{ + Reward: (*hexutil.Big)(reward), + }, + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(burn), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + ) + + actual := out[expected.Number] + compareAsJSON(t, expected, actual) +} + +func TestSupplyWithdrawals(t *testing.T) { + var ( + config = *params.MergedTestChainConfig + gspec = &core.Genesis{ + Config: &config, + } + ) + + withdrawalsBlockGenerationFunc := func(b *core.BlockGen) { + b.SetPoS() + + b.AddWithdrawal(&types.Withdrawal{ + Validator: 42, + Address: common.Address{0xee}, + Amount: 1337, + }) + } + + out, chain, err := testSupplyTracer(t, gspec, withdrawalsBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + var ( + head = chain.CurrentBlock() + expected = supplyInfo{ + Issuance: &supplyInfoIssuance{ + Withdrawals: (*hexutil.Big)(big.NewInt(1337000000000)), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + actual = out[expected.Number] + ) + + compareAsJSON(t, expected, actual) +} + +// Tests fund retrieval after a contract's selfdestruct. +// Contract A calls contract B, which selfdestructs; B then receives ether +// from contract A after the SELFDESTRUCT opcode has already executed. +// Because contract B is only removed at the end of the transaction, +// the ether sent in between is burnt before the Cancun hard fork. 
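+// For example: pre-Cancun, the 5 gwei that contract A forwards to the already +// selfdestructed B is counted under Burn.Misc, whereas post-Cancun (EIP-6780) +// B survives the transaction and keeps the 5 gwei, as the assertions below check. 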
+func TestSupplySelfdestruct(t *testing.T) { + var ( + config = *params.TestChainConfig + + aa = common.HexToAddress("0x1111111111111111111111111111111111111111") + bb = common.HexToAddress("0x2222222222222222222222222222222222222222") + dad = common.HexToAddress("0x0000000000000000000000000000000000000dad") + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + gwei5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + + gspec = &core.Genesis{ + Config: &config, + BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + aa: { + Code: common.FromHex("0x61face60f01b6000527322222222222222222222222222222222222222226000806002600080855af160008103603457600080fd5b60008060008034865af1905060008103604c57600080fd5b5050"), + // Nonce: 0, + Balance: big.NewInt(0), + }, + bb: { + Code: common.FromHex("0x6000357fface000000000000000000000000000000000000000000000000000000000000808203602f57610dad80ff5b5050"), + Nonce: 0, + Balance: eth1, + }, + }, + } + ) + + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) + + signer := types.LatestSigner(gspec.Config) + + testBlockGenerationFunc := func(b *core.BlockGen) { + b.SetPoS() + + txdata := &types.LegacyTx{ + Nonce: 0, + To: &aa, + Value: gwei5, + Gas: 150000, + GasPrice: gwei5, + Data: []byte{}, + } + + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + } + + // 1. Test pre Cancun + preCancunOutput, preCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) + if err != nil { + t.Fatalf("Pre-cancun failed to test supply tracer: %v", err) + } + + // Check balance at state: + // 1. 0x0000...000dad has 1 ether + // 2. A has 0 ether + // 3. B has 0 ether + statedb, _ := preCancunChain.State() + if got, exp := statedb.GetBalance(dad), eth1; got.CmpBig(exp) != 0 { + t.Fatalf("Pre-cancun address \"%v\" balance, got %v exp %v\n", dad, got, exp) + } + if got, exp := statedb.GetBalance(aa), big.NewInt(0); got.CmpBig(exp) != 0 { + t.Fatalf("Pre-cancun address \"%v\" balance, got %v exp %v\n", aa, got, exp) + } + if got, exp := statedb.GetBalance(bb), big.NewInt(0); got.CmpBig(exp) != 0 { + t.Fatalf("Pre-cancun address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + + head := preCancunChain.CurrentBlock() + // Check live trace output + expected := supplyInfo{ + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(big.NewInt(55289500000000)), + Misc: (*hexutil.Big)(big.NewInt(5000000000)), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + + actual := preCancunOutput[expected.Number] + + compareAsJSON(t, expected, actual) + + // 2. Test post Cancun + cancunTime := uint64(0) + gspec.Config.ShanghaiTime = &cancunTime + gspec.Config.CancunTime = &cancunTime + + postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) + if err != nil { + t.Fatalf("Post-cancun failed to test supply tracer: %v", err) + } + + // Check balance at state: + // 1. 0x0000...000dad has 1 ether + // 2. A has 0 ether + // 3. 
B has 5 gwei + statedb, _ = postCancunChain.State() + if got, exp := statedb.GetBalance(dad), eth1; got.CmpBig(exp) != 0 { + t.Fatalf("Post-cancun address \"%v\" balance, got %v exp %v\n", dad, got, exp) + } + if got, exp := statedb.GetBalance(aa), big.NewInt(0); got.CmpBig(exp) != 0 { + t.Fatalf("Post-cancun address \"%v\" balance, got %v exp %v\n", aa, got, exp) + } + if got, exp := statedb.GetBalance(bb), gwei5; got.CmpBig(exp) != 0 { + t.Fatalf("Post-cancun address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + + // Check live trace output + head = postCancunChain.CurrentBlock() + expected = supplyInfo{ + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(big.NewInt(55289500000000)), + }, + Number: 1, + Hash: head.Hash(), + ParentHash: head.ParentHash, + } + + actual = postCancunOutput[expected.Number] + + compareAsJSON(t, expected, actual) +} + +// Tests a selfdestructing contract that sends its balance to itself (a burn). +// It tests both the case where the selfdestruct succeeds and the case where it is reverted. +// - Contract A calls B and D. +// - Contract B selfdestructs and sends the eth1 to itself (burn amount to be counted). +// - Contract C selfdestructs and sends the eth1 to itself. +// - Contract D calls C and reverts (the burn amount of C +// has to be reverted as well). +func TestSupplySelfdestructItselfAndRevert(t *testing.T) { + var ( + config = *params.TestChainConfig + + aa = common.HexToAddress("0x1111111111111111111111111111111111111111") + bb = common.HexToAddress("0x2222222222222222222222222222222222222222") + cc = common.HexToAddress("0x3333333333333333333333333333333333333333") + dd = common.HexToAddress("0x4444444444444444444444444444444444444444") + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + gwei5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.GWei)) + eth1 = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) + eth2 = new(big.Int).Mul(common.Big2, big.NewInt(params.Ether)) + eth5 = new(big.Int).Mul(big.NewInt(5), big.NewInt(params.Ether)) + + gspec = &core.Genesis{ + Config: &config, + // BaseFee: big.NewInt(params.InitialBaseFee), + Alloc: types.GenesisAlloc{ + addr1: {Balance: eth1}, + aa: { + // Contract code in YUL: + // + // object "ContractA" { + // code { + // let B := 0x2222222222222222222222222222222222222222 + // let D := 0x4444444444444444444444444444444444444444 + + // // Call to Contract B + // let resB:= call(gas(), B, 0, 0x0, 0x0, 0, 0) + + // // Call to Contract D + // let resD := call(gas(), D, 0, 0x0, 0x0, 0, 0) + // } + // } + Code: common.FromHex("0x73222222222222222222222222222222222222222273444444444444444444444444444444444444444460006000600060006000865af160006000600060006000865af150505050"), + Balance: common.Big0, + }, + bb: { + // Contract code in YUL: + // + // object "ContractB" { + // code { + // let self := address() + // selfdestruct(self) + // } + // } + Code: common.FromHex("0x3080ff50"), + Balance: eth5, + }, + cc: { + Code: common.FromHex("0x3080ff50"), + Balance: eth1, + }, + dd: { + // Contract code in YUL: + // + // object "ContractD" { + // code { + // let C := 0x3333333333333333333333333333333333333333 + + // // Call to Contract C + // let resC := call(gas(), C, 0, 0x0, 0x0, 0, 0) + + // // Revert + // revert(0, 0) + // } + // } + Code: common.FromHex("0x73333333333333333333333333333333333333333360006000600060006000855af160006000fd5050"), + Balance: eth2, + }, + }, + } + ) + + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) 
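+ + // A terminal total difficulty of zero makes the chain post-merge from genesis, + // which is why the block generator below marks every block with b.SetPoS(). 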
+ + signer := types.LatestSigner(gspec.Config) + + testBlockGenerationFunc := func(b *core.BlockGen) { + b.SetPoS() + + txdata := &types.LegacyTx{ + Nonce: 0, + To: &aa, + Value: common.Big0, + Gas: 150000, + GasPrice: gwei5, + Data: []byte{}, + } + + tx := types.NewTx(txdata) + tx, _ = types.SignTx(tx, signer, key1) + + b.AddTx(tx) + } + + output, chain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) + if err != nil { + t.Fatalf("failed to test supply tracer: %v", err) + } + + // Check balance at state: + // 1. A has 0 ether + // 2. B has 0 ether, burned + // 3. C has 1 ether, selfdestructed but the parent D reverted + // 4. D has 2 ether, reverted + statedb, _ := chain.State() + if got, exp := statedb.GetBalance(aa), common.Big0; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", aa, got, exp) + } + if got, exp := statedb.GetBalance(bb), common.Big0; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", bb, got, exp) + } + if got, exp := statedb.GetBalance(cc), eth1; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", cc, got, exp) + } + if got, exp := statedb.GetBalance(dd), eth2; got.CmpBig(exp) != 0 { + t.Fatalf("address \"%v\" balance, got %v exp %v\n", dd, got, exp) + } + + // Check live trace output + block := chain.GetBlockByNumber(1) + + expected := supplyInfo{ + Burn: &supplyInfoBurn{ + EIP1559: (*hexutil.Big)(new(big.Int).Mul(block.BaseFee(), big.NewInt(int64(block.GasUsed())))), + Misc: (*hexutil.Big)(eth5), // 5 ETH burned from contract B + }, + Number: 1, + Hash: block.Hash(), + ParentHash: block.ParentHash(), + } + + actual := output[expected.Number] + + compareAsJSON(t, expected, actual) +} + +func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockGen)) ([]supplyInfo, *core.BlockChain, error) { + var ( + engine = beacon.New(ethash.NewFaker()) + ) + + traceOutputPath := filepath.ToSlash(t.TempDir()) + traceOutputFilename := path.Join(traceOutputPath, "supply.jsonl") + + // Load supply tracer + tracer, err := tracers.LiveDirectory.New("supply", json.RawMessage(fmt.Sprintf(`{"path":"%s"}`, traceOutputPath))) + if err != nil { + return nil, nil, fmt.Errorf("failed to create supply tracer: %v", err) + } + + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), core.DefaultCacheConfigWithScheme(rawdb.PathScheme), nil, genesis, nil, engine, vm.Config{Tracer: tracer}, nil, nil) + if err != nil { + return nil, nil, fmt.Errorf("failed to create tester chain: %v", err) + } + defer chain.Stop() + + _, blocks, _ := core.GenerateChainWithGenesis(genesis, engine, 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + gen(b) + }) + + if n, err := chain.InsertChain(blocks); err != nil { + return nil, chain, fmt.Errorf("block %d: failed to insert into chain: %v", n, err) + } + + // Check and compare the results + file, err := os.OpenFile(traceOutputFilename, os.O_RDONLY, 0666) + if err != nil { + return nil, chain, fmt.Errorf("failed to open output file: %v", err) + } + defer file.Close() + + var output []supplyInfo + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + blockBytes := scanner.Bytes() + + var info supplyInfo + if err := json.Unmarshal(blockBytes, &info); err != nil { + return nil, chain, fmt.Errorf("failed to unmarshal result: %v", err) + } + + output = append(output, info) + } + + return output, chain, nil +} + +func compareAsJSON(t *testing.T, expected interface{}, actual interface{}) { + want, err := json.Marshal(expected) + if err != nil { 
+ t.Fatalf("failed to marshal expected value to JSON: %v", err) + } + + have, err := json.Marshal(actual) + if err != nil { + t.Fatalf("failed to marshal actual value to JSON: %v", err) + } + + if !bytes.Equal(want, have) { + t.Fatalf("incorrect supply info: expected %s, got %s", string(want), string(have)) + } +} diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json index b2e1ea79cd..6e903d66cd 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/frontier_create_outofstorage.json @@ -104,8 +104,9 @@ "from": "0x0047a8033cc6d6ca2ed5044674fd421f44884de8", "gas": "0x1b7740", "gasUsed": "0x9274f", + "to": "0xc24431c1a1147456414355b1f1769de450e524da", "input": "0x606060405260018054600160a060020a0319163317905561036f600360609081527f55524c0000000000000000000000000000000000000000000000000000000000608052610120604052604c60a09081527f6a736f6e2868747470733a2f2f6170692e6b72616b656e2e636f6d2f302f707560c0527f626c69632f5469636b65723f706169723d455448584254292e726573756c742e60e0527f58455448585842542e632e3000000000000000000000000000000000000000006101005261037d919062030d417f38cc483100000000000000000000000000000000000000000000000000000000610120908152600090731d11e5eae3112dbd44f99266872ff1d07c77dce89081906338cc4831906101249060209060048188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc8887604051837c010000000000000000000000000000000000000000000000000000000002815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156102255780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f11561000257505060405180517f385928320000000000000000000000000000000000000000000000000000000082526004828101888152606484018a90526080602485018181528d5160848701528d519496508a958e958e958e9594604484019360a40192909181908490829085908e906020601f850104600302600f01f150905090810190601f1680156102e65780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561033f5780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f11561000257505060405151979650505050505050565b611af2806103806000396000f35b5056606060405236156100985760e060020a6000350463056e1059811461009a57806327dc297e14610391578063346b306a146103e257806341c0e1b51461075e578063489306eb146107855780635731f35714610a5e57806365a4dfb314610de05780637975c56e14611179578063a2e6204514611458578063ae152cf414611528578063b77644751461181b578063d594877014611876575b005b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a03021916908
30217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561025e5780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103085780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103615780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f115610002575050604051519350610dd892505050565b60408051602060248035600481810135601f81018590048502860185019096528585526100989581359591946044949293909201918190840183828082843750949650505050505050611a2761187a565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156105d15780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106795780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106d25780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561072b5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b9392505050565b610098600154600160a060020a03908116339091161415611a255733600160a060020a0316ff5b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a90046001
60a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109345780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150600087876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109d75780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610a305780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f115610002575050604051519695505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610c535780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610cfa5780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610d535780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610dac5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b949350505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663fbf80418600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc89876040518360e060020a028152600
40180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610fe45780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015089898989896040518760e060020a028152600401808681526020018060200180602001806020018581526020018481038452888181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110935780820380516001836020036101000a031916815260200191505b508481038352878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110ec5780820380516001836020036101000a031916815260200191505b508481038252868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156111455780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038185886185025a03f115610002575050604051519998505050505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561132e5780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f11561000257505050604051805190602001508787876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156113d05780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156114295780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6100985b611aef604060405190810160405280600381526020017f55524c0000000000000000000000000000000000000000000000000000000000815260200150608060405190810160405280604c81526020017f6a736f6e2868747470733a2f2f6170692e6b72616b656e2e636f6d2f302f707581526020017f626c69632f5469636b65723f706169723d455448584254292e726573756c742e81526020017f58455448585842542e632e30000000000000000000000000000000000000000081526020015062030d416115ae565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f810183900483028401830190945283835297999860449892975091909101945090925082915084018382808284375094965050933593505050505b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3
accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156116e75780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117925780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117eb5780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6040805160028054602060018216156101000260001901909116829004601f81018290048202840182019094528383526119679390830182828015611a1d5780601f106119f257610100808354040283529160200191611a1d565b6119d55b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604080518051855473ffffffffffffffffffffffffffffffffffffffff1916178086557f4c7737950000000000000000000000000000000000000000000000000000000082529151600160a060020a03929092169250634c773795916004828101926020929190829003018188876161da5a03f115610002575050604051519250505090565b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156119c75780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311611a0057829003601f168201915b505050505081565b565b600160a060020a031633600160a060020a0316141515611a4657610002565b8060026000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10611aad57805160ff19168380011785555b50611add9291505b80821115611ae75760008155600101611a99565b82800160010185558215611a91579182015b82811115611a91578251826000505591602001919060010190611abf565b5050611aeb61145c565b5090565b5050565b5056", - "error": "contract creation code storage out of gas", + "output": 
"0x606060405236156100985760e060020a6000350463056e1059811461009a57806327dc297e14610391578063346b306a146103e257806341c0e1b51461075e578063489306eb146107855780635731f35714610a5e57806365a4dfb314610de05780637975c56e14611179578063a2e6204514611458578063ae152cf414611528578063b77644751461181b578063d594877014611876575b005b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561025e5780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103085780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156103615780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f115610002575050604051519350610dd892505050565b60408051602060248035600481810135601f81018590048502860185019096528585526100989581359591946044949293909201918190840183828082843750949650505050505050611a2761187a565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156105d15780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106795780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156106d25780820380516001836020036101000a031916815260200191505b5084810382528581815181526020019150805190602001908083838290
60006004602084601f0104600302600f01f150905090810190601f16801561072b5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b9392505050565b610098600154600160a060020a03908116339091161415611a255733600160a060020a0316ff5b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f8101839004830284018301909452838352979998604498929750919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109345780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150600087876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156109d75780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610a305780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f115610002575050604051519695505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166377228659600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889886040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610c535780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f1156100025750505060405180519060200150888888886040518660e060020a028152600401808581526020018060200180602001806020018481038452878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610cfa5780820380516001836020036101000a031916815260200191505b508481038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610d535780820380516001836020036101000a031916815260200191505b508481038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610dac5780820380516001836020036101000a031916815260200191505b5097505050505050505060206040518083038185886185025a03f1156100025750506040515193505050505b949350505050565b6
0408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024919091019450909250829150840183828082843750949650509335935050505060006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663fbf80418600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc89876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f168015610fe45780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015089898989896040518760e060020a028152600401808681526020018060200180602001806020018581526020018481038452888181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110935780820380516001836020036101000a031916815260200191505b508481038352878181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156110ec5780820380516001836020036101000a031916815260200191505b508481038252868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156111455780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038185886185025a03f115610002575050604051519998505050505050505050565b60408051602060248035600481810135601f81018590048502860185019096528585526119559581359591946044949293909201918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976064979196506024919091019450909250829150840183828082843750506040805160e060020a6338cc48310281529051959760009750731d11e5eae3112dbd44f99266872ff1d07c77dce8968796506338cc4831955082820194506020935091829003018188876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a031663adf59f99600060009054906101000a9004600160a060020a0316600160a060020a031663524f3889876040518260e060020a02815260040180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561132e5780820380516001836020036101000a031916815260200191505b50925050506020604051808303816000876161da5a03f11561000257505050604051805190602001508787876040518560e060020a0281526004018084815260200180602001806020018381038352858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156113d05780820380516001836020036101000a031916815260200191505b508381038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156114295780820380516001836020036101000a031916815260200191505b509550505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6100985b611aef604060405190810160405280600381526020017f55524c0000000000000000000000000000000000000000000000000000000000815260200150608060405190810160405280604c81526020017f6a736f6e2868747470733a2f2f6170692e6b72616b656e2e636f6d2f302f70758152602001
7f626c69632f5469636b65723f706169723d455448584254292e726573756c742e81526020017f58455448585842542e632e30000000000000000000000000000000000000000081526020015062030d416115ae565b6040805160206004803580820135601f8101849004840285018401909552848452611955949193602493909291840191908190840183828082843750506040805160208835808b0135601f810183900483028401830190945283835297999860449892975091909101945090925082915084018382808284375094965050933593505050505b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750505060405180519060200150600060006101000a815481600160a060020a0302191690830217905550600060009054906101000a9004600160a060020a0316600160a060020a03166338592832600060009054906101000a9004600160a060020a0316600160a060020a0316632ef3accc88876040518360e060020a02815260040180806020018381526020018281038252848181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156116e75780820380516001836020036101000a031916815260200191505b5093505050506020604051808303816000876161da5a03f115610002575050506040518051906020015060008888886040518660e060020a0281526004018085815260200180602001806020018481526020018381038352868181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117925780820380516001836020036101000a031916815260200191505b508381038252858181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156117eb5780820380516001836020036101000a031916815260200191505b50965050505050505060206040518083038185886185025a03f11561000257505060405151935061075792505050565b6040805160028054602060018216156101000260001901909116829004601f81018290048202840182019094528383526119679390830182828015611a1d5780601f106119f257610100808354040283529160200191611a1d565b6119d55b60006000731d11e5eae3112dbd44f99266872ff1d07c77dce8905080600160a060020a03166338cc48316040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604080518051855473ffffffffffffffffffffffffffffffffffffffff1916178086557f4c7737950000000000000000000000000000000000000000000000000000000082529151600160a060020a03929092169250634c773795916004828101926020929190829003018188876161da5a03f115610002575050604051519250505090565b60408051918252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156119c75780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311611a0057829003601f168201915b505050505081565b565b600160a060020a031633600160a060020a0316141515611a4657610002565b8060026000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10611aad57805160ff19168380011785555b50611add9291505b80821115611ae75760008155600101611a99565b82800160010185558215611a91579182015b82811115611a91578251826000505591602001919060010190611abf565b5050611aeb61145c565b5090565b5050565b5056", "calls": [ { "from": "0xc24431c1a1147456414355b1f1769de450e524da", diff --git a/eth/tracers/live/gen_supplyinfoburn.go b/eth/tracers/live/gen_supplyinfoburn.go new file mode 100644 index 0000000000..d01eda3975 --- /dev/null +++ b/eth/tracers/live/gen_supplyinfoburn.go @@ -0,0 +1,49 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
+ +package live + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*supplyInfoBurnMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (s supplyInfoBurn) MarshalJSON() ([]byte, error) { + type supplyInfoBurn struct { + EIP1559 *hexutil.Big `json:"1559,omitempty"` + Blob *hexutil.Big `json:"blob,omitempty"` + Misc *hexutil.Big `json:"misc,omitempty"` + } + var enc supplyInfoBurn + enc.EIP1559 = (*hexutil.Big)(s.EIP1559) + enc.Blob = (*hexutil.Big)(s.Blob) + enc.Misc = (*hexutil.Big)(s.Misc) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (s *supplyInfoBurn) UnmarshalJSON(input []byte) error { + type supplyInfoBurn struct { + EIP1559 *hexutil.Big `json:"1559,omitempty"` + Blob *hexutil.Big `json:"blob,omitempty"` + Misc *hexutil.Big `json:"misc,omitempty"` + } + var dec supplyInfoBurn + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.EIP1559 != nil { + s.EIP1559 = (*big.Int)(dec.EIP1559) + } + if dec.Blob != nil { + s.Blob = (*big.Int)(dec.Blob) + } + if dec.Misc != nil { + s.Misc = (*big.Int)(dec.Misc) + } + return nil +} diff --git a/eth/tracers/live/gen_supplyinfoissuance.go b/eth/tracers/live/gen_supplyinfoissuance.go new file mode 100644 index 0000000000..e2536ee325 --- /dev/null +++ b/eth/tracers/live/gen_supplyinfoissuance.go @@ -0,0 +1,49 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package live + +import ( + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*supplyInfoIssuanceMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (s supplyInfoIssuance) MarshalJSON() ([]byte, error) { + type supplyInfoIssuance struct { + GenesisAlloc *hexutil.Big `json:"genesisAlloc,omitempty"` + Reward *hexutil.Big `json:"reward,omitempty"` + Withdrawals *hexutil.Big `json:"withdrawals,omitempty"` + } + var enc supplyInfoIssuance + enc.GenesisAlloc = (*hexutil.Big)(s.GenesisAlloc) + enc.Reward = (*hexutil.Big)(s.Reward) + enc.Withdrawals = (*hexutil.Big)(s.Withdrawals) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (s *supplyInfoIssuance) UnmarshalJSON(input []byte) error { + type supplyInfoIssuance struct { + GenesisAlloc *hexutil.Big `json:"genesisAlloc,omitempty"` + Reward *hexutil.Big `json:"reward,omitempty"` + Withdrawals *hexutil.Big `json:"withdrawals,omitempty"` + } + var dec supplyInfoIssuance + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.GenesisAlloc != nil { + s.GenesisAlloc = (*big.Int)(dec.GenesisAlloc) + } + if dec.Reward != nil { + s.Reward = (*big.Int)(dec.Reward) + } + if dec.Withdrawals != nil { + s.Withdrawals = (*big.Int)(dec.Withdrawals) + } + return nil +} diff --git a/eth/tracers/live/supply.go b/eth/tracers/live/supply.go new file mode 100644 index 0000000000..0c9141e99d --- /dev/null +++ b/eth/tracers/live/supply.go @@ -0,0 +1,310 @@ +package live + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/log" + "gopkg.in/natefinch/lumberjack.v2" +) + +func init() { + tracers.LiveDirectory.Register("supply", newSupply) +} + +type supplyInfoIssuance struct { + GenesisAlloc *big.Int `json:"genesisAlloc,omitempty"` + Reward *big.Int `json:"reward,omitempty"` + Withdrawals *big.Int `json:"withdrawals,omitempty"` +} + +//go:generate go run github.com/fjl/gencodec -type supplyInfoIssuance -field-override supplyInfoIssuanceMarshaling -out gen_supplyinfoissuance.go +type supplyInfoIssuanceMarshaling struct { + GenesisAlloc *hexutil.Big + Reward *hexutil.Big + Withdrawals *hexutil.Big +} + +type supplyInfoBurn struct { + EIP1559 *big.Int `json:"1559,omitempty"` + Blob *big.Int `json:"blob,omitempty"` + Misc *big.Int `json:"misc,omitempty"` +} + +//go:generate go run github.com/fjl/gencodec -type supplyInfoBurn -field-override supplyInfoBurnMarshaling -out gen_supplyinfoburn.go +type supplyInfoBurnMarshaling struct { + EIP1559 *hexutil.Big + Blob *hexutil.Big + Misc *hexutil.Big +} + +type supplyInfo struct { + Issuance *supplyInfoIssuance `json:"issuance,omitempty"` + Burn *supplyInfoBurn `json:"burn,omitempty"` + + // Block info + Number uint64 `json:"blockNumber"` + Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash"` +} + +type supplyTxCallstack struct { + calls []supplyTxCallstack + burn *big.Int +} + +type supply struct { + delta supplyInfo + txCallstack []supplyTxCallstack // Callstack for current transaction + logger *lumberjack.Logger +} + +type supplyTracerConfig struct { + Path string `json:"path"` // Path to the directory where the tracer logs will be stored + MaxSize int `json:"maxSize"` // MaxSize is the maximum size in megabytes of the tracer log file before it gets rotated. It defaults to 100 megabytes. 
+} + +func newSupply(cfg json.RawMessage) (*tracing.Hooks, error) { + var config supplyTracerConfig + if cfg != nil { + if err := json.Unmarshal(cfg, &config); err != nil { + return nil, fmt.Errorf("failed to parse config: %v", err) + } + } + if config.Path == "" { + return nil, errors.New("supply tracer output path is required") + } + + // Store traces in a rotating file + logger := &lumberjack.Logger{ + Filename: filepath.Join(config.Path, "supply.jsonl"), + } + if config.MaxSize > 0 { + logger.MaxSize = config.MaxSize + } + + t := &supply{ + delta: newSupplyInfo(), + logger: logger, + } + return &tracing.Hooks{ + OnBlockStart: t.OnBlockStart, + OnBlockEnd: t.OnBlockEnd, + OnGenesisBlock: t.OnGenesisBlock, + OnTxStart: t.OnTxStart, + OnBalanceChange: t.OnBalanceChange, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnClose: t.OnClose, + }, nil +} + +func newSupplyInfo() supplyInfo { + return supplyInfo{ + Issuance: &supplyInfoIssuance{ + GenesisAlloc: big.NewInt(0), + Reward: big.NewInt(0), + Withdrawals: big.NewInt(0), + }, + Burn: &supplyInfoBurn{ + EIP1559: big.NewInt(0), + Blob: big.NewInt(0), + Misc: big.NewInt(0), + }, + + Number: 0, + Hash: common.Hash{}, + ParentHash: common.Hash{}, + } +} + +func (s *supply) resetDelta() { + s.delta = newSupplyInfo() +} + +func (s *supply) OnBlockStart(ev tracing.BlockEvent) { + s.resetDelta() + + s.delta.Number = ev.Block.NumberU64() + s.delta.Hash = ev.Block.Hash() + s.delta.ParentHash = ev.Block.ParentHash() + + // Calculate Burn for this block + if ev.Block.BaseFee() != nil { + burn := new(big.Int).Mul(new(big.Int).SetUint64(ev.Block.GasUsed()), ev.Block.BaseFee()) + s.delta.Burn.EIP1559 = burn + } + // Burnt blob gas + if blobGas := ev.Block.BlobGasUsed(); blobGas != nil && *blobGas > 0 && ev.Block.ExcessBlobGas() != nil { + var ( + excess = *ev.Block.ExcessBlobGas() + baseFee = eip4844.CalcBlobFee(excess) + burn = new(big.Int).Mul(new(big.Int).SetUint64(*blobGas), baseFee) + ) + s.delta.Burn.Blob = burn + } +} + +func (s *supply) OnBlockEnd(err error) { + s.write(s.delta) +} + +func (s *supply) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) { + s.resetDelta() + + s.delta.Number = b.NumberU64() + s.delta.Hash = b.Hash() + s.delta.ParentHash = b.ParentHash() + + // Initialize supply with total allocation in genesis block + for _, account := range alloc { + s.delta.Issuance.GenesisAlloc.Add(s.delta.Issuance.GenesisAlloc, account.Balance) + } + + s.write(s.delta) +} + +func (s *supply) OnBalanceChange(a common.Address, prevBalance, newBalance *big.Int, reason tracing.BalanceChangeReason) { + diff := new(big.Int).Sub(newBalance, prevBalance) + + // NOTE: don't handle "BalanceIncreaseGenesisBalance" because it is handled in OnGenesisBlock + switch reason { + case tracing.BalanceIncreaseRewardMineUncle, tracing.BalanceIncreaseRewardMineBlock: + // Uncle rewards are issuance too; an empty first case would silently drop them. + s.delta.Issuance.Reward.Add(s.delta.Issuance.Reward, diff) + case tracing.BalanceIncreaseWithdrawal: + s.delta.Issuance.Withdrawals.Add(s.delta.Issuance.Withdrawals, diff) + case tracing.BalanceDecreaseSelfdestructBurn: + // BalanceDecreaseSelfdestructBurn is non-reversible as it happens + // at the end of the transaction. 
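+ // diff is negative here since the balance decreased, so subtracting it + // adds the burned amount to Burn.Misc. 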
+ s.delta.Burn.Misc.Sub(s.delta.Burn.Misc, diff) + default: + return + } +} + +func (s *supply) OnTxStart(vm *tracing.VMContext, tx *types.Transaction, from common.Address) { + s.txCallstack = make([]supplyTxCallstack, 0, 1) +} + +// internalTxsHandler handles the amount burned by internal transactions +func (s *supply) internalTxsHandler(call *supplyTxCallstack) { + // Handle the burned amount + if call.burn != nil { + s.delta.Burn.Misc.Add(s.delta.Burn.Misc, call.burn) + } + + if len(call.calls) > 0 { + // Recursively handle internal calls + for _, call := range call.calls { + callCopy := call + s.internalTxsHandler(&callCopy) + } + } +} + +func (s *supply) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + call := supplyTxCallstack{ + calls: make([]supplyTxCallstack, 0), + } + + // This is a special case of a burned amount which has to be handled here: + // it happens when typ == SELFDESTRUCT and from == to. + if vm.OpCode(typ) == vm.SELFDESTRUCT && from == to && value.Cmp(common.Big0) == 1 { + call.burn = value + } + + // Append call to the callstack, so we can fill in the details in OnExit + s.txCallstack = append(s.txCallstack, call) +} + +func (s *supply) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if depth == 0 { + // No need to handle the burned amount if the transaction was reverted + if !reverted { + s.internalTxsHandler(&s.txCallstack[0]) + } + return + } + + size := len(s.txCallstack) + if size <= 1 { + return + } + // Pop call + call := s.txCallstack[size-1] + s.txCallstack = s.txCallstack[:size-1] + size-- + + // In case of a revert, we can drop the call and all its subcalls. + // Note that this has to happen after the call has been popped from the stack. 
+ if reverted { + return + } + s.txCallstack[size-1].calls = append(s.txCallstack[size-1].calls, call) +} + +func (s *supply) OnClose() { + if err := s.logger.Close(); err != nil { + log.Warn("failed to close supply tracer log file", "error", err) + } +} + +func (s *supply) write(data any) { + supply, ok := data.(supplyInfo) + if !ok { + log.Warn("failed to cast supply tracer data on write to log file") + return + } + + // Remove empty fields + if supply.Issuance.GenesisAlloc.Sign() == 0 { + supply.Issuance.GenesisAlloc = nil + } + + if supply.Issuance.Reward.Sign() == 0 { + supply.Issuance.Reward = nil + } + + if supply.Issuance.Withdrawals.Sign() == 0 { + supply.Issuance.Withdrawals = nil + } + + if supply.Issuance.GenesisAlloc == nil && supply.Issuance.Reward == nil && supply.Issuance.Withdrawals == nil { + supply.Issuance = nil + } + + if supply.Burn.EIP1559.Sign() == 0 { + supply.Burn.EIP1559 = nil + } + + if supply.Burn.Blob.Sign() == 0 { + supply.Burn.Blob = nil + } + + if supply.Burn.Misc.Sign() == 0 { + supply.Burn.Misc = nil + } + + if supply.Burn.EIP1559 == nil && supply.Burn.Blob == nil && supply.Burn.Misc == nil { + supply.Burn = nil + } + + out, _ := json.Marshal(supply) + if _, err := s.logger.Write(out); err != nil { + log.Warn("failed to write to supply tracer log file", "error", err) + } + if _, err := s.logger.Write([]byte{'\n'}); err != nil { + log.Warn("failed to write to supply tracer log file", "error", err) + } +} diff --git a/eth/tracers/logger/logger_json.go b/eth/tracers/logger/logger_json.go index d66b8c4b8a..797f7ac658 100644 --- a/eth/tracers/logger/logger_json.go +++ b/eth/tracers/logger/logger_json.go @@ -58,6 +58,7 @@ type jsonLogger struct { encoder *json.Encoder cfg *Config env *tracing.VMContext + hooks *tracing.Hooks } // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects @@ -67,12 +68,14 @@ func NewJSONLogger(cfg *Config, writer io.Writer) *tracing.Hooks { if l.cfg == nil { l.cfg = &Config{} } - return &tracing.Hooks{ - OnTxStart: l.OnTxStart, - OnExit: l.OnExit, - OnOpcode: l.OnOpcode, - OnFault: l.OnFault, + l.hooks = &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnSystemCallStart: l.onSystemCallStart, + OnExit: l.OnEnd, + OnOpcode: l.OnOpcode, + OnFault: l.OnFault, } + return l.hooks } // NewJSONLoggerWithCallFrames creates a new EVM tracer that prints execution steps as JSON objects @@ -82,13 +85,15 @@ func NewJSONLoggerWithCallFrames(cfg *Config, writer io.Writer) *tracing.Hooks { if l.cfg == nil { l.cfg = &Config{} } - return &tracing.Hooks{ - OnTxStart: l.OnTxStart, - OnEnter: l.OnEnter, - OnExit: l.OnExit, - OnOpcode: l.OnOpcode, - OnFault: l.OnFault, + l.hooks = &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnSystemCallStart: l.onSystemCallStart, + OnEnter: l.OnEnter, + OnExit: l.OnExit, + OnOpcode: l.OnOpcode, + OnFault: l.OnFault, } + return l.hooks } func (l *jsonLogger) OnFault(pc uint64, op byte, gas uint64, cost uint64, scope tracing.OpContext, depth int, err error) { @@ -122,6 +127,16 @@ func (l *jsonLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracin l.encoder.Encode(log) } +func (l *jsonLogger) onSystemCallStart() { + // Process no events while in system call. + hooks := *l.hooks + *l.hooks = tracing.Hooks{ + OnSystemCallEnd: func() { + *l.hooks = hooks + }, + } +} + // OnEnter is not enabled by default. 
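+// It is installed only by NewJSONLoggerWithCallFrames, not by NewJSONLogger.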
func (l *jsonLogger) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { frame := callFrame{ diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 05690c3aa4..768a59a84a 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -79,6 +79,11 @@ func (f callFrame) failed() bool { func (f *callFrame) processOutput(output []byte, err error, reverted bool) { output = common.CopyBytes(output) + // Clear error if tx wasn't reverted. This happened + // for pre-homestead contract storage OOG. + if err != nil && !reverted { + err = nil + } if err == nil { f.Output = output return diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go index 83a13c8cff..29a773ced4 100644 --- a/ethdb/dbtest/testsuite.go +++ b/ethdb/dbtest/testsuite.go @@ -381,7 +381,7 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) { } }) - t.Run("OperatonsAfterClose", func(t *testing.T) { + t.Run("OperationsAfterClose", func(t *testing.T) { db := New() db.Put([]byte("key"), []byte("value")) db.Close() @@ -530,7 +530,7 @@ func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) { vals = append(vals, randBytes(vsize)) } if order { - slices.SortFunc(keys, func(a, b []byte) int { return bytes.Compare(a, b) }) + slices.SortFunc(keys, bytes.Compare) } return keys, vals } diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index a4270bfaf2..5477a4c029 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -400,7 +400,7 @@ func (b *batch) Put(key, value []byte) error { return nil } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts the key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.b.Delete(key) b.size += len(key) diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index 86570feeec..d01f764f52 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -227,7 +227,7 @@ func (b *batch) Put(key, value []byte) error { return nil } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts the key removal into the batch for later committing. func (b *batch) Delete(key []byte) error { b.writes = append(b.writes, keyvalue{string(key), nil, true}) b.size += len(key) diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index 03715cdd5f..6cf09614f9 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -174,7 +174,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, e extraOptions.MemTableStopWritesThreshold = 2 } if extraOptions.MaxConcurrentCompactions == nil { - extraOptions.MaxConcurrentCompactions = func() int { return runtime.NumCPU() } + extraOptions.MaxConcurrentCompactions = runtime.NumCPU } var levels []pebble.LevelOptions if len(extraOptions.Levels) == 0 { @@ -690,7 +690,7 @@ func (b *batch) Put(key, value []byte) error { return nil } -// Delete inserts the a key removal into the batch for later committing. +// Delete inserts the key removal into the batch for later committing. 
+// Delete inserts the key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error { b.b.Delete(key, nil) b.size += len(key) diff --git a/event/multisub.go b/event/multisub.go index 5c8d2df48c..1f0af2a292 100644 --- a/event/multisub.go +++ b/event/multisub.go @@ -17,7 +17,7 @@ package event // JoinSubscriptions joins multiple subscriptions to be able to track them as -// one entity and collectively cancel them of consume any errors from them. +// one entity and collectively cancel them or consume any errors from them. func JoinSubscriptions(subs ...Subscription) Subscription { return NewSubscription(func(unsubbed <-chan struct{}) error { // Unsubscribe all subscriptions before returning diff --git a/go.mod b/go.mod index adf829a13f..810beb3a46 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.21 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 - github.com/Microsoft/go-winio v0.6.1 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/Microsoft/go-winio v0.6.2 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aws/aws-sdk-go-v2 v1.21.2 github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 @@ -15,20 +15,20 @@ require ( github.com/cloudflare/cloudflare-go v0.79.0 github.com/cockroachdb/pebble v1.1.0 github.com/consensys/gnark-crypto v0.12.1 - github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 + github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c github.com/crate-crypto/go-kzg-4844 v1.0.0 github.com/davecgh/go-spew v1.1.1 - github.com/deckarep/golang-set/v2 v2.1.0 + github.com/deckarep/golang-set/v2 v2.6.0 github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 github.com/ethereum/c-kzg-4844 v1.0.0 - github.com/fatih/color v1.13.0 + github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 + github.com/fatih/color v1.16.0 github.com/ferranbt/fastssz v0.1.2 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e github.com/fsnotify/fsnotify v1.6.0 github.com/gammazero/deque v0.2.1 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/protobuf v1.5.4 @@ -51,7 +51,7 @@ require ( github.com/kilic/bls12-381 v0.1.0 github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.17 + github.com/mattn/go-isatty v0.0.20 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 @@ -71,11 +71,11 @@ require ( go.uber.org/automaxprocs v1.5.2 golang.org/x/crypto v0.22.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.20.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.20.0 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 ) @@ -95,7 +95,7 @@ require ( github.com/aws/smithy-go v1.15.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.5 // indirect diff --git a/go.sum b/go.sum index 7327e0f570..2b59d2cdb0 100644 --- a/go.sum 
+++ b/go.sum @@ -44,17 +44,15 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgx github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -103,8 +101,9 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -133,8 +132,8 @@ github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJ github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= -github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= +github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -142,8 +141,8 @@ github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= -github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= @@ -169,8 +168,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= +github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk= github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= @@ -185,8 +186,6 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgx github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle 
v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= -github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -377,16 +376,14 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= @@ -684,7 +681,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -692,11 +688,12 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -853,8 +850,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index cd5f1300eb..d1498b8053 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -972,7 +972,7 @@ func (s *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc. // of a message call. // Note, state and stateDiff can't be specified at the same time. If state is // set, message execution will only use the data in the given state. Otherwise -// if statDiff is set, all diff will be applied first and then execute the call +// if stateDiff is set, all diff will be applied first and then execute the call // message. type OverrideAccount struct { Nonce *hexutil.Uint64 `json:"nonce"` @@ -1295,10 +1295,15 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr ErrorRatio: gasestimator.EstimateGasErrorRatio, RunScheduledTxes: runScheduledTxes, } + // Set any required transaction default, but make sure the gas cap itself is not messed with + // if it was not specified in the original argument list. 
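+ // A zero value here marks the gas limit as "unset"; CallDefaults would otherwise default a nil Gas to the global cap.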
+ if args.Gas == nil { + args.Gas = new(hexutil.Uint64) + } if err := args.CallDefaults(gasCap, header.BaseFee, b.ChainConfig().ChainID); err != nil { return 0, err } - // Run the gas estimation andwrap any revertals into a custom return + // Run the gas estimation and wrap any revertals into a custom return // Arbitrum: this also appropriately recursively calls another args.ToMessage with increased gasCap by posterCostInL2Gas amount call := args.ToMessage(header.BaseFee, gasCap, header, state, core.MessageGasEstimationMode) diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 49d50658f1..a5fec4c741 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -760,7 +760,7 @@ func TestEstimateGas(t *testing.T) { From: &accounts[0].addr, To: &accounts[1].addr, Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, + BlobHashes: []common.Hash{{0x01, 0x22}}, BlobFeeCap: (*hexutil.Big)(big.NewInt(1)), }, want: 21000, @@ -948,7 +948,7 @@ func TestCall(t *testing.T) { call: TransactionArgs{ From: &accounts[1].addr, To: &randomAccounts[2].addr, - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, + BlobHashes: []common.Hash{{0x01, 0x22}}, BlobFeeCap: (*hexutil.Big)(big.NewInt(1)), }, overrides: StateOverride{ @@ -1072,7 +1072,7 @@ func TestSendBlobTransaction(t *testing.T) { From: &b.acc.Address, To: &to, Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, + BlobHashes: []common.Hash{{0x01, 0x22}}, }) if err != nil { t.Fatalf("failed to fill tx defaults: %v\n", err) diff --git a/internal/testlog/testlog.go b/internal/testlog/testlog.go index 3740dd1f24..ad61af9eac 100644 --- a/internal/testlog/testlog.go +++ b/internal/testlog/testlog.go @@ -58,7 +58,7 @@ func (h *bufHandler) Handle(_ context.Context, r slog.Record) error { } func (h *bufHandler) Enabled(_ context.Context, lvl slog.Level) bool { - return lvl <= h.level + return lvl >= h.level } func (h *bufHandler) WithAttrs(attrs []slog.Attr) slog.Handler { diff --git a/log/logger_test.go b/log/logger_test.go index 2ea0858547..f1a9a93bce 100644 --- a/log/logger_test.go +++ b/log/logger_test.go @@ -26,7 +26,7 @@ func TestLoggingWithVmodule(t *testing.T) { logger.Trace("a message", "foo", "bar") have := out.String() // The timestamp is locale-dependent, so we want to trim that off - // "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..." + // "INFO [01-01|00:00:00.000] a message ..." -> "a message..." have = strings.Split(have, "]")[1] want := " a message foo=bar\n" if have != want { @@ -42,7 +42,7 @@ func TestTerminalHandlerWithAttrs(t *testing.T) { logger.Trace("a message", "foo", "bar") have := out.String() // The timestamp is locale-dependent, so we want to trim that off - // "INFO [01-01|00:00:00.000] a messag ..." -> "a messag..." + // "INFO [01-01|00:00:00.000] a message ..." -> "a message..." 
have = strings.Split(have, "]")[1] want := " a message baz=bat foo=bar\n" if have != want { @@ -97,7 +97,7 @@ func benchmarkLogger(b *testing.B, l Logger) { tt = time.Now() bigint = big.NewInt(100) nilbig *big.Int - err = errors.New("Oh nooes it's crap") + err = errors.New("oh nooes it's crap") ) b.ReportAllocs() b.ResetTimer() @@ -126,7 +126,7 @@ func TestLoggerOutput(t *testing.T) { tt = time.Time{} bigint = big.NewInt(100) nilbig *big.Int - err = errors.New("Oh nooes it's crap") + err = errors.New("oh nooes it's crap") smallUint = uint256.NewInt(500_000) bigUint = &uint256.Int{0xff, 0xff, 0xff, 0xff} ) @@ -150,7 +150,7 @@ func TestLoggerOutput(t *testing.T) { have := out.String() t.Logf("output %v", out.String()) - want := `INFO [11-07|19:14:33.821] This is a message foo=123 bytes="[0 0 0 0 0 0 0 0 0 0]" bonk="a string with text" time=0001-01-01T00:00:00+0000 bigint=100 nilbig= err="Oh nooes it's crap" struct="{A:Foo B:12}" struct="{A:Foo\nLinebreak B:122}" ptrstruct="&{A:Foo B:12}" smalluint=500,000 bigUint=1,600,660,942,523,603,594,864,898,306,482,794,244,293,965,082,972,225,630,372,095 + want := `INFO [11-07|19:14:33.821] This is a message foo=123 bytes="[0 0 0 0 0 0 0 0 0 0]" bonk="a string with text" time=0001-01-01T00:00:00+0000 bigint=100 nilbig= err="oh nooes it's crap" struct="{A:Foo B:12}" struct="{A:Foo\nLinebreak B:122}" ptrstruct="&{A:Foo B:12}" smalluint=500,000 bigUint=1,600,660,942,523,603,594,864,898,306,482,794,244,293,965,082,972,225,630,372,095 ` if !bytes.Equal([]byte(have)[25:], []byte(want)[25:]) { t.Errorf("Error\nhave: %q\nwant: %q", have, want) diff --git a/metrics/debug.go b/metrics/debug.go index de4a2739fe..9dfee1a866 100644 --- a/metrics/debug.go +++ b/metrics/debug.go @@ -19,18 +19,18 @@ var ( gcStats debug.GCStats ) -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. +// CaptureDebugGCStats captures new values for the Go garbage collector statistics +// exported in debug.GCStats. This is designed to be called as a goroutine. func CaptureDebugGCStats(r Registry, d time.Duration) { for range time.Tick(d) { CaptureDebugGCStatsOnce(r) } } -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. +// CaptureDebugGCStatsOnce captures new values for the Go garbage collector +// statistics exported in debug.GCStats. This is designed to be called in +// a background goroutine. Giving a registry which has not been given to +// RegisterDebugGCStats will panic. // // Be careful (but much less so) with this because debug.ReadGCStats calls // the C function runtime·lock(runtime·mheap) which, while not a stop-the-world @@ -50,9 +50,9 @@ func CaptureDebugGCStatsOnce(r Registry) { debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) } -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. +// RegisterDebugGCStats registers metrics for the Go garbage collector statistics +// exported in debug.GCStats. The metrics are named by their fully-qualified Go +// symbols, i.e. debug.GCStats.PauseTotal. 
func RegisterDebugGCStats(r Registry) { debugMetrics.GCStats.LastGC = NewGauge() debugMetrics.GCStats.NumGC = NewGauge() diff --git a/metrics/sample_test.go b/metrics/sample_test.go index 869d883337..9a0153ef8f 100644 --- a/metrics/sample_test.go +++ b/metrics/sample_test.go @@ -115,18 +115,18 @@ func TestExpDecaySample(t *testing.T) { } snap := sample.Snapshot() if have, want := int(snap.Count()), tc.updates; have != want { - t.Errorf("have %d want %d", have, want) + t.Errorf("unexpected count: have %d want %d", have, want) } if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want { - t.Errorf("have %d want %d", have, want) + t.Errorf("unexpected size: have %d want %d", have, want) } values := snap.(*sampleSnapshot).values if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want { - t.Errorf("have %d want %d", have, want) + t.Errorf("unexpected values length: have %d want %d", have, want) } for _, v := range values { if v > int64(tc.updates) || v < 0 { - t.Errorf("out of range [0, %d): %v", tc.updates, v) + t.Errorf("out of range [0, %d]: %v", tc.updates, v) } } } @@ -137,12 +137,12 @@ func TestExpDecaySample(t *testing.T) { // The priority becomes +Inf quickly after starting if this is done, // effectively freezing the set of samples until a rescale step happens. func TestExpDecaySampleNanosecondRegression(t *testing.T) { - sw := NewExpDecaySample(100, 0.99) - for i := 0; i < 100; i++ { + sw := NewExpDecaySample(1000, 0.99) + for i := 0; i < 1000; i++ { sw.Update(10) } time.Sleep(1 * time.Millisecond) - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { sw.Update(20) } s := sw.Snapshot() @@ -207,7 +207,7 @@ func TestUniformSample(t *testing.T) { } for _, v := range values { if v > 1000 || v < 0 { - t.Errorf("out of range [0, 100): %v\n", v) + t.Errorf("out of range [0, 1000]: %v\n", v) } } } @@ -263,6 +263,9 @@ func benchmarkSample(b *testing.B, s Sample) { } func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) { + if sum := s.Sum(); sum != 496598 { + t.Errorf("s.Sum(): 496598 != %v\n", sum) + } if count := s.Count(); count != 10000 { t.Errorf("s.Count(): 10000 != %v\n", count) } diff --git a/miner/miner.go b/miner/miner.go index e6d90d0925..4ff7146c17 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -53,7 +53,7 @@ type Config struct { // DefaultConfig contains default settings for miner. 
var DefaultConfig = Config{ GasCeil: 30_000_000, - GasPrice: big.NewInt(params.GWei), + GasPrice: big.NewInt(params.GWei / 1000), // The default recommit time is chosen as two seconds since // consensus-layer usually will wait a half slot of time(6s) diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 3bbc892d6f..8aeeaf0020 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -141,7 +141,7 @@ func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*Miner, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) - backend.txPool.Add(pendingTxs, true, false) + backend.txPool.Add(pendingTxs, true, true) w := New(backend, testConfig, engine) return w, backend } diff --git a/node/api.go b/node/api.go index a71ae6aa29..33dfb3a1cc 100644 --- a/node/api.go +++ b/node/api.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rpc" ) @@ -39,6 +40,9 @@ func (n *Node) apis() []rpc.API { }, { Namespace: "debug", Service: debug.Handler, + }, { + Namespace: "debug", + Service: &p2pDebugAPI{n}, }, { Namespace: "web3", Service: &web3API{n}, @@ -333,3 +337,16 @@ func (s *web3API) ClientVersion() string { func (s *web3API) Sha3(input hexutil.Bytes) hexutil.Bytes { return crypto.Keccak256(input) } + +// p2pDebugAPI provides access to p2p internals for debugging. +type p2pDebugAPI struct { + stack *Node +} + +func (s *p2pDebugAPI) DiscoveryV4Table() [][]discover.BucketNode { + disc := s.stack.server.DiscoveryV4() + if disc != nil { + return disc.TableBuckets() + } + return nil +} diff --git a/p2p/discover/common.go b/p2p/discover/common.go index 1f763904bb..0716f7472f 100644 --- a/p2p/discover/common.go +++ b/p2p/discover/common.go @@ -18,7 +18,12 @@ package discover import ( "crypto/ecdsa" + crand "crypto/rand" + "encoding/binary" + "math/rand" "net" + "net/netip" + "sync" "time" "github.com/ethereum/go-ethereum/common/mclock" @@ -30,8 +35,8 @@ import ( // UDPConn is a network connection on which discovery can operate. type UDPConn interface { - ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) - WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error) + ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) + WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (n int, err error) Close() error LocalAddr() net.Addr } @@ -62,7 +67,7 @@ type Config struct { func (cfg Config) withDefaults() Config { // Node table configuration: if cfg.PingInterval == 0 { - cfg.PingInterval = 10 * time.Second + cfg.PingInterval = 3 * time.Second } if cfg.RefreshInterval == 0 { cfg.RefreshInterval = 30 * time.Minute @@ -90,5 +95,46 @@ func ListenUDP(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { // channel if configured. type ReadPacket struct { Data []byte - Addr *net.UDPAddr + Addr netip.AddrPort +} + +type randomSource interface { + Intn(int) int + Int63n(int64) int64 + Shuffle(int, func(int, int)) +} + +// reseedingRandom is a random number generator that tracks when it was last re-seeded. 
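+// It is safe for concurrent use: a mutex guards the wrapped *rand.Rand, and the table loop re-seeds it periodically.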
+type reseedingRandom struct { + mu sync.Mutex + cur *rand.Rand +} + +func (r *reseedingRandom) seed() { + var b [8]byte + crand.Read(b[:]) + seed := binary.BigEndian.Uint64(b[:]) + new := rand.New(rand.NewSource(int64(seed))) + + r.mu.Lock() + r.cur = new + r.mu.Unlock() +} + +func (r *reseedingRandom) Intn(n int) int { + r.mu.Lock() + defer r.mu.Unlock() + return r.cur.Intn(n) +} + +func (r *reseedingRandom) Int63n(n int64) int64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.cur.Int63n(n) +} + +func (r *reseedingRandom) Shuffle(n int, swap func(i, j int)) { + r.mu.Lock() + defer r.mu.Unlock() + r.cur.Shuffle(n, swap) } diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go index b8d97b44e1..09808b71e0 100644 --- a/p2p/discover/lookup.go +++ b/p2p/discover/lookup.go @@ -29,16 +29,16 @@ import ( // not need to be an actual node identifier. type lookup struct { tab *Table - queryfunc func(*node) ([]*node, error) - replyCh chan []*node + queryfunc queryFunc + replyCh chan []*enode.Node cancelCh <-chan struct{} asked, seen map[enode.ID]bool result nodesByDistance - replyBuffer []*node + replyBuffer []*enode.Node queries int } -type queryFunc func(*node) ([]*node, error) +type queryFunc func(*enode.Node) ([]*enode.Node, error) func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *lookup { it := &lookup{ @@ -47,7 +47,7 @@ func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *l asked: make(map[enode.ID]bool), seen: make(map[enode.ID]bool), result: nodesByDistance{target: target}, - replyCh: make(chan []*node, alpha), + replyCh: make(chan []*enode.Node, alpha), cancelCh: ctx.Done(), queries: -1, } @@ -61,7 +61,7 @@ func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *l func (it *lookup) run() []*enode.Node { for it.advance() { } - return unwrapNodes(it.result.entries) + return it.result.entries } // advance advances the lookup until any new nodes have been found. @@ -139,33 +139,14 @@ func (it *lookup) slowdown() { } } -func (it *lookup) query(n *node, reply chan<- []*node) { - fails := it.tab.db.FindFails(n.ID(), n.IP()) +func (it *lookup) query(n *enode.Node, reply chan<- []*enode.Node) { r, err := it.queryfunc(n) - if errors.Is(err, errClosed) { - // Avoid recording failures on shutdown. - reply <- nil - return - } else if len(r) == 0 { - fails++ - it.tab.db.UpdateFindFails(n.ID(), n.IP(), fails) - // Remove the node from the local table if it fails to return anything useful too - // many times, but only if there are enough other nodes in the bucket. - dropped := false - if fails >= maxFindnodeFailures && it.tab.bucketLen(n.ID()) >= bucketSize/2 { - dropped = true - it.tab.delete(n) + if !errors.Is(err, errClosed) { // avoid recording failures on shutdown. + success := len(r) > 0 + it.tab.trackRequest(n, success, r) + if err != nil { + it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "err", err) } - it.tab.log.Trace("FINDNODE failed", "id", n.ID(), "failcount", fails, "dropped", dropped, "err", err) - } else if fails > 0 { - // Reset failure counter because it counts _consecutive_ failures. - it.tab.db.UpdateFindFails(n.ID(), n.IP(), 0) - } - - // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll - // just remove those again during revalidation. - for _, n := range r { - it.tab.addSeenNode(n) } reply <- r } @@ -173,7 +154,7 @@ func (it *lookup) query(n *node, reply chan<- []*node) { // lookupIterator performs lookup operations and iterates over all seen nodes. 
// When a lookup finishes, a new one is created through nextLookup. type lookupIterator struct { - buffer []*node + buffer []*enode.Node nextLookup lookupFunc ctx context.Context cancel func() @@ -192,7 +173,7 @@ func (it *lookupIterator) Node() *enode.Node { if len(it.buffer) == 0 { return nil } - return unwrapNode(it.buffer[0]) + return it.buffer[0] } // Next moves to the next node. diff --git a/p2p/discover/metrics.go b/p2p/discover/metrics.go index 3cd0ab0414..8deafbbce4 100644 --- a/p2p/discover/metrics.go +++ b/p2p/discover/metrics.go @@ -18,7 +18,7 @@ package discover import ( "fmt" - "net" + "net/netip" "github.com/ethereum/go-ethereum/metrics" ) @@ -58,16 +58,16 @@ func newMeteredConn(conn UDPConn) UDPConn { return &meteredUdpConn{UDPConn: conn} } -// ReadFromUDP delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way. -func (c *meteredUdpConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { - n, addr, err = c.UDPConn.ReadFromUDP(b) +// ReadFromUDPAddrPort delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way. +func (c *meteredUdpConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { + n, addr, err = c.UDPConn.ReadFromUDPAddrPort(b) ingressTrafficMeter.Mark(int64(n)) return n, addr, err } -// Write delegates a network write to the underlying connection, bumping the udp egress traffic meter along the way. -func (c *meteredUdpConn) WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error) { - n, err = c.UDPConn.WriteToUDP(b, addr) +// WriteToUDP delegates a network write to the underlying connection, bumping the udp egress traffic meter along the way. +func (c *meteredUdpConn) WriteToUDP(b []byte, addr netip.AddrPort) (n int, err error) { + n, err = c.UDPConn.WriteToUDPAddrPort(b, addr) egressTrafficMeter.Mark(int64(n)) return n, err } diff --git a/p2p/discover/node.go b/p2p/discover/node.go index 9ffe101ccf..042619221b 100644 --- a/p2p/discover/node.go +++ b/p2p/discover/node.go @@ -21,7 +21,8 @@ import ( "crypto/elliptic" "errors" "math/big" - "net" + "slices" + "sort" "time" "github.com/ethereum/go-ethereum/common/math" @@ -29,12 +30,22 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" ) -// node represents a host on the network. -// The fields of Node may not be modified. -type node struct { - enode.Node - addedAt time.Time // time when the node was added to the table - livenessChecks uint // how often liveness was checked +type BucketNode struct { + Node *enode.Node `json:"node"` + AddedToTable time.Time `json:"addedToTable"` + AddedToBucket time.Time `json:"addedToBucket"` + Checks int `json:"checks"` + Live bool `json:"live"` +} + +// tableNode is an entry in Table. 
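+// It wraps *enode.Node with bookkeeping for revalidation and liveness tracking.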
+type tableNode struct { + *enode.Node + revalList *revalidationList + addedToTable time.Time // first time node was added to bucket or replacement list + addedToBucket time.Time // time it was added in the actual bucket + livenessChecks uint // how often liveness was checked + isValidatedLive bool // true if existence of node is considered validated right now } type encPubkey [64]byte @@ -64,34 +75,59 @@ func (e encPubkey) id() enode.ID { return enode.ID(crypto.Keccak256Hash(e[:])) } -func wrapNode(n *enode.Node) *node { - return &node{Node: *n} -} - -func wrapNodes(ns []*enode.Node) []*node { - result := make([]*node, len(ns)) +func unwrapNodes(ns []*tableNode) []*enode.Node { + result := make([]*enode.Node, len(ns)) for i, n := range ns { - result[i] = wrapNode(n) + result[i] = n.Node } return result } -func unwrapNode(n *node) *enode.Node { - return &n.Node +func (n *tableNode) String() string { + return n.Node.String() } -func unwrapNodes(ns []*node) []*enode.Node { - result := make([]*enode.Node, len(ns)) - for i, n := range ns { - result[i] = unwrapNode(n) +// nodesByDistance is a list of nodes, ordered by distance to target. +type nodesByDistance struct { + entries []*enode.Node + target enode.ID +} + +// push adds the given node to the list, keeping the total size below maxElems. +func (h *nodesByDistance) push(n *enode.Node, maxElems int) { + ix := sort.Search(len(h.entries), func(i int) bool { + return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0 + }) + + end := len(h.entries) + if len(h.entries) < maxElems { + h.entries = append(h.entries, n) + } + if ix < end { + // Slide existing entries down to make room. + // This will overwrite the entry we just appended. + copy(h.entries[ix+1:], h.entries[ix:]) + h.entries[ix] = n } - return result } -func (n *node) addr() *net.UDPAddr { - return &net.UDPAddr{IP: n.IP(), Port: n.UDP()} +type nodeType interface { + ID() enode.ID } -func (n *node) String() string { - return n.Node.String() +// containsID reports whether ns contains a node with the given ID. +func containsID[N nodeType](ns []N, id enode.ID) bool { + for _, n := range ns { + if n.ID() == id { + return true + } + } + return false +} + +// deleteNode removes a node from the list. +func deleteNode[N nodeType](list []N, id enode.ID) []N { + return slices.DeleteFunc(list, func(n N) bool { + return n.ID() == id + }) } diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 2b7a28708b..bd3c9b4143 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -24,16 +24,14 @@ package discover import ( "context" - crand "crypto/rand" - "encoding/binary" "fmt" - mrand "math/rand" "net" - "sort" + "slices" "sync" "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/enode" @@ -55,21 +53,21 @@ const ( bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24 tableIPLimit, tableSubnet = 10, 24 - copyNodesInterval = 30 * time.Second - seedMinTableTime = 5 * time.Minute - seedCount = 30 - seedMaxAge = 5 * 24 * time.Hour + seedMinTableTime = 5 * time.Minute + seedCount = 30 + seedMaxAge = 5 * 24 * time.Hour ) // Table is the 'node table', a Kademlia-like index of neighbor nodes. The table keeps // itself up-to-date by verifying the liveness of neighbors and requesting their node // records when announcements of a new record version are received. 
type Table struct { - mutex sync.Mutex // protects buckets, bucket content, nursery, rand - buckets [nBuckets]*bucket // index of known nodes by distance - nursery []*node // bootstrap nodes - rand *mrand.Rand // source of randomness, periodically reseeded - ips netutil.DistinctNetSet + mutex sync.Mutex // protects buckets, bucket content, nursery, rand + buckets [nBuckets]*bucket // index of known nodes by distance + nursery []*enode.Node // bootstrap nodes + rand reseedingRandom // source of randomness, periodically reseeded + ips netutil.DistinctNetSet + revalidation tableRevalidation db *enode.DB // database of known nodes net transport @@ -77,13 +75,17 @@ type Table struct { log log.Logger // loop channels - refreshReq chan chan struct{} - initDone chan struct{} - closeReq chan struct{} - closed chan struct{} + refreshReq chan chan struct{} + revalResponseCh chan revalidationResponse + addNodeCh chan addNodeOp + addNodeHandled chan bool + trackRequestCh chan trackRequestOp + initDone chan struct{} + closeReq chan struct{} + closed chan struct{} - nodeAddedHook func(*bucket, *node) - nodeRemovedHook func(*bucket, *node) + nodeAddedHook func(*bucket, *tableNode) + nodeRemovedHook func(*bucket, *tableNode) } // transport is implemented by the UDP transports. @@ -98,28 +100,40 @@ type transport interface { // bucket contains nodes, ordered by their last activity. the entry // that was most recently active is the first element in entries. type bucket struct { - entries []*node // live entries, sorted by time of last contact - replacements []*node // recently seen nodes to be used if revalidation fails + entries []*tableNode // live entries, sorted by time of last contact + replacements []*tableNode // recently seen nodes to be used if revalidation fails ips netutil.DistinctNetSet index int } +type addNodeOp struct { + node *enode.Node + isInbound bool + forceSetLive bool // for tests +} + +type trackRequestOp struct { + node *enode.Node + foundNodes []*enode.Node + success bool +} + func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) { cfg = cfg.withDefaults() tab := &Table{ - net: t, - db: db, - cfg: cfg, - log: cfg.Log, - refreshReq: make(chan chan struct{}), - initDone: make(chan struct{}), - closeReq: make(chan struct{}), - closed: make(chan struct{}), - rand: mrand.New(mrand.NewSource(0)), - ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, - } - if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil { - return nil, err + net: t, + db: db, + cfg: cfg, + log: cfg.Log, + refreshReq: make(chan chan struct{}), + revalResponseCh: make(chan revalidationResponse), + addNodeCh: make(chan addNodeOp), + addNodeHandled: make(chan bool), + trackRequestCh: make(chan trackRequestOp), + initDone: make(chan struct{}), + closeReq: make(chan struct{}), + closed: make(chan struct{}), + ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}, } for i := range tab.buckets { tab.buckets[i] = &bucket{ @@ -127,41 +141,34 @@ func newTable(t transport, db *enode.DB, cfg Config) (*Table, error) { ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit}, } } - tab.seedRand() - tab.loadSeedNodes() - - return tab, nil -} + tab.rand.seed() + tab.revalidation.init(&cfg) -func newMeteredTable(t transport, db *enode.DB, cfg Config) (*Table, error) { - tab, err := newTable(t, db, cfg) - if err != nil { + // initial table content + if err := tab.setFallbackNodes(cfg.Bootnodes); err != nil { return nil, err } - if metrics.Enabled { - tab.nodeAddedHook 
= func(b *bucket, n *node) { - bucketsCounter[b.index].Inc(1) - } - tab.nodeRemovedHook = func(b *bucket, n *node) { - bucketsCounter[b.index].Dec(1) - } - } + tab.loadSeedNodes() + return tab, nil } // Nodes returns all nodes contained in the table. -func (tab *Table) Nodes() []*enode.Node { - if !tab.isInitDone() { - return nil - } - +func (tab *Table) Nodes() [][]BucketNode { tab.mutex.Lock() defer tab.mutex.Unlock() - var nodes []*enode.Node - for _, b := range &tab.buckets { - for _, n := range b.entries { - nodes = append(nodes, unwrapNode(n)) + nodes := make([][]BucketNode, len(tab.buckets)) + for i, b := range &tab.buckets { + nodes[i] = make([]BucketNode, len(b.entries)) + for j, n := range b.entries { + nodes[i][j] = BucketNode{ + Node: n.Node, + Checks: int(n.livenessChecks), + Live: n.isValidatedLive, + AddedToTable: n.addedToTable, + AddedToBucket: n.addedToBucket, + } } } return nodes @@ -171,15 +178,6 @@ func (tab *Table) self() *enode.Node { return tab.net.Self() } -func (tab *Table) seedRand() { - var b [8]byte - crand.Read(b[:]) - - tab.mutex.Lock() - tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:]))) - tab.mutex.Unlock() -} - // getNode returns the node with the given ID or nil if it isn't in the table. func (tab *Table) getNode(id enode.ID) *enode.Node { tab.mutex.Lock() @@ -188,7 +186,7 @@ func (tab *Table) getNode(id enode.ID) *enode.Node { b := tab.bucket(id) for _, e := range b.entries { if e.ID() == id { - return unwrapNode(e) + return e.Node } } return nil @@ -204,7 +202,7 @@ func (tab *Table) close() { // are used to connect to the network if the table is empty and there // are no known nodes in the database. func (tab *Table) setFallbackNodes(nodes []*enode.Node) error { - nursery := make([]*node, 0, len(nodes)) + nursery := make([]*enode.Node, 0, len(nodes)) for _, n := range nodes { if err := n.ValidateComplete(); err != nil { return fmt.Errorf("bad bootstrap node %q: %v", n, err) @@ -213,7 +211,7 @@ func (tab *Table) setFallbackNodes(nodes []*enode.Node) error { tab.log.Error("Bootstrap node filtered by netrestrict", "id", n.ID(), "ip", n.IP()) continue } - nursery = append(nursery, wrapNode(n)) + nursery = append(nursery, n) } tab.nursery = nursery return nil @@ -239,52 +237,173 @@ func (tab *Table) refresh() <-chan struct{} { return done } -// loop schedules runs of doRefresh, doRevalidate and copyLiveNodes. +// findnodeByID returns the n nodes in the table that are closest to the given id. +// This is used by the FINDNODE/v4 handler. +// +// The preferLive parameter says whether the caller wants liveness-checked results. If +// preferLive is true and the table contains any verified nodes, the result will not +// contain unverified nodes. However, if there are no verified nodes at all, the result +// will contain unverified nodes. +func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + // Scan all buckets. There might be a better way to do this, but there aren't that many + // buckets, so this solution should be fine. The worst-case complexity of this loop + // is O(tab.len() * nresults). 
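+ // Each push below costs O(nresults) in the worst case, due to its sorted insert.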
+ nodes := &nodesByDistance{target: target} + liveNodes := &nodesByDistance{target: target} + for _, b := range &tab.buckets { + for _, n := range b.entries { + nodes.push(n.Node, nresults) + if preferLive && n.isValidatedLive { + liveNodes.push(n.Node, nresults) + } + } + } + + if preferLive && len(liveNodes.entries) > 0 { + return liveNodes + } + return nodes +} + +// appendLiveNodes adds nodes at the given distance to the result slice. +// This is used by the FINDNODE/v5 handler. +func (tab *Table) appendLiveNodes(dist uint, result []*enode.Node) []*enode.Node { + if dist > 256 { + return result + } + if dist == 0 { + return append(result, tab.self()) + } + + tab.mutex.Lock() + for _, n := range tab.bucketAtDistance(int(dist)).entries { + if n.isValidatedLive { + result = append(result, n.Node) + } + } + tab.mutex.Unlock() + + // Shuffle result to avoid always returning same nodes in FINDNODE/v5. + tab.rand.Shuffle(len(result), func(i, j int) { + result[i], result[j] = result[j], result[i] + }) + return result +} + +// len returns the number of nodes in the table. +func (tab *Table) len() (n int) { + tab.mutex.Lock() + defer tab.mutex.Unlock() + + for _, b := range &tab.buckets { + n += len(b.entries) + } + return n +} + +// addFoundNode adds a node which may not be live. If the bucket has space available, +// adding the node succeeds immediately. Otherwise, the node is added to the replacements +// list. +// +// The caller must not hold tab.mutex. +func (tab *Table) addFoundNode(n *enode.Node, forceSetLive bool) bool { + op := addNodeOp{node: n, isInbound: false, forceSetLive: forceSetLive} + select { + case tab.addNodeCh <- op: + return <-tab.addNodeHandled + case <-tab.closeReq: + return false + } +} + +// addInboundNode adds a node from an inbound contact. If the bucket has no space, the +// node is added to the replacements list. +// +// There is an additional safety measure: if the table is still initializing the node is +// not added. This prevents an attack where the table could be filled by just sending ping +// repeatedly. +// +// The caller must not hold tab.mutex. +func (tab *Table) addInboundNode(n *enode.Node) bool { + op := addNodeOp{node: n, isInbound: true} + select { + case tab.addNodeCh <- op: + return <-tab.addNodeHandled + case <-tab.closeReq: + return false + } +} + +func (tab *Table) trackRequest(n *enode.Node, success bool, foundNodes []*enode.Node) { + op := trackRequestOp{n, foundNodes, success} + select { + case tab.trackRequestCh <- op: + case <-tab.closeReq: + } +} + +// loop is the main loop of Table. func (tab *Table) loop() { var ( - revalidate = time.NewTimer(tab.nextRevalidateTime()) - refresh = time.NewTimer(tab.nextRefreshTime()) - copyNodes = time.NewTicker(copyNodesInterval) - refreshDone = make(chan struct{}) // where doRefresh reports completion - revalidateDone chan struct{} // where doRevalidate reports completion - waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs + refresh = time.NewTimer(tab.nextRefreshTime()) + refreshDone = make(chan struct{}) // where doRefresh reports completion + waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs + revalTimer = mclock.NewAlarm(tab.cfg.Clock) + reseedRandTimer = time.NewTicker(10 * time.Minute) ) defer refresh.Stop() - defer revalidate.Stop() - defer copyNodes.Stop() + defer revalTimer.Stop() + defer reseedRandTimer.Stop() // Start initial refresh. 
go tab.doRefresh(refreshDone) loop: for { + nextTime := tab.revalidation.run(tab, tab.cfg.Clock.Now()) + revalTimer.Schedule(nextTime) + select { + case <-reseedRandTimer.C: + tab.rand.seed() + + case <-revalTimer.C(): + + case r := <-tab.revalResponseCh: + tab.revalidation.handleResponse(tab, r) + + case op := <-tab.addNodeCh: + tab.mutex.Lock() + ok := tab.handleAddNode(op) + tab.mutex.Unlock() + tab.addNodeHandled <- ok + + case op := <-tab.trackRequestCh: + tab.handleTrackRequest(op) + case <-refresh.C: - tab.seedRand() if refreshDone == nil { refreshDone = make(chan struct{}) go tab.doRefresh(refreshDone) } + case req := <-tab.refreshReq: waiting = append(waiting, req) if refreshDone == nil { refreshDone = make(chan struct{}) go tab.doRefresh(refreshDone) } + case <-refreshDone: for _, ch := range waiting { close(ch) } waiting, refreshDone = nil, nil refresh.Reset(tab.nextRefreshTime()) - case <-revalidate.C: - revalidateDone = make(chan struct{}) - go tab.doRevalidate(revalidateDone) - case <-revalidateDone: - revalidate.Reset(tab.nextRevalidateTime()) - revalidateDone = nil - case <-copyNodes.C: - go tab.copyLiveNodes() + case <-tab.closeReq: break loop } @@ -296,9 +415,6 @@ loop: for _, ch := range waiting { close(ch) } - if revalidateDone != nil { - <-revalidateDone - } close(tab.closed) } @@ -327,177 +443,24 @@ func (tab *Table) doRefresh(done chan struct{}) { } func (tab *Table) loadSeedNodes() { - seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge)) + seeds := tab.db.QuerySeeds(seedCount, seedMaxAge) seeds = append(seeds, tab.nursery...) for i := range seeds { seed := seeds[i] if tab.log.Enabled(context.Background(), log.LevelTrace) { age := time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) - tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age) + addr, _ := seed.UDPEndpoint() + tab.log.Trace("Found seed node in database", "id", seed.ID(), "addr", addr, "age", age) } - tab.addSeenNode(seed) + tab.handleAddNode(addNodeOp{node: seed, isInbound: false}) } } -// doRevalidate checks that the last node in a random bucket is still live and replaces or -// deletes the node if it isn't. -func (tab *Table) doRevalidate(done chan<- struct{}) { - defer func() { done <- struct{}{} }() - - last, bi := tab.nodeToRevalidate() - if last == nil { - // No non-empty bucket found. - return - } - - // Ping the selected node and wait for a pong. - remoteSeq, err := tab.net.ping(unwrapNode(last)) - - // Also fetch record if the node replied and returned a higher sequence number. - if last.Seq() < remoteSeq { - n, err := tab.net.RequestENR(unwrapNode(last)) - if err != nil { - tab.log.Debug("ENR request failed", "id", last.ID(), "addr", last.addr(), "err", err) - } else { - last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks} - } - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - b := tab.buckets[bi] - if err == nil { - // The node responded, move it to the front. - last.livenessChecks++ - tab.log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks) - tab.bumpInBucket(b, last) - return - } - // No reply received, pick a replacement or delete the node if there aren't - // any replacements. 
- if r := tab.replace(b, last); r != nil { - tab.log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP()) - } else { - tab.log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks) - } -} - -// nodeToRevalidate returns the last node in a random, non-empty bucket. -func (tab *Table) nodeToRevalidate() (n *node, bi int) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - for _, bi = range tab.rand.Perm(len(tab.buckets)) { - b := tab.buckets[bi] - if len(b.entries) > 0 { - last := b.entries[len(b.entries)-1] - return last, bi - } - } - return nil, 0 -} - -func (tab *Table) nextRevalidateTime() time.Duration { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - return time.Duration(tab.rand.Int63n(int64(tab.cfg.PingInterval))) -} - func (tab *Table) nextRefreshTime() time.Duration { - tab.mutex.Lock() - defer tab.mutex.Unlock() - half := tab.cfg.RefreshInterval / 2 return half + time.Duration(tab.rand.Int63n(int64(half))) } -// copyLiveNodes adds nodes from the table to the database if they have been in the table -// longer than seedMinTableTime. -func (tab *Table) copyLiveNodes() { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - now := time.Now() - for _, b := range &tab.buckets { - for _, n := range b.entries { - if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime { - tab.db.UpdateNode(unwrapNode(n)) - } - } - } -} - -// findnodeByID returns the n nodes in the table that are closest to the given id. -// This is used by the FINDNODE/v4 handler. -// -// The preferLive parameter says whether the caller wants liveness-checked results. If -// preferLive is true and the table contains any verified nodes, the result will not -// contain unverified nodes. However, if there are no verified nodes at all, the result -// will contain unverified nodes. -func (tab *Table) findnodeByID(target enode.ID, nresults int, preferLive bool) *nodesByDistance { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - // Scan all buckets. There might be a better way to do this, but there aren't that many - // buckets, so this solution should be fine. The worst-case complexity of this loop - // is O(tab.len() * nresults). - nodes := &nodesByDistance{target: target} - liveNodes := &nodesByDistance{target: target} - for _, b := range &tab.buckets { - for _, n := range b.entries { - nodes.push(n, nresults) - if preferLive && n.livenessChecks > 0 { - liveNodes.push(n, nresults) - } - } - } - - if preferLive && len(liveNodes.entries) > 0 { - return liveNodes - } - return nodes -} - -// appendLiveNodes adds nodes at the given distance to the result slice. -func (tab *Table) appendLiveNodes(dist uint, result []*enode.Node) []*enode.Node { - if dist > 256 { - return result - } - if dist == 0 { - return append(result, tab.self()) - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - for _, n := range tab.bucketAtDistance(int(dist)).entries { - if n.livenessChecks >= 1 { - node := n.Node // avoid handing out pointer to struct field - result = append(result, &node) - } - } - return result -} - -// len returns the number of nodes in the table. -func (tab *Table) len() (n int) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - for _, b := range &tab.buckets { - n += len(b.entries) - } - return n -} - -// bucketLen returns the number of nodes in the bucket for the given ID. 
-func (tab *Table) bucketLen(id enode.ID) int { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - return len(tab.bucket(id).entries) -} - // bucket returns the bucket for the given node ID hash. func (tab *Table) bucket(id enode.ID) *bucket { d := enode.LogDist(tab.self().ID(), id) @@ -511,95 +474,6 @@ func (tab *Table) bucketAtDistance(d int) *bucket { return tab.buckets[d-bucketMinDistance-1] } -// addSeenNode adds a node which may or may not be live to the end of a bucket. If the -// bucket has space available, adding the node succeeds immediately. Otherwise, the node is -// added to the replacements list. -// -// The caller must not hold tab.mutex. -func (tab *Table) addSeenNode(n *node) { - if n.ID() == tab.self().ID() { - return - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - b := tab.bucket(n.ID()) - if contains(b.entries, n.ID()) { - // Already in bucket, don't add. - return - } - if len(b.entries) >= bucketSize { - // Bucket full, maybe add as replacement. - tab.addReplacement(b, n) - return - } - if !tab.addIP(b, n.IP()) { - // Can't add: IP limit reached. - return - } - - // Add to end of bucket: - b.entries = append(b.entries, n) - b.replacements = deleteNode(b.replacements, n) - n.addedAt = time.Now() - - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(b, n) - } -} - -// addVerifiedNode adds a node whose existence has been verified recently to the front of a -// bucket. If the node is already in the bucket, it is moved to the front. If the bucket -// has no space, the node is added to the replacements list. -// -// There is an additional safety measure: if the table is still initializing the node -// is not added. This prevents an attack where the table could be filled by just sending -// ping repeatedly. -// -// The caller must not hold tab.mutex. -func (tab *Table) addVerifiedNode(n *node) { - if !tab.isInitDone() { - return - } - if n.ID() == tab.self().ID() { - return - } - - tab.mutex.Lock() - defer tab.mutex.Unlock() - b := tab.bucket(n.ID()) - if tab.bumpInBucket(b, n) { - // Already in bucket, moved to front. - return - } - if len(b.entries) >= bucketSize { - // Bucket full, maybe add as replacement. - tab.addReplacement(b, n) - return - } - if !tab.addIP(b, n.IP()) { - // Can't add: IP limit reached. - return - } - - // Add to front of bucket. - b.entries, _ = pushNode(b.entries, n, bucketSize) - b.replacements = deleteNode(b.replacements, n) - n.addedAt = time.Now() - - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(b, n) - } -} - -// delete removes an entry from the node table. It is used to evacuate dead nodes. -func (tab *Table) delete(node *node) { - tab.mutex.Lock() - defer tab.mutex.Unlock() - - tab.deleteInBucket(tab.bucket(node.ID()), node) -} - func (tab *Table) addIP(b *bucket, ip net.IP) bool { if len(ip) == 0 { return false // Nodes without IP cannot be added. @@ -627,128 +501,194 @@ func (tab *Table) removeIP(b *bucket, ip net.IP) { b.ips.Remove(ip) } -func (tab *Table) addReplacement(b *bucket, n *node) { - for _, e := range b.replacements { - if e.ID() == n.ID() { - return // already in list - } +// handleAddNode adds the node in the request to the table, if there is space. +// The caller must hold tab.mutex. +func (tab *Table) handleAddNode(req addNodeOp) bool { + if req.node.ID() == tab.self().ID() { + return false + } + // For nodes from inbound contact, there is an additional safety measure: if the table + // is still initializing the node is not added. 
+ if req.isInbound && !tab.isInitDone() { + return false + } + + b := tab.bucket(req.node.ID()) + n, _ := tab.bumpInBucket(b, req.node, req.isInbound) + if n != nil { + // Already in bucket. + return false + } + if len(b.entries) >= bucketSize { + // Bucket full, maybe add as replacement. + tab.addReplacement(b, req.node) + return false + } + if !tab.addIP(b, req.node.IP()) { + // Can't add: IP limit reached. + return false + } + + // Add to bucket. + wn := &tableNode{Node: req.node} + if req.forceSetLive { + wn.livenessChecks = 1 + wn.isValidatedLive = true + } + b.entries = append(b.entries, wn) + b.replacements = deleteNode(b.replacements, wn.ID()) + tab.nodeAdded(b, wn) + return true +} + +// addReplacement adds n to the replacement cache of bucket b. +func (tab *Table) addReplacement(b *bucket, n *enode.Node) { + if containsID(b.replacements, n.ID()) { + // TODO: update ENR + return } if !tab.addIP(b, n.IP()) { return } - var removed *node - b.replacements, removed = pushNode(b.replacements, n, maxReplacements) + + wn := &tableNode{Node: n, addedToTable: time.Now()} + var removed *tableNode + b.replacements, removed = pushNode(b.replacements, wn, maxReplacements) if removed != nil { tab.removeIP(b, removed.IP()) } } -// replace removes n from the replacement list and replaces 'last' with it if it is the -// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced -// with someone else or became active. -func (tab *Table) replace(b *bucket, last *node) *node { - if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() { - // Entry has moved, don't replace it. - return nil +func (tab *Table) nodeAdded(b *bucket, n *tableNode) { + if n.addedToTable == (time.Time{}) { + n.addedToTable = time.Now() } - // Still the last entry. - if len(b.replacements) == 0 { - tab.deleteInBucket(b, last) - return nil + n.addedToBucket = time.Now() + tab.revalidation.nodeAdded(tab, n) + if tab.nodeAddedHook != nil { + tab.nodeAddedHook(b, n) } - r := b.replacements[tab.rand.Intn(len(b.replacements))] - b.replacements = deleteNode(b.replacements, r) - b.entries[len(b.entries)-1] = r - tab.removeIP(b, last.IP()) - return r -} - -// bumpInBucket moves the given node to the front of the bucket entry list -// if it is contained in that list. -func (tab *Table) bumpInBucket(b *bucket, n *node) bool { - for i := range b.entries { - if b.entries[i].ID() == n.ID() { - if !n.IP().Equal(b.entries[i].IP()) { - // Endpoint has changed, ensure that the new IP fits into table limits. - tab.removeIP(b, b.entries[i].IP()) - if !tab.addIP(b, n.IP()) { - // It doesn't, put the previous one back. - tab.addIP(b, b.entries[i].IP()) - return false - } - } - // Move it to the front. - copy(b.entries[1:], b.entries[:i]) - b.entries[0] = n - return true - } + if metrics.Enabled { + bucketsCounter[b.index].Inc(1) } - return false } -func (tab *Table) deleteInBucket(b *bucket, n *node) { - // Check if the node is actually in the bucket so the removed hook - // isn't called multiple times for the same node. - if !contains(b.entries, n.ID()) { - return - } - b.entries = deleteNode(b.entries, n) - tab.removeIP(b, n.IP()) +func (tab *Table) nodeRemoved(b *bucket, n *tableNode) { + tab.revalidation.nodeRemoved(n) if tab.nodeRemovedHook != nil { tab.nodeRemovedHook(b, n) } + if metrics.Enabled { + bucketsCounter[b.index].Dec(1) + } } -func contains(ns []*node, id enode.ID) bool { - for _, n := range ns { - if n.ID() == id { - return true - } +// deleteInBucket removes node n from the table. 
+// If there are replacement nodes in the bucket, the node is replaced. +func (tab *Table) deleteInBucket(b *bucket, id enode.ID) *tableNode { + index := slices.IndexFunc(b.entries, func(e *tableNode) bool { return e.ID() == id }) + if index == -1 { + // Entry has been removed already. + return nil } - return false -} -// pushNode adds n to the front of list, keeping at most max items. -func pushNode(list []*node, n *node, max int) ([]*node, *node) { - if len(list) < max { - list = append(list, nil) + // Remove the node. + n := b.entries[index] + b.entries = slices.Delete(b.entries, index, index+1) + tab.removeIP(b, n.IP()) + tab.nodeRemoved(b, n) + + // Add replacement. + if len(b.replacements) == 0 { + tab.log.Debug("Removed dead node", "b", b.index, "id", n.ID(), "ip", n.IP()) + return nil } - removed := list[len(list)-1] - copy(list[1:], list) - list[0] = n - return list, removed + rindex := tab.rand.Intn(len(b.replacements)) + rep := b.replacements[rindex] + b.replacements = slices.Delete(b.replacements, rindex, rindex+1) + b.entries = append(b.entries, rep) + tab.nodeAdded(b, rep) + tab.log.Debug("Replaced dead node", "b", b.index, "id", n.ID(), "ip", n.IP(), "r", rep.ID(), "rip", rep.IP()) + return rep } -// deleteNode removes n from list. -func deleteNode(list []*node, n *node) []*node { - for i := range list { - if list[i].ID() == n.ID() { - return append(list[:i], list[i+1:]...) +// bumpInBucket updates a node record if it exists in the bucket. +// The second return value reports whether the node's endpoint (IP/port) was updated. +func (tab *Table) bumpInBucket(b *bucket, newRecord *enode.Node, isInbound bool) (n *tableNode, endpointChanged bool) { + i := slices.IndexFunc(b.entries, func(elem *tableNode) bool { + return elem.ID() == newRecord.ID() + }) + if i == -1 { + return nil, false // not in bucket + } + n = b.entries[i] + + // For inbound updates (from the node itself) we accept any change, even if it sets + // back the sequence number. For found nodes (!isInbound), seq has to advance. Note + // this check also ensures found discv4 nodes (which always have seq=0) can't be + // updated. + if newRecord.Seq() <= n.Seq() && !isInbound { + return n, false + } + + // Check endpoint update against IP limits. + ipchanged := newRecord.IPAddr() != n.IPAddr() + portchanged := newRecord.UDP() != n.UDP() + if ipchanged { + tab.removeIP(b, n.IP()) + if !tab.addIP(b, newRecord.IP()) { + // It doesn't fit with the limit, put the previous record back. + tab.addIP(b, n.IP()) + return n, false } } - return list -} -// nodesByDistance is a list of nodes, ordered by distance to target. -type nodesByDistance struct { - entries []*node - target enode.ID + // Apply update. + n.Node = newRecord + if ipchanged || portchanged { + // Ensure node is revalidated quickly for endpoint changes. + tab.revalidation.nodeEndpointChanged(tab, n) + return n, true + } + return n, false } -// push adds the given node to the list, keeping the total size below maxElems. -func (h *nodesByDistance) push(n *node, maxElems int) { - ix := sort.Search(len(h.entries), func(i int) bool { - return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0 - }) +func (tab *Table) handleTrackRequest(op trackRequestOp) { + var fails int + if op.success { + // Reset failure counter because it counts _consecutive_ failures. 
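+		// (fails stays zero on success, so the removal check below cannot trigger.)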
+		tab.db.UpdateFindFails(op.node.ID(), op.node.IP(), 0)
+	} else {
+		fails = tab.db.FindFails(op.node.ID(), op.node.IP())
+		fails++
+		tab.db.UpdateFindFails(op.node.ID(), op.node.IP(), fails)
+	}

-	end := len(h.entries)
-	if len(h.entries) < maxElems {
-		h.entries = append(h.entries, n)
+	tab.mutex.Lock()
+	defer tab.mutex.Unlock()
+
+	b := tab.bucket(op.node.ID())
+	// Remove the node from the local table if it fails to return anything useful too
+	// many times, but only if there are enough other nodes in the bucket. This latter
+	// condition specifically exists to make bootstrapping in smaller test networks more
+	// reliable.
+	if fails >= maxFindnodeFailures && len(b.entries) >= bucketSize/4 {
+		tab.deleteInBucket(b, op.node.ID())
 	}
-	if ix < end {
-		// Slide existing entries down to make room.
-		// This will overwrite the entry we just appended.
-		copy(h.entries[ix+1:], h.entries[ix:])
-		h.entries[ix] = n
+
+	// Add found nodes.
+	for _, n := range op.foundNodes {
+		tab.handleAddNode(addNodeOp{n, false, false})
 	}
 }
+
+// pushNode adds n to the front of list, keeping at most max items.
+func pushNode(list []*tableNode, n *tableNode, max int) ([]*tableNode, *tableNode) {
+	if len(list) < max {
+		list = append(list, nil)
+	}
+	removed := list[len(list)-1]
+	copy(list[1:], list)
+	list[0] = n
+	return list, removed
+}
diff --git a/p2p/discover/table_reval.go b/p2p/discover/table_reval.go
new file mode 100644
index 0000000000..f2ea8b34fa
--- /dev/null
+++ b/p2p/discover/table_reval.go
@@ -0,0 +1,244 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

+package discover
+
+import (
+	"fmt"
+	"math"
+	"slices"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+)
+
+const never = mclock.AbsTime(math.MaxInt64)
+
+const slowRevalidationFactor = 3
+
+// tableRevalidation implements the node revalidation process.
+// It tracks all nodes contained in Table, and schedules sending PING to them.
+type tableRevalidation struct {
+	fast      revalidationList
+	slow      revalidationList
+	activeReq map[enode.ID]struct{}
+}
+
+type revalidationResponse struct {
+	n          *tableNode
+	newRecord  *enode.Node
+	didRespond bool
+}
+
+func (tr *tableRevalidation) init(cfg *Config) {
+	tr.activeReq = make(map[enode.ID]struct{})
+	tr.fast.nextTime = never
+	tr.fast.interval = cfg.PingInterval
+	tr.fast.name = "fast"
+	tr.slow.nextTime = never
+	tr.slow.interval = cfg.PingInterval * slowRevalidationFactor
+	tr.slow.name = "slow"
+}
+
+// nodeAdded is called when the table receives a new node.
+func (tr *tableRevalidation) nodeAdded(tab *Table, n *tableNode) {
+	tr.fast.push(n, tab.cfg.Clock.Now(), &tab.rand)
+}
+
+// nodeRemoved is called when a node is removed from the table.
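+// It panics if the node is not tracked by a revalidation list.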
+func (tr *tableRevalidation) nodeRemoved(n *tableNode) { + if n.revalList == nil { + panic(fmt.Errorf("removed node %v has nil revalList", n.ID())) + } + n.revalList.remove(n) +} + +// nodeEndpointChanged is called when a change in IP or port is detected. +func (tr *tableRevalidation) nodeEndpointChanged(tab *Table, n *tableNode) { + n.isValidatedLive = false + tr.moveToList(&tr.fast, n, tab.cfg.Clock.Now(), &tab.rand) +} + +// run performs node revalidation. +// It returns the next time it should be invoked, which is used in the Table main loop +// to schedule a timer. However, run can be called at any time. +func (tr *tableRevalidation) run(tab *Table, now mclock.AbsTime) (nextTime mclock.AbsTime) { + if n := tr.fast.get(now, &tab.rand, tr.activeReq); n != nil { + tr.startRequest(tab, n) + tr.fast.schedule(now, &tab.rand) + } + if n := tr.slow.get(now, &tab.rand, tr.activeReq); n != nil { + tr.startRequest(tab, n) + tr.slow.schedule(now, &tab.rand) + } + + return min(tr.fast.nextTime, tr.slow.nextTime) +} + +// startRequest spawns a revalidation request for node n. +func (tr *tableRevalidation) startRequest(tab *Table, n *tableNode) { + if _, ok := tr.activeReq[n.ID()]; ok { + panic(fmt.Errorf("duplicate startRequest (node %v)", n.ID())) + } + tr.activeReq[n.ID()] = struct{}{} + resp := revalidationResponse{n: n} + + // Fetch the node while holding lock. + tab.mutex.Lock() + node := n.Node + tab.mutex.Unlock() + + go tab.doRevalidate(resp, node) +} + +func (tab *Table) doRevalidate(resp revalidationResponse, node *enode.Node) { + // Ping the selected node and wait for a pong response. + remoteSeq, err := tab.net.ping(node) + resp.didRespond = err == nil + + // Also fetch record if the node replied and returned a higher sequence number. + if remoteSeq > node.Seq() { + newrec, err := tab.net.RequestENR(node) + if err != nil { + tab.log.Debug("ENR request failed", "id", node.ID(), "err", err) + } else { + resp.newRecord = newrec + } + } + + select { + case tab.revalResponseCh <- resp: + case <-tab.closed: + } +} + +// handleResponse processes the result of a revalidation request. +func (tr *tableRevalidation) handleResponse(tab *Table, resp revalidationResponse) { + var ( + now = tab.cfg.Clock.Now() + n = resp.n + b = tab.bucket(n.ID()) + ) + delete(tr.activeReq, n.ID()) + + // If the node was removed from the table while getting checked, we need to stop + // processing here to avoid re-adding it. + if n.revalList == nil { + return + } + + // Store potential seeds in database. + // This is done via defer to avoid holding Table lock while writing to DB. + defer func() { + if n.isValidatedLive && n.livenessChecks > 5 { + tab.db.UpdateNode(resp.n.Node) + } + }() + + // Remaining logic needs access to Table internals. + tab.mutex.Lock() + defer tab.mutex.Unlock() + + if !resp.didRespond { + n.livenessChecks /= 3 + if n.livenessChecks <= 0 { + tab.deleteInBucket(b, n.ID()) + } else { + tab.log.Debug("Node revalidation failed", "b", b.index, "id", n.ID(), "checks", n.livenessChecks, "q", n.revalList.name) + tr.moveToList(&tr.fast, n, now, &tab.rand) + } + return + } + + // The node responded. + n.livenessChecks++ + n.isValidatedLive = true + tab.log.Debug("Node revalidated", "b", b.index, "id", n.ID(), "checks", n.livenessChecks, "q", n.revalList.name) + var endpointChanged bool + if resp.newRecord != nil { + _, endpointChanged = tab.bumpInBucket(b, resp.newRecord, false) + } + + // Node moves to slow list if it passed and hasn't changed. 
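+	// (If the endpoint changed, nodeEndpointChanged has already put it back on the fast list.)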
+	if !endpointChanged {
+		tr.moveToList(&tr.slow, n, now, &tab.rand)
+	}
+}
+
+// moveToList ensures n is in the 'dest' list.
+func (tr *tableRevalidation) moveToList(dest *revalidationList, n *tableNode, now mclock.AbsTime, rand randomSource) {
+	if n.revalList == dest {
+		return
+	}
+	if n.revalList != nil {
+		n.revalList.remove(n)
+	}
+	dest.push(n, now, rand)
+}
+
+// revalidationList holds a list of nodes and the next revalidation time.
+type revalidationList struct {
+	nodes    []*tableNode
+	nextTime mclock.AbsTime
+	interval time.Duration
+	name     string
+}
+
+// get returns a random node from the list. Nodes in the 'exclude' map are not returned.
+func (list *revalidationList) get(now mclock.AbsTime, rand randomSource, exclude map[enode.ID]struct{}) *tableNode {
+	if now < list.nextTime || len(list.nodes) == 0 {
+		return nil
+	}
+	for i := 0; i < len(list.nodes)*3; i++ {
+		n := list.nodes[rand.Intn(len(list.nodes))]
+		_, excluded := exclude[n.ID()]
+		if !excluded {
+			return n
+		}
+	}
+	return nil
+}
+
+func (list *revalidationList) schedule(now mclock.AbsTime, rand randomSource) {
+	list.nextTime = now.Add(time.Duration(rand.Int63n(int64(list.interval))))
+}
+
+func (list *revalidationList) push(n *tableNode, now mclock.AbsTime, rand randomSource) {
+	list.nodes = append(list.nodes, n)
+	if list.nextTime == never {
+		list.schedule(now, rand)
+	}
+	n.revalList = list
+}
+
+func (list *revalidationList) remove(n *tableNode) {
+	i := slices.Index(list.nodes, n)
+	if i == -1 {
+		panic(fmt.Errorf("node %v not found in list", n.ID()))
+	}
+	list.nodes = slices.Delete(list.nodes, i, i+1)
+	if len(list.nodes) == 0 {
+		list.nextTime = never
+	}
+	n.revalList = nil
+}
+
+func (list *revalidationList) contains(id enode.ID) bool {
+	return slices.ContainsFunc(list.nodes, func(n *tableNode) bool {
+		return n.ID() == id
+	})
+}
diff --git a/p2p/discover/table_reval_test.go b/p2p/discover/table_reval_test.go
new file mode 100644
index 0000000000..3605443934
--- /dev/null
+++ b/p2p/discover/table_reval_test.go
@@ -0,0 +1,119 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

+package discover
+
+import (
+	"net"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common/mclock"
+	"github.com/ethereum/go-ethereum/p2p/enode"
+	"github.com/ethereum/go-ethereum/p2p/enr"
+)
+
+// This test checks that revalidation can handle a node disappearing while
+// a request is active.
+func TestRevalidation_nodeRemoved(t *testing.T) {
+	var (
+		clock     mclock.Simulated
+		transport = newPingRecorder()
+		tab, db   = newInactiveTestTable(transport, Config{Clock: &clock})
+		tr        = &tab.revalidation
+	)
+	defer db.Close()
+
+	// Add a node to the table.
+	node := nodeAtDistance(tab.self().ID(), 255, net.IP{77, 88, 99, 1})
+	tab.handleAddNode(addNodeOp{node: node})
+
+	// Start a revalidation request.
Schedule once to get the next start time, + // then advance the clock to that point and schedule again to start. + next := tr.run(tab, clock.Now()) + clock.Run(time.Duration(next + 1)) + tr.run(tab, clock.Now()) + if len(tr.activeReq) != 1 { + t.Fatal("revalidation request did not start:", tr.activeReq) + } + + // Delete the node. + tab.deleteInBucket(tab.bucket(node.ID()), node.ID()) + + // Now finish the revalidation request. + var resp revalidationResponse + select { + case resp = <-tab.revalResponseCh: + case <-time.After(1 * time.Second): + t.Fatal("timed out waiting for revalidation") + } + tr.handleResponse(tab, resp) + + // Ensure the node was not re-added to the table. + if tab.getNode(node.ID()) != nil { + t.Fatal("node was re-added to Table") + } + if tr.fast.contains(node.ID()) || tr.slow.contains(node.ID()) { + t.Fatal("removed node contained in revalidation list") + } +} + +// This test checks that nodes with an updated endpoint remain in the fast revalidation list. +func TestRevalidation_endpointUpdate(t *testing.T) { + var ( + clock mclock.Simulated + transport = newPingRecorder() + tab, db = newInactiveTestTable(transport, Config{Clock: &clock}) + tr = &tab.revalidation + ) + defer db.Close() + + // Add node to table. + node := nodeAtDistance(tab.self().ID(), 255, net.IP{77, 88, 99, 1}) + tab.handleAddNode(addNodeOp{node: node}) + + // Update the record in transport, including endpoint update. + record := node.Record() + record.Set(enr.IP{100, 100, 100, 100}) + record.Set(enr.UDP(9999)) + nodev2 := enode.SignNull(record, node.ID()) + transport.updateRecord(nodev2) + + // Start a revalidation request. Schedule once to get the next start time, + // then advance the clock to that point and schedule again to start. + next := tr.run(tab, clock.Now()) + clock.Run(time.Duration(next + 1)) + tr.run(tab, clock.Now()) + if len(tr.activeReq) != 1 { + t.Fatal("revalidation request did not start:", tr.activeReq) + } + + // Now finish the revalidation request. + var resp revalidationResponse + select { + case resp = <-tab.revalResponseCh: + case <-time.After(1 * time.Second): + t.Fatal("timed out waiting for revalidation") + } + tr.handleResponse(tab, resp) + + if tr.fast.nodes[0].ID() != node.ID() { + t.Fatal("node not contained in fast revalidation list") + } + if tr.fast.nodes[0].isValidatedLive { + t.Fatal("node is marked live after endpoint change") + } +} diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index 3ba3422251..30e7d56f4a 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -20,14 +20,17 @@ import ( "crypto/ecdsa" "fmt" "math/rand" - "net" "reflect" + "slices" "testing" "testing/quick" "time" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/testlog" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/netutil" @@ -49,106 +52,109 @@ func TestTable_pingReplace(t *testing.T) { } func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) { + simclock := new(mclock.Simulated) transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{ + Clock: simclock, + Log: testlog.Logger(t, log.LevelTrace), + }) defer db.Close() defer tab.close() <-tab.initDone // Fill up the sender's bucket. 
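+	// fillBucket pads the bucket with fake nodes at the replacement's log-distance.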
- pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8") - pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99)) - last := fillBucket(tab, pingSender) + replacementNodeKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8") + replacementNode := enode.NewV4(&replacementNodeKey.PublicKey, net.IP{127, 0, 0, 1}, 99, 99) + last := fillBucket(tab, replacementNode.ID()) + tab.mutex.Lock() + nodeEvents := newNodeEventRecorder(128) + tab.nodeAddedHook = nodeEvents.nodeAdded + tab.nodeRemovedHook = nodeEvents.nodeRemoved + tab.mutex.Unlock() - // Add the sender as if it just pinged us. Revalidate should replace the last node in - // its bucket if it is unresponsive. Revalidate again to ensure that + // The revalidation process should replace + // this node in the bucket if it is unresponsive. transport.dead[last.ID()] = !lastInBucketIsResponding - transport.dead[pingSender.ID()] = !newNodeIsResponding - tab.addSeenNode(pingSender) - tab.doRevalidate(make(chan struct{}, 1)) - tab.doRevalidate(make(chan struct{}, 1)) - - if !transport.pinged[last.ID()] { - // Oldest node in bucket is pinged to see whether it is still alive. - t.Error("table did not ping last node in bucket") + transport.dead[replacementNode.ID()] = !newNodeIsResponding + + // Add replacement node to table. + tab.addFoundNode(replacementNode, false) + + t.Log("last:", last.ID()) + t.Log("replacement:", replacementNode.ID()) + + // Wait until the last node was pinged. + waitForRevalidationPing(t, transport, tab, last.ID()) + + if !lastInBucketIsResponding { + if !nodeEvents.waitNodeAbsent(last.ID(), 2*time.Second) { + t.Error("last node was not removed") + } + if !nodeEvents.waitNodePresent(replacementNode.ID(), 2*time.Second) { + t.Error("replacement node was not added") + } + + // If a replacement is expected, we also need to wait until the replacement node + // was pinged and added/removed. + waitForRevalidationPing(t, transport, tab, replacementNode.ID()) + if !newNodeIsResponding { + if !nodeEvents.waitNodeAbsent(replacementNode.ID(), 2*time.Second) { + t.Error("replacement node was not removed") + } + } } + // Check bucket content. 
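+	// Holding the mutex lets the test read bucket entries directly.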
tab.mutex.Lock() defer tab.mutex.Unlock() wantSize := bucketSize if !lastInBucketIsResponding && !newNodeIsResponding { wantSize-- } - if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize { - t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize) + bucket := tab.bucket(replacementNode.ID()) + if l := len(bucket.entries); l != wantSize { + t.Errorf("wrong bucket size after revalidation: got %d, want %d", l, wantSize) } - if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding { - t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding) + if ok := containsID(bucket.entries, last.ID()); ok != lastInBucketIsResponding { + t.Errorf("revalidated node found: %t, want: %t", ok, lastInBucketIsResponding) } wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding - if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry { - t.Errorf("new entry found: %t, want: %t", found, wantNewEntry) + if ok := containsID(bucket.entries, replacementNode.ID()); ok != wantNewEntry { + t.Errorf("replacement node found: %t, want: %t", ok, wantNewEntry) } } -func TestBucket_bumpNoDuplicates(t *testing.T) { - t.Parallel() - cfg := &quick.Config{ - MaxCount: 1000, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - Values: func(args []reflect.Value, rand *rand.Rand) { - // generate a random list of nodes. this will be the content of the bucket. - n := rand.Intn(bucketSize-1) + 1 - nodes := make([]*node, n) - for i := range nodes { - nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200)) - } - args[0] = reflect.ValueOf(nodes) - // generate random bump positions. - bumps := make([]int, rand.Intn(100)) - for i := range bumps { - bumps[i] = rand.Intn(len(nodes)) - } - args[1] = reflect.ValueOf(bumps) - }, - } - - prop := func(nodes []*node, bumps []int) (ok bool) { - tab, db := newTestTable(newPingRecorder()) - defer db.Close() - defer tab.close() +// waitForRevalidationPing waits until a PING message is sent to a node with the given id. +func waitForRevalidationPing(t *testing.T, transport *pingRecorder, tab *Table, id enode.ID) *enode.Node { + t.Helper() - b := &bucket{entries: make([]*node, len(nodes))} - copy(b.entries, nodes) - for i, pos := range bumps { - tab.bumpInBucket(b, b.entries[pos]) - if hasDuplicates(b.entries) { - t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps)) - for _, n := range b.entries { - t.Logf(" %p", n) - } - return false - } + simclock := tab.cfg.Clock.(*mclock.Simulated) + maxAttempts := tab.len() * 8 + for i := 0; i < maxAttempts; i++ { + simclock.Run(tab.cfg.PingInterval * slowRevalidationFactor) + p := transport.waitPing(2 * time.Second) + if p == nil { + t.Fatal("Table did not send revalidation ping") + } + if id == (enode.ID{}) || p.ID() == id { + return p } - checkIPLimitInvariant(t, tab) - return true - } - if err := quick.Check(prop, cfg); err != nil { - t.Error(err) } + t.Fatalf("Table did not ping node %v (%d attempts)", id, maxAttempts) + return nil } // This checks that the table-wide IP limit is applied correctly. 
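+// Adding more than tableIPLimit nodes from the same subnet must fail.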
func TestTable_IPLimit(t *testing.T) { transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{}) defer db.Close() defer tab.close() for i := 0; i < tableIPLimit+1; i++ { n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)}) - tab.addSeenNode(n) + tab.addFoundNode(n, false) } if tab.len() > tableIPLimit { t.Errorf("too many nodes in table") @@ -159,14 +165,14 @@ func TestTable_IPLimit(t *testing.T) { // This checks that the per-bucket IP limit is applied correctly. func TestTable_BucketIPLimit(t *testing.T) { transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{}) defer db.Close() defer tab.close() d := 3 for i := 0; i < bucketIPLimit+1; i++ { n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)}) - tab.addSeenNode(n) + tab.addFoundNode(n, false) } if tab.len() > bucketIPLimit { t.Errorf("too many nodes in table") @@ -196,7 +202,7 @@ func TestTable_findnodeByID(t *testing.T) { test := func(test *closeTest) bool { // for any node table, Target and N transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{}) defer db.Close() defer tab.close() fillTable(tab, test.All, true) @@ -227,7 +233,7 @@ func TestTable_findnodeByID(t *testing.T) { // check that the result nodes have minimum distance to target. for _, b := range tab.buckets { for _, n := range b.entries { - if contains(result, n.ID()) { + if containsID(result, n.ID()) { continue // don't run the check below for nodes in result } farthestResult := result[len(result)-1].ID() @@ -250,7 +256,7 @@ func TestTable_findnodeByID(t *testing.T) { type closeTest struct { Self enode.ID Target enode.ID - All []*node + All []*enode.Node N int } @@ -263,15 +269,14 @@ func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value { for _, id := range gen([]enode.ID{}, rand).([]enode.ID) { r := new(enr.Record) r.Set(enr.IP(genIP(rand))) - n := wrapNode(enode.SignNull(r, id)) - n.livenessChecks = 1 + n := enode.SignNull(r, id) t.All = append(t.All, n) } return reflect.ValueOf(t) } -func TestTable_addVerifiedNode(t *testing.T) { - tab, db := newTestTable(newPingRecorder()) +func TestTable_addInboundNode(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) <-tab.initDone defer db.Close() defer tab.close() @@ -279,31 +284,29 @@ func TestTable_addVerifiedNode(t *testing.T) { // Insert two nodes. n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1}) n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2}) - tab.addSeenNode(n1) - tab.addSeenNode(n2) + tab.addFoundNode(n1, false) + tab.addFoundNode(n2, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2}) - // Verify bucket content: - bcontent := []*node{n1, n2} - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) { - t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries) - } - - // Add a changed version of n2. + // Add a changed version of n2. The bucket should be updated. newrec := n2.Record() newrec.Set(enr.IP{99, 99, 99, 99}) - newn2 := wrapNode(enode.SignNull(newrec, n2.ID())) - tab.addVerifiedNode(newn2) - - // Check that bucket is updated correctly. 
- newBcontent := []*node{newn2, n1} - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, newBcontent) { - t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries) - } - checkIPLimitInvariant(t, tab) + n2v2 := enode.SignNull(newrec, n2.ID()) + tab.addInboundNode(n2v2) + checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) + + // Try updating n2 without sequence number change. The update is accepted + // because it's inbound. + newrec = n2.Record() + newrec.Set(enr.IP{100, 100, 100, 100}) + newrec.SetSeq(n2.Seq()) + n2v3 := enode.SignNull(newrec, n2.ID()) + tab.addInboundNode(n2v3) + checkBucketContent(t, tab, []*enode.Node{n1, n2v3}) } -func TestTable_addSeenNode(t *testing.T) { - tab, db := newTestTable(newPingRecorder()) +func TestTable_addFoundNode(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) <-tab.initDone defer db.Close() defer tab.close() @@ -311,25 +314,86 @@ func TestTable_addSeenNode(t *testing.T) { // Insert two nodes. n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1}) n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2}) - tab.addSeenNode(n1) - tab.addSeenNode(n2) - - // Verify bucket content: - bcontent := []*node{n1, n2} - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) { - t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries) - } + tab.addFoundNode(n1, false) + tab.addFoundNode(n2, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2}) - // Add a changed version of n2. + // Add a changed version of n2. The bucket should be updated. newrec := n2.Record() newrec.Set(enr.IP{99, 99, 99, 99}) - newn2 := wrapNode(enode.SignNull(newrec, n2.ID())) - tab.addSeenNode(newn2) + n2v2 := enode.SignNull(newrec, n2.ID()) + tab.addFoundNode(n2v2, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) + + // Try updating n2 without a sequence number change. + // The update should not be accepted. + newrec = n2.Record() + newrec.Set(enr.IP{100, 100, 100, 100}) + newrec.SetSeq(n2.Seq()) + n2v3 := enode.SignNull(newrec, n2.ID()) + tab.addFoundNode(n2v3, false) + checkBucketContent(t, tab, []*enode.Node{n1, n2v2}) +} + +// This test checks that discv4 nodes can update their own endpoint via PING. +func TestTable_addInboundNodeUpdateV4Accept(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) + <-tab.initDone + defer db.Close() + defer tab.close() + + // Add a v4 node. + key, _ := crypto.HexToECDSA("dd3757a8075e88d0f2b1431e7d3c5b1562e1c0aab9643707e8cbfcc8dae5cfe3") + n1 := enode.NewV4(&key.PublicKey, net.IP{88, 77, 66, 1}, 9000, 9000) + tab.addInboundNode(n1) + checkBucketContent(t, tab, []*enode.Node{n1}) + + // Add an updated version with changed IP. + // The update will be accepted because it is inbound. + n1v2 := enode.NewV4(&key.PublicKey, net.IP{99, 99, 99, 99}, 9000, 9000) + tab.addInboundNode(n1v2) + checkBucketContent(t, tab, []*enode.Node{n1v2}) +} + +// This test checks that discv4 node entries will NOT be updated when a +// changed record is found. +func TestTable_addFoundNodeV4UpdateReject(t *testing.T) { + tab, db := newTestTable(newPingRecorder(), Config{}) + <-tab.initDone + defer db.Close() + defer tab.close() - // Check that bucket content is unchanged. - if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) { - t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries) + // Add a v4 node. 
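+	// (Records made by enode.NewV4 always have seq 0, so a found update can never
+	// advance the sequence number.)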
+ key, _ := crypto.HexToECDSA("dd3757a8075e88d0f2b1431e7d3c5b1562e1c0aab9643707e8cbfcc8dae5cfe3") + n1 := enode.NewV4(&key.PublicKey, net.IP{88, 77, 66, 1}, 9000, 9000) + tab.addFoundNode(n1, false) + checkBucketContent(t, tab, []*enode.Node{n1}) + + // Add an updated version with changed IP. + // The update won't be accepted because it isn't inbound. + n1v2 := enode.NewV4(&key.PublicKey, net.IP{99, 99, 99, 99}, 9000, 9000) + tab.addFoundNode(n1v2, false) + checkBucketContent(t, tab, []*enode.Node{n1}) +} + +func checkBucketContent(t *testing.T, tab *Table, nodes []*enode.Node) { + t.Helper() + + b := tab.bucket(nodes[0].ID()) + if reflect.DeepEqual(unwrapNodes(b.entries), nodes) { + return } + t.Log("wrong bucket content. have nodes:") + for _, n := range b.entries { + t.Logf(" %v (seq=%v, ip=%v)", n.ID(), n.Seq(), n.IP()) + } + t.Log("want nodes:") + for _, n := range nodes { + t.Logf(" %v (seq=%v, ip=%v)", n.ID(), n.Seq(), n.IP()) + } + t.FailNow() + + // Also check IP limits. checkIPLimitInvariant(t, tab) } @@ -337,7 +401,10 @@ func TestTable_addSeenNode(t *testing.T) { // announces a new sequence number, the new record should be pulled. func TestTable_revalidateSyncRecord(t *testing.T) { transport := newPingRecorder() - tab, db := newTestTable(transport) + tab, db := newTestTable(transport, Config{ + Clock: new(mclock.Simulated), + Log: testlog.Logger(t, log.LevelTrace), + }) <-tab.initDone defer db.Close() defer tab.close() @@ -346,15 +413,19 @@ func TestTable_revalidateSyncRecord(t *testing.T) { var r enr.Record r.Set(enr.IP(net.IP{127, 0, 0, 1})) id := enode.ID{1} - n1 := wrapNode(enode.SignNull(&r, id)) - tab.addSeenNode(n1) + n1 := enode.SignNull(&r, id) + tab.addFoundNode(n1, false) // Update the node record. r.Set(enr.WithEntry("foo", "bar")) n2 := enode.SignNull(&r, id) transport.updateRecord(n2) - tab.doRevalidate(make(chan struct{}, 1)) + // Wait for revalidation. We wait for the node to be revalidated two times + // in order to synchronize with the update in the table. + waitForRevalidationPing(t, transport, tab, n2.ID()) + waitForRevalidationPing(t, transport, tab, n2.ID()) + intable := tab.getNode(id) if !reflect.DeepEqual(intable, n2) { t.Fatalf("table contains old record with seq %d, want seq %d", intable.Seq(), n2.Seq()) @@ -366,7 +437,7 @@ func TestNodesPush(t *testing.T) { n1 := nodeAtDistance(target, 255, intIP(1)) n2 := nodeAtDistance(target, 254, intIP(2)) n3 := nodeAtDistance(target, 253, intIP(3)) - perm := [][]*node{ + perm := [][]*enode.Node{ {n3, n2, n1}, {n3, n1, n2}, {n2, n3, n1}, @@ -381,7 +452,7 @@ func TestNodesPush(t *testing.T) { for _, n := range nodes { list.push(n, 3) } - if !slicesEqual(list.entries, perm[0], nodeIDEqual) { + if !slices.EqualFunc(list.entries, perm[0], nodeIDEqual) { t.Fatal("not equal") } } @@ -392,28 +463,16 @@ func TestNodesPush(t *testing.T) { for _, n := range nodes { list.push(n, 2) } - if !slicesEqual(list.entries, perm[0][:2], nodeIDEqual) { + if !slices.EqualFunc(list.entries, perm[0][:2], nodeIDEqual) { t.Fatal("not equal") } } } -func nodeIDEqual(n1, n2 *node) bool { +func nodeIDEqual[N nodeType](n1, n2 N) bool { return n1.ID() == n2.ID() } -func slicesEqual[T any](s1, s2 []T, check func(e1, e2 T) bool) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if !check(s1[i], s2[i]) { - return false - } - } - return true -} - // gen wraps quick.Value so it's easier to use. // it generates a random value of the given value's type. 
func gen(typ interface{}, rand *rand.Rand) interface{} { diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go index f5d4d39bdb..997ac37799 100644 --- a/p2p/discover/table_util_test.go +++ b/p2p/discover/table_util_test.go @@ -26,6 +26,8 @@ import ( "net" "slices" "sync" + "sync/atomic" + "time" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/enode" @@ -40,27 +42,32 @@ func init() { nullNode = enode.SignNull(&r, enode.ID{}) } -func newTestTable(t transport) (*Table, *enode.DB) { - cfg := Config{} +func newTestTable(t transport, cfg Config) (*Table, *enode.DB) { + tab, db := newInactiveTestTable(t, cfg) + go tab.loop() + return tab, db +} + +// newInactiveTestTable creates a Table without running the main loop. +func newInactiveTestTable(t transport, cfg Config) (*Table, *enode.DB) { db, _ := enode.OpenDB("") tab, _ := newTable(t, db, cfg) - go tab.loop() return tab, db } // nodeAtDistance creates a node for which enode.LogDist(base, n.id) == ld. -func nodeAtDistance(base enode.ID, ld int, ip net.IP) *node { +func nodeAtDistance(base enode.ID, ld int, ip net.IP) *enode.Node { var r enr.Record r.Set(enr.IP(ip)) r.Set(enr.UDP(30303)) - return wrapNode(enode.SignNull(&r, idAtDistance(base, ld))) + return enode.SignNull(&r, idAtDistance(base, ld)) } // nodesAtDistance creates n nodes for which enode.LogDist(base, node.ID()) == ld. func nodesAtDistance(base enode.ID, ld int, n int) []*enode.Node { results := make([]*enode.Node, n) for i := range results { - results[i] = unwrapNode(nodeAtDistance(base, ld, intIP(i))) + results[i] = nodeAtDistance(base, ld, intIP(i)) } return results } @@ -98,31 +105,33 @@ func intIP(i int) net.IP { } // fillBucket inserts nodes into the given bucket until it is full. -func fillBucket(tab *Table, n *node) (last *node) { - ld := enode.LogDist(tab.self().ID(), n.ID()) - b := tab.bucket(n.ID()) +func fillBucket(tab *Table, id enode.ID) (last *tableNode) { + ld := enode.LogDist(tab.self().ID(), id) + b := tab.bucket(id) for len(b.entries) < bucketSize { - b.entries = append(b.entries, nodeAtDistance(tab.self().ID(), ld, intIP(ld))) + node := nodeAtDistance(tab.self().ID(), ld, intIP(ld)) + if !tab.addFoundNode(node, false) { + panic("node not added") + } } return b.entries[bucketSize-1] } // fillTable adds nodes the table to the end of their corresponding bucket // if the bucket is not full. The caller must not hold tab.mutex. -func fillTable(tab *Table, nodes []*node, setLive bool) { +func fillTable(tab *Table, nodes []*enode.Node, setLive bool) { for _, n := range nodes { - if setLive { - n.livenessChecks = 1 - } - tab.addSeenNode(n) + tab.addFoundNode(n, setLive) } } type pingRecorder struct { - mu sync.Mutex - dead, pinged map[enode.ID]bool - records map[enode.ID]*enode.Node - n *enode.Node + mu sync.Mutex + cond *sync.Cond + dead map[enode.ID]bool + records map[enode.ID]*enode.Node + pinged []*enode.Node + n *enode.Node } func newPingRecorder() *pingRecorder { @@ -130,12 +139,13 @@ func newPingRecorder() *pingRecorder { r.Set(enr.IP{0, 0, 0, 0}) n := enode.SignNull(&r, enode.ID{}) - return &pingRecorder{ + t := &pingRecorder{ dead: make(map[enode.ID]bool), - pinged: make(map[enode.ID]bool), records: make(map[enode.ID]*enode.Node), n: n, } + t.cond = sync.NewCond(&t.mu) + return t } // updateRecord updates a node record. 
Future calls to ping and @@ -151,12 +161,40 @@ func (t *pingRecorder) Self() *enode.Node { return nullNode } func (t *pingRecorder) lookupSelf() []*enode.Node { return nil } func (t *pingRecorder) lookupRandom() []*enode.Node { return nil } +func (t *pingRecorder) waitPing(timeout time.Duration) *enode.Node { + t.mu.Lock() + defer t.mu.Unlock() + + // Wake up the loop on timeout. + var timedout atomic.Bool + timer := time.AfterFunc(timeout, func() { + timedout.Store(true) + t.cond.Broadcast() + }) + defer timer.Stop() + + // Wait for a ping. + for { + if timedout.Load() { + return nil + } + if len(t.pinged) > 0 { + n := t.pinged[0] + t.pinged = append(t.pinged[:0], t.pinged[1:]...) + return n + } + t.cond.Wait() + } +} + // ping simulates a ping request. func (t *pingRecorder) ping(n *enode.Node) (seq uint64, err error) { t.mu.Lock() defer t.mu.Unlock() - t.pinged[n.ID()] = true + t.pinged = append(t.pinged, n) + t.cond.Broadcast() + if t.dead[n.ID()] { return 0, errTimeout } @@ -177,7 +215,7 @@ func (t *pingRecorder) RequestENR(n *enode.Node) (*enode.Node, error) { return t.records[n.ID()], nil } -func hasDuplicates(slice []*node) bool { +func hasDuplicates(slice []*enode.Node) bool { seen := make(map[enode.ID]bool, len(slice)) for i, e := range slice { if e == nil { @@ -219,14 +257,14 @@ func nodeEqual(n1 *enode.Node, n2 *enode.Node) bool { return n1.ID() == n2.ID() && n1.IP().Equal(n2.IP()) } -func sortByID(nodes []*enode.Node) { - slices.SortFunc(nodes, func(a, b *enode.Node) int { +func sortByID[N nodeType](nodes []N) { + slices.SortFunc(nodes, func(a, b N) int { return bytes.Compare(a.ID().Bytes(), b.ID().Bytes()) }) } -func sortedByDistanceTo(distbase enode.ID, slice []*node) bool { - return slices.IsSortedFunc(slice, func(a, b *node) int { +func sortedByDistanceTo(distbase enode.ID, slice []*enode.Node) bool { + return slices.IsSortedFunc(slice, func(a, b *enode.Node) int { return enode.DistCmp(distbase, a.ID(), b.ID()) }) } @@ -256,3 +294,57 @@ func hexEncPubkey(h string) (ret encPubkey) { copy(ret[:], b) return ret } + +type nodeEventRecorder struct { + evc chan recordedNodeEvent +} + +type recordedNodeEvent struct { + node *tableNode + added bool +} + +func newNodeEventRecorder(buffer int) *nodeEventRecorder { + return &nodeEventRecorder{ + evc: make(chan recordedNodeEvent, buffer), + } +} + +func (set *nodeEventRecorder) nodeAdded(b *bucket, n *tableNode) { + select { + case set.evc <- recordedNodeEvent{n, true}: + default: + panic("no space in event buffer") + } +} + +func (set *nodeEventRecorder) nodeRemoved(b *bucket, n *tableNode) { + select { + case set.evc <- recordedNodeEvent{n, false}: + default: + panic("no space in event buffer") + } +} + +func (set *nodeEventRecorder) waitNodePresent(id enode.ID, timeout time.Duration) bool { + return set.waitNodeEvent(id, timeout, true) +} + +func (set *nodeEventRecorder) waitNodeAbsent(id enode.ID, timeout time.Duration) bool { + return set.waitNodeEvent(id, timeout, false) +} + +func (set *nodeEventRecorder) waitNodeEvent(id enode.ID, timeout time.Duration, added bool) bool { + timer := time.NewTimer(timeout) + defer timer.Stop() + for { + select { + case ev := <-set.evc: + if ev.node.ID() == id && ev.added == added { + return true + } + case <-timer.C: + return false + } + } +} diff --git a/p2p/discover/v4_lookup_test.go b/p2p/discover/v4_lookup_test.go index 5682f262be..bc9475a8b3 100644 --- a/p2p/discover/v4_lookup_test.go +++ b/p2p/discover/v4_lookup_test.go @@ -19,7 +19,7 @@ package discover import ( "crypto/ecdsa" "fmt" - 
"net" + "net/netip" "slices" "testing" @@ -40,7 +40,7 @@ func TestUDPv4_Lookup(t *testing.T) { } // Seed table with initial node. - fillTable(test.table, []*node{wrapNode(lookupTestnet.node(256, 0))}, true) + fillTable(test.table, []*enode.Node{lookupTestnet.node(256, 0)}, true) // Start the lookup. resultC := make(chan []*enode.Node, 1) @@ -70,9 +70,9 @@ func TestUDPv4_LookupIterator(t *testing.T) { defer test.close() // Seed table with initial nodes. - bootnodes := make([]*node, len(lookupTestnet.dists[256])) + bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256])) for i := range lookupTestnet.dists[256] { - bootnodes[i] = wrapNode(lookupTestnet.node(256, i)) + bootnodes[i] = lookupTestnet.node(256, i) } fillTable(test.table, bootnodes, true) go serveTestnet(test, lookupTestnet) @@ -105,9 +105,9 @@ func TestUDPv4_LookupIteratorClose(t *testing.T) { defer test.close() // Seed table with initial nodes. - bootnodes := make([]*node, len(lookupTestnet.dists[256])) + bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256])) for i := range lookupTestnet.dists[256] { - bootnodes[i] = wrapNode(lookupTestnet.node(256, i)) + bootnodes[i] = lookupTestnet.node(256, i) } fillTable(test.table, bootnodes, true) go serveTestnet(test, lookupTestnet) @@ -136,7 +136,7 @@ func TestUDPv4_LookupIteratorClose(t *testing.T) { func serveTestnet(test *udpTest, testnet *preminedTestnet) { for done := false; !done; { - done = test.waitPacketOut(func(p v4wire.Packet, to *net.UDPAddr, hash []byte) { + done = test.waitPacketOut(func(p v4wire.Packet, to netip.AddrPort, hash []byte) { n, key := testnet.nodeByAddr(to) switch p.(type) { case *v4wire.Ping: @@ -158,10 +158,10 @@ func checkLookupResults(t *testing.T, tn *preminedTestnet, results []*enode.Node for _, e := range results { t.Logf(" ld=%d, %x", enode.LogDist(tn.target.id(), e.ID()), e.ID().Bytes()) } - if hasDuplicates(wrapNodes(results)) { + if hasDuplicates(results) { t.Errorf("result set contains duplicate entries") } - if !sortedByDistanceTo(tn.target.id(), wrapNodes(results)) { + if !sortedByDistanceTo(tn.target.id(), results) { t.Errorf("result set not sorted by distance to target") } wantNodes := tn.closest(len(results)) @@ -264,9 +264,10 @@ func (tn *preminedTestnet) node(dist, index int) *enode.Node { return n } -func (tn *preminedTestnet) nodeByAddr(addr *net.UDPAddr) (*enode.Node, *ecdsa.PrivateKey) { - dist := int(addr.IP[1])<<8 + int(addr.IP[2]) - index := int(addr.IP[3]) +func (tn *preminedTestnet) nodeByAddr(addr netip.AddrPort) (*enode.Node, *ecdsa.PrivateKey) { + ip := addr.Addr().As4() + dist := int(ip[1])<<8 + int(ip[2]) + index := int(ip[3]) key := tn.dists[dist][index] return tn.node(dist, index), key } @@ -274,7 +275,7 @@ func (tn *preminedTestnet) nodeByAddr(addr *net.UDPAddr) (*enode.Node, *ecdsa.Pr func (tn *preminedTestnet) nodesAtDistance(dist int) []v4wire.Node { result := make([]v4wire.Node, len(tn.dists[dist])) for i := range result { - result[i] = nodeToRPC(wrapNode(tn.node(dist, i))) + result[i] = nodeToRPC(tn.node(dist, i)) } return result } diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 7a0a0f1c77..3880ca34a7 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -26,6 +26,7 @@ import ( "fmt" "io" "net" + "net/netip" "sync" "time" @@ -45,6 +46,7 @@ var ( errClockWarp = errors.New("reply deadline too far in the future") errClosed = errors.New("socket closed") errLowPort = errors.New("low port") + errNoUDPEndpoint = errors.New("node has no UDP endpoint") ) const ( @@ -93,7 +95,7 @@ 
type UDPv4 struct { type replyMatcher struct { // these fields must match in the reply. from enode.ID - ip net.IP + ip netip.Addr ptype byte // time when the request must complete @@ -119,7 +121,7 @@ type replyMatchFunc func(v4wire.Packet) (matched bool, requestDone bool) // reply is a reply packet from a certain node. type reply struct { from enode.ID - ip net.IP + ip netip.Addr data v4wire.Packet // loop indicates whether there was // a matching request by sending on this channel. @@ -142,7 +144,7 @@ func ListenV4(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { log: cfg.Log, } - tab, err := newMeteredTable(t, ln.Database(), cfg) + tab, err := newTable(t, ln.Database(), cfg) if err != nil { return nil, err } @@ -201,9 +203,12 @@ func (t *UDPv4) Resolve(n *enode.Node) *enode.Node { } func (t *UDPv4) ourEndpoint() v4wire.Endpoint { - n := t.Self() - a := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} - return v4wire.NewEndpoint(a, uint16(n.TCP())) + node := t.Self() + addr, ok := node.UDPEndpoint() + if !ok { + return v4wire.Endpoint{} + } + return v4wire.NewEndpoint(addr, uint16(node.TCP())) } // Ping sends a ping message to the given node. @@ -214,7 +219,11 @@ func (t *UDPv4) Ping(n *enode.Node) error { // ping sends a ping message to the given node and waits for a reply. func (t *UDPv4) ping(n *enode.Node) (seq uint64, err error) { - rm := t.sendPing(n.ID(), &net.UDPAddr{IP: n.IP(), Port: n.UDP()}, nil) + addr, ok := n.UDPEndpoint() + if !ok { + return 0, errNoUDPEndpoint + } + rm := t.sendPing(n.ID(), addr, nil) if err = <-rm.errc; err == nil { seq = rm.reply.(*v4wire.Pong).ENRSeq } @@ -223,7 +232,7 @@ func (t *UDPv4) ping(n *enode.Node) (seq uint64, err error) { // sendPing sends a ping message to the given node and invokes the callback // when the reply arrives. -func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *replyMatcher { +func (t *UDPv4) sendPing(toid enode.ID, toaddr netip.AddrPort, callback func()) *replyMatcher { req := t.makePing(toaddr) packet, hash, err := v4wire.Encode(t.priv, req) if err != nil { @@ -233,7 +242,7 @@ func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *r } // Add a matcher for the reply to the pending reply queue. Pongs are matched if they // reference the ping we're about to send. - rm := t.pending(toid, toaddr.IP, v4wire.PongPacket, func(p v4wire.Packet) (matched bool, requestDone bool) { + rm := t.pending(toid, toaddr.Addr(), v4wire.PongPacket, func(p v4wire.Packet) (matched bool, requestDone bool) { matched = bytes.Equal(p.(*v4wire.Pong).ReplyTok, hash) if matched && callback != nil { callback() @@ -241,12 +250,13 @@ func (t *UDPv4) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) *r return matched, matched }) // Send the packet. 
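+	// localNode.UDPContact still takes *net.UDPAddr, hence the conversion below.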
- t.localNode.UDPContact(toaddr) + toUDPAddr := &net.UDPAddr{IP: toaddr.Addr().AsSlice()} + t.localNode.UDPContact(toUDPAddr) t.write(toaddr, toid, req.Name(), packet) return rm } -func (t *UDPv4) makePing(toaddr *net.UDPAddr) *v4wire.Ping { +func (t *UDPv4) makePing(toaddr netip.AddrPort) *v4wire.Ping { return &v4wire.Ping{ Version: 4, From: t.ourEndpoint(), @@ -290,35 +300,39 @@ func (t *UDPv4) newRandomLookup(ctx context.Context) *lookup { func (t *UDPv4) newLookup(ctx context.Context, targetKey encPubkey) *lookup { target := enode.ID(crypto.Keccak256Hash(targetKey[:])) ekey := v4wire.Pubkey(targetKey) - it := newLookup(ctx, t.tab, target, func(n *node) ([]*node, error) { - return t.findnode(n.ID(), n.addr(), ekey) + it := newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) { + addr, ok := n.UDPEndpoint() + if !ok { + return nil, errNoUDPEndpoint + } + return t.findnode(n.ID(), addr, ekey) }) return it } // findnode sends a findnode request to the given node and waits until // the node has sent up to k neighbors. -func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target v4wire.Pubkey) ([]*node, error) { - t.ensureBond(toid, toaddr) +func (t *UDPv4) findnode(toid enode.ID, toAddrPort netip.AddrPort, target v4wire.Pubkey) ([]*enode.Node, error) { + t.ensureBond(toid, toAddrPort) // Add a matcher for 'neighbours' replies to the pending reply queue. The matcher is // active until enough nodes have been received. - nodes := make([]*node, 0, bucketSize) + nodes := make([]*enode.Node, 0, bucketSize) nreceived := 0 - rm := t.pending(toid, toaddr.IP, v4wire.NeighborsPacket, func(r v4wire.Packet) (matched bool, requestDone bool) { + rm := t.pending(toid, toAddrPort.Addr(), v4wire.NeighborsPacket, func(r v4wire.Packet) (matched bool, requestDone bool) { reply := r.(*v4wire.Neighbors) for _, rn := range reply.Nodes { nreceived++ - n, err := t.nodeFromRPC(toaddr, rn) + n, err := t.nodeFromRPC(toAddrPort, rn) if err != nil { - t.log.Trace("Invalid neighbor node received", "ip", rn.IP, "addr", toaddr, "err", err) + t.log.Trace("Invalid neighbor node received", "ip", rn.IP, "addr", toAddrPort, "err", err) continue } nodes = append(nodes, n) } return true, nreceived >= bucketSize }) - t.send(toaddr, toid, &v4wire.Findnode{ + t.send(toAddrPort, toid, &v4wire.Findnode{ Target: target, Expiration: uint64(time.Now().Add(expiration).Unix()), }) @@ -336,7 +350,7 @@ func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target v4wire.Pubke // RequestENR sends ENRRequest to the given node and waits for a response. func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { - addr := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} + addr, _ := n.UDPEndpoint() t.ensureBond(n.ID(), addr) req := &v4wire.ENRRequest{ @@ -349,7 +363,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { // Add a matcher for the reply to the pending reply queue. Responses are matched if // they reference the request we're about to send. 
- rm := t.pending(n.ID(), addr.IP, v4wire.ENRResponsePacket, func(r v4wire.Packet) (matched bool, requestDone bool) { + rm := t.pending(n.ID(), addr.Addr(), v4wire.ENRResponsePacket, func(r v4wire.Packet) (matched bool, requestDone bool) { matched = bytes.Equal(r.(*v4wire.ENRResponse).ReplyTok, hash) return matched, matched }) @@ -369,15 +383,19 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) { if respN.Seq() < n.Seq() { return n, nil // response record is older } - if err := netutil.CheckRelayIP(addr.IP, respN.IP()); err != nil { + if err := netutil.CheckRelayIP(addr.Addr().AsSlice(), respN.IP()); err != nil { return nil, fmt.Errorf("invalid IP in response record: %v", err) } return respN, nil } +func (t *UDPv4) TableBuckets() [][]BucketNode { + return t.tab.Nodes() +} + // pending adds a reply matcher to the pending reply queue. // see the documentation of type replyMatcher for a detailed explanation. -func (t *UDPv4) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchFunc) *replyMatcher { +func (t *UDPv4) pending(id enode.ID, ip netip.Addr, ptype byte, callback replyMatchFunc) *replyMatcher { ch := make(chan error, 1) p := &replyMatcher{from: id, ip: ip, ptype: ptype, callback: callback, errc: ch} select { @@ -391,7 +409,7 @@ func (t *UDPv4) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchF // handleReply dispatches a reply packet, invoking reply matchers. It returns // whether any matcher considered the packet acceptable. -func (t *UDPv4) handleReply(from enode.ID, fromIP net.IP, req v4wire.Packet) bool { +func (t *UDPv4) handleReply(from enode.ID, fromIP netip.Addr, req v4wire.Packet) bool { matched := make(chan bool, 1) select { case t.gotreply <- reply{from, fromIP, req, matched}: @@ -457,7 +475,7 @@ func (t *UDPv4) loop() { var matched bool // whether any replyMatcher considered the reply acceptable. for el := plist.Front(); el != nil; el = el.Next() { p := el.Value.(*replyMatcher) - if p.from == r.from && p.ptype == r.data.Kind() && p.ip.Equal(r.ip) { + if p.from == r.from && p.ptype == r.data.Kind() && p.ip == r.ip { ok, requestDone := p.callback(r.data) matched = matched || ok p.reply = r.data @@ -496,7 +514,7 @@ func (t *UDPv4) loop() { } } -func (t *UDPv4) send(toaddr *net.UDPAddr, toid enode.ID, req v4wire.Packet) ([]byte, error) { +func (t *UDPv4) send(toaddr netip.AddrPort, toid enode.ID, req v4wire.Packet) ([]byte, error) { packet, hash, err := v4wire.Encode(t.priv, req) if err != nil { return hash, err @@ -504,8 +522,8 @@ func (t *UDPv4) send(toaddr *net.UDPAddr, toid enode.ID, req v4wire.Packet) ([]b return hash, t.write(toaddr, toid, req.Name(), packet) } -func (t *UDPv4) write(toaddr *net.UDPAddr, toid enode.ID, what string, packet []byte) error { - _, err := t.conn.WriteToUDP(packet, toaddr) +func (t *UDPv4) write(toaddr netip.AddrPort, toid enode.ID, what string, packet []byte) error { + _, err := t.conn.WriteToUDPAddrPort(packet, toaddr) t.log.Trace(">> "+what, "id", toid, "addr", toaddr, "err", err) return err } @@ -519,7 +537,7 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) { buf := make([]byte, maxPacketSize) for { - nbytes, from, err := t.conn.ReadFromUDP(buf) + nbytes, from, err := t.conn.ReadFromUDPAddrPort(buf) if netutil.IsTemporaryError(err) { // Ignore temporary read errors. 
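// NOTE: illustrative sketch, not part of the patch. This read loop now uses
// conn.ReadFromUDPAddrPort, which *net.UDPConn has provided since Go 1.18
// alongside WriteToUDPAddrPort, so the package's UDPConn abstraction can be
// stated directly over netip types, roughly:
//
//	type UDPConn interface {
//		ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error)
//		WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (n int, err error)
//		Close() error
//		LocalAddr() net.Addr
//	}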
t.log.Debug("Temporary UDP read error", "err", err) @@ -540,7 +558,7 @@ func (t *UDPv4) readLoop(unhandled chan<- ReadPacket) { } } -func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error { +func (t *UDPv4) handlePacket(from netip.AddrPort, buf []byte) error { rawpacket, fromKey, hash, err := v4wire.Decode(buf) if err != nil { t.log.Debug("Bad discv4 packet", "addr", from, "err", err) @@ -559,15 +577,16 @@ func (t *UDPv4) handlePacket(from *net.UDPAddr, buf []byte) error { } // checkBond checks if the given node has a recent enough endpoint proof. -func (t *UDPv4) checkBond(id enode.ID, ip net.IP) bool { - return time.Since(t.db.LastPongReceived(id, ip)) < bondExpiration +func (t *UDPv4) checkBond(id enode.ID, ip netip.AddrPort) bool { + return time.Since(t.db.LastPongReceived(id, ip.Addr().AsSlice())) < bondExpiration } // ensureBond solicits a ping from a node if we haven't seen a ping from it for a while. // This ensures there is a valid endpoint proof on the remote end. -func (t *UDPv4) ensureBond(toid enode.ID, toaddr *net.UDPAddr) { - tooOld := time.Since(t.db.LastPingReceived(toid, toaddr.IP)) > bondExpiration - if tooOld || t.db.FindFails(toid, toaddr.IP) > maxFindnodeFailures { +func (t *UDPv4) ensureBond(toid enode.ID, toaddr netip.AddrPort) { + ip := toaddr.Addr().AsSlice() + tooOld := time.Since(t.db.LastPingReceived(toid, ip)) > bondExpiration + if tooOld || t.db.FindFails(toid, ip) > maxFindnodeFailures { rm := t.sendPing(toid, toaddr, nil) <-rm.errc // Wait for them to ping back and process our pong. @@ -575,11 +594,11 @@ func (t *UDPv4) ensureBond(toid enode.ID, toaddr *net.UDPAddr) { } } -func (t *UDPv4) nodeFromRPC(sender *net.UDPAddr, rn v4wire.Node) (*node, error) { +func (t *UDPv4) nodeFromRPC(sender netip.AddrPort, rn v4wire.Node) (*enode.Node, error) { if rn.UDP <= 1024 { return nil, errLowPort } - if err := netutil.CheckRelayIP(sender.IP, rn.IP); err != nil { + if err := netutil.CheckRelayIP(sender.Addr().AsSlice(), rn.IP); err != nil { return nil, err } if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) { @@ -589,12 +608,12 @@ func (t *UDPv4) nodeFromRPC(sender *net.UDPAddr, rn v4wire.Node) (*node, error) if err != nil { return nil, err } - n := wrapNode(enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP))) + n := enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP)) err = n.ValidateComplete() return n, err } -func nodeToRPC(n *node) v4wire.Node { +func nodeToRPC(n *enode.Node) v4wire.Node { var key ecdsa.PublicKey var ekey v4wire.Pubkey if err := n.Load((*enode.Secp256k1)(&key)); err == nil { @@ -633,14 +652,14 @@ type packetHandlerV4 struct { senderKey *ecdsa.PublicKey // used for ping // preverify checks whether the packet is valid and should be handled at all. - preverify func(p *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error + preverify func(p *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error // handle handles the packet. 
- handle func(req *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) + handle func(req *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) } // PING/v4 -func (t *UDPv4) verifyPing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyPing(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Ping) if v4wire.Expired(req.Expiration) { @@ -654,7 +673,7 @@ func (t *UDPv4) verifyPing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.I return nil } -func (t *UDPv4) handlePing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) { +func (t *UDPv4) handlePing(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { req := h.Packet.(*v4wire.Ping) // Reply. @@ -666,45 +685,51 @@ func (t *UDPv4) handlePing(h *packetHandlerV4, from *net.UDPAddr, fromID enode.I }) // Ping back if our last pong on file is too far in the past. - n := wrapNode(enode.NewV4(h.senderKey, from.IP, int(req.From.TCP), from.Port)) - if time.Since(t.db.LastPongReceived(n.ID(), from.IP)) > bondExpiration { + fromIP := from.Addr().AsSlice() + n := enode.NewV4(h.senderKey, fromIP, int(req.From.TCP), int(from.Port())) + if time.Since(t.db.LastPongReceived(n.ID(), fromIP)) > bondExpiration { t.sendPing(fromID, from, func() { - t.tab.addVerifiedNode(n) + t.tab.addInboundNode(n) }) } else { - t.tab.addVerifiedNode(n) + t.tab.addInboundNode(n) } // Update node database and endpoint predictor. - t.db.UpdateLastPingReceived(n.ID(), from.IP, time.Now()) - t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)}) + t.db.UpdateLastPingReceived(n.ID(), fromIP, time.Now()) + fromUDPAddr := &net.UDPAddr{IP: fromIP, Port: int(from.Port())} + toUDPAddr := &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)} + t.localNode.UDPEndpointStatement(fromUDPAddr, toUDPAddr) } // PONG/v4 -func (t *UDPv4) verifyPong(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyPong(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Pong) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.handleReply(fromID, from.IP, req) { + if !t.handleReply(fromID, from.Addr(), req) { return errUnsolicitedReply } - t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)}) - t.db.UpdateLastPongReceived(fromID, from.IP, time.Now()) + fromIP := from.Addr().AsSlice() + fromUDPAddr := &net.UDPAddr{IP: fromIP, Port: int(from.Port())} + toUDPAddr := &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)} + t.localNode.UDPEndpointStatement(fromUDPAddr, toUDPAddr) + t.db.UpdateLastPongReceived(fromID, fromIP, time.Now()) return nil } // FINDNODE/v4 -func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Findnode) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.checkBond(fromID, from.IP) { + if !t.checkBond(fromID, from) { // No endpoint proof pong exists, we don't process the packet. This prevents an // attack vector where the discovery protocol could be used to amplify traffic in a // DDOS attack. 
A malicious actor would send a findnode request with the IP address @@ -716,7 +741,7 @@ func (t *UDPv4) verifyFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno return nil } -func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) { +func (t *UDPv4) handleFindnode(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { req := h.Packet.(*v4wire.Findnode) // Determine closest nodes. @@ -728,7 +753,8 @@ func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno p := v4wire.Neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())} var sent bool for _, n := range closest { - if netutil.CheckRelayIP(from.IP, n.IP()) == nil { + fromIP := from.Addr().AsSlice() + if netutil.CheckRelayIP(fromIP, n.IP()) == nil { p.Nodes = append(p.Nodes, nodeToRPC(n)) } if len(p.Nodes) == v4wire.MaxNeighbors { @@ -744,13 +770,13 @@ func (t *UDPv4) handleFindnode(h *packetHandlerV4, from *net.UDPAddr, fromID eno // NEIGHBORS/v4 -func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.Neighbors) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.handleReply(fromID, from.IP, h.Packet) { + if !t.handleReply(fromID, from.Addr(), h.Packet) { return errUnsolicitedReply } return nil @@ -758,19 +784,19 @@ func (t *UDPv4) verifyNeighbors(h *packetHandlerV4, from *net.UDPAddr, fromID en // ENRREQUEST/v4 -func (t *UDPv4) verifyENRRequest(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { +func (t *UDPv4) verifyENRRequest(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { req := h.Packet.(*v4wire.ENRRequest) if v4wire.Expired(req.Expiration) { return errExpired } - if !t.checkBond(fromID, from.IP) { + if !t.checkBond(fromID, from) { return errUnknownNode } return nil } -func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, mac []byte) { +func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, mac []byte) { t.send(from, fromID, &v4wire.ENRResponse{ ReplyTok: mac, Record: *t.localNode.Node().Record(), @@ -779,8 +805,8 @@ func (t *UDPv4) handleENRRequest(h *packetHandlerV4, from *net.UDPAddr, fromID e // ENRRESPONSE/v4 -func (t *UDPv4) verifyENRResponse(h *packetHandlerV4, from *net.UDPAddr, fromID enode.ID, fromKey v4wire.Pubkey) error { - if !t.handleReply(fromID, from.IP, h.Packet) { +func (t *UDPv4) verifyENRResponse(h *packetHandlerV4, from netip.AddrPort, fromID enode.ID, fromKey v4wire.Pubkey) error { + if !t.handleReply(fromID, from.Addr(), h.Packet) { return errUnsolicitedReply } return nil diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 9b80214f75..28a6fb8675 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -26,6 +26,7 @@ import ( "io" "math/rand" "net" + "net/netip" "reflect" "sync" "testing" @@ -55,7 +56,7 @@ type udpTest struct { udp *UDPv4 sent [][]byte localkey, remotekey *ecdsa.PrivateKey - remoteaddr *net.UDPAddr + remoteaddr netip.AddrPort } func newUDPTest(t *testing.T) *udpTest { @@ -64,7 +65,7 @@ func newUDPTest(t *testing.T) *udpTest { pipe: newpipe(), localkey: newkey(), remotekey: newkey(), - remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303}, + remoteaddr: 
netip.MustParseAddrPort("10.0.1.99:30303"), } test.db, _ = enode.OpenDB("") @@ -92,7 +93,7 @@ func (test *udpTest) packetIn(wantError error, data v4wire.Packet) { } // handles a packet as if it had been sent to the transport by the key/endpoint. -func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr *net.UDPAddr, data v4wire.Packet) { +func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr netip.AddrPort, data v4wire.Packet) { test.t.Helper() enc, _, err := v4wire.Encode(key, data) @@ -106,7 +107,7 @@ func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr * } // waits for a packet to be sent by the transport. -// validate should have type func(X, *net.UDPAddr, []byte), where X is a packet type. +// validate should have type func(X, netip.AddrPort, []byte), where X is a packet type. func (test *udpTest) waitPacketOut(validate interface{}) (closed bool) { test.t.Helper() @@ -128,7 +129,7 @@ func (test *udpTest) waitPacketOut(validate interface{}) (closed bool) { test.t.Errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype) return false } - fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(&dgram.to), reflect.ValueOf(hash)}) + fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(dgram.to), reflect.ValueOf(hash)}) return false } @@ -236,7 +237,7 @@ func TestUDPv4_findnodeTimeout(t *testing.T) { test := newUDPTest(t) defer test.close() - toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222} + toaddr := netip.AddrPortFrom(netip.MustParseAddr("1.2.3.4"), 2222) toid := enode.ID{1, 2, 3, 4} target := v4wire.Pubkey{4, 5, 6, 7} result, err := test.udp.findnode(toid, toaddr, target) @@ -261,26 +262,25 @@ func TestUDPv4_findnode(t *testing.T) { for i := 0; i < numCandidates; i++ { key := newkey() ip := net.IP{10, 13, 0, byte(i)} - n := wrapNode(enode.NewV4(&key.PublicKey, ip, 0, 2000)) + n := enode.NewV4(&key.PublicKey, ip, 0, 2000) // Ensure half of table content isn't verified live yet. if i > numCandidates/2 { - n.livenessChecks = 1 live[n.ID()] = true } + test.table.addFoundNode(n, live[n.ID()]) nodes.push(n, numCandidates) } - fillTable(test.table, nodes.entries, false) // ensure there's a bond with the test node, // findnode won't be accepted otherwise. remoteID := v4wire.EncodePubkey(&test.remotekey.PublicKey).ID() - test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.IP, time.Now()) + test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.Addr().AsSlice(), time.Now()) // check that closest neighbors are returned. 
expected := test.table.findnodeByID(testTarget.ID(), bucketSize, true) test.packetIn(nil, &v4wire.Findnode{Target: testTarget, Expiration: futureExp}) - waitNeighbors := func(want []*node) { - test.waitPacketOut(func(p *v4wire.Neighbors, to *net.UDPAddr, hash []byte) { + waitNeighbors := func(want []*enode.Node) { + test.waitPacketOut(func(p *v4wire.Neighbors, to netip.AddrPort, hash []byte) { if len(p.Nodes) != len(want) { t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), len(want)) return @@ -309,10 +309,10 @@ func TestUDPv4_findnodeMultiReply(t *testing.T) { defer test.close() rid := enode.PubkeyToIDV4(&test.remotekey.PublicKey) - test.table.db.UpdateLastPingReceived(rid, test.remoteaddr.IP, time.Now()) + test.table.db.UpdateLastPingReceived(rid, test.remoteaddr.Addr().AsSlice(), time.Now()) // queue a pending findnode request - resultc, errc := make(chan []*node, 1), make(chan error, 1) + resultc, errc := make(chan []*enode.Node, 1), make(chan error, 1) go func() { rid := encodePubkey(&test.remotekey.PublicKey).id() ns, err := test.udp.findnode(rid, test.remoteaddr, testTarget) @@ -325,18 +325,18 @@ func TestUDPv4_findnodeMultiReply(t *testing.T) { // wait for the findnode to be sent. // after it is sent, the transport is waiting for a reply - test.waitPacketOut(func(p *v4wire.Findnode, to *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.Findnode, to netip.AddrPort, hash []byte) { if p.Target != testTarget { t.Errorf("wrong target: got %v, want %v", p.Target, testTarget) } }) // send the reply as two packets. - list := []*node{ - wrapNode(enode.MustParse("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304")), - wrapNode(enode.MustParse("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303")), - wrapNode(enode.MustParse("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17")), - wrapNode(enode.MustParse("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303")), + list := []*enode.Node{ + enode.MustParse("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"), + enode.MustParse("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303"), + enode.MustParse("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17"), + enode.MustParse("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303"), } rpclist := make([]v4wire.Node, len(list)) for i := range list { @@ -368,8 +368,8 @@ func TestUDPv4_pingMatch(t *testing.T) { crand.Read(randToken) test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - test.waitPacketOut(func(*v4wire.Pong, *net.UDPAddr, []byte) {}) - test.waitPacketOut(func(*v4wire.Ping, *net.UDPAddr, []byte) {}) + test.waitPacketOut(func(*v4wire.Pong, 
netip.AddrPort, []byte) {}) + test.waitPacketOut(func(*v4wire.Ping, netip.AddrPort, []byte) {}) test.packetIn(errUnsolicitedReply, &v4wire.Pong{ReplyTok: randToken, To: testLocalAnnounced, Expiration: futureExp}) } @@ -379,10 +379,10 @@ func TestUDPv4_pingMatchIP(t *testing.T) { defer test.close() test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) - test.waitPacketOut(func(*v4wire.Pong, *net.UDPAddr, []byte) {}) + test.waitPacketOut(func(*v4wire.Pong, netip.AddrPort, []byte) {}) - test.waitPacketOut(func(p *v4wire.Ping, to *net.UDPAddr, hash []byte) { - wrongAddr := &net.UDPAddr{IP: net.IP{33, 44, 1, 2}, Port: 30000} + test.waitPacketOut(func(p *v4wire.Ping, to netip.AddrPort, hash []byte) { + wrongAddr := netip.MustParseAddrPort("33.44.1.2:30000") test.packetInFrom(errUnsolicitedReply, test.remotekey, wrongAddr, &v4wire.Pong{ ReplyTok: hash, To: testLocalAnnounced, @@ -393,41 +393,36 @@ func TestUDPv4_pingMatchIP(t *testing.T) { func TestUDPv4_successfulPing(t *testing.T) { test := newUDPTest(t) - added := make(chan *node, 1) - test.table.nodeAddedHook = func(b *bucket, n *node) { added <- n } + added := make(chan *tableNode, 1) + test.table.nodeAddedHook = func(b *bucket, n *tableNode) { added <- n } defer test.close() // The remote side sends a ping packet to initiate the exchange. go test.packetIn(nil, &v4wire.Ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp}) // The ping is replied to. - test.waitPacketOut(func(p *v4wire.Pong, to *net.UDPAddr, hash []byte) { + test.waitPacketOut(func(p *v4wire.Pong, to netip.AddrPort, hash []byte) { pinghash := test.sent[0][:32] if !bytes.Equal(p.ReplyTok, pinghash) { t.Errorf("got pong.ReplyTok %x, want %x", p.ReplyTok, pinghash) } - wantTo := v4wire.Endpoint{ - // The mirrored UDP address is the UDP packet sender - IP: test.remoteaddr.IP, UDP: uint16(test.remoteaddr.Port), - // The mirrored TCP port is the one from the ping packet - TCP: testRemote.TCP, - } + // The mirrored UDP address is the UDP packet sender. + // The mirrored TCP port is the one from the ping packet. + wantTo := v4wire.NewEndpoint(test.remoteaddr, testRemote.TCP) if !reflect.DeepEqual(p.To, wantTo) { t.Errorf("got pong.To %v, want %v", p.To, wantTo) } }) // Remote is unknown, the table pings back. - test.waitPacketOut(func(p *v4wire.Ping, to *net.UDPAddr, hash []byte) { - if !reflect.DeepEqual(p.From, test.udp.ourEndpoint()) { + test.waitPacketOut(func(p *v4wire.Ping, to netip.AddrPort, hash []byte) { + wantFrom := test.udp.ourEndpoint() + wantFrom.IP = net.IP{} + if !reflect.DeepEqual(p.From, wantFrom) { t.Errorf("got ping.From %#v, want %#v", p.From, test.udp.ourEndpoint()) } - wantTo := v4wire.Endpoint{ - // The mirrored UDP address is the UDP packet sender. - IP: test.remoteaddr.IP, - UDP: uint16(test.remoteaddr.Port), - TCP: 0, - } + // The mirrored UDP address is the UDP packet sender. 
+		wantTo := v4wire.NewEndpoint(test.remoteaddr, 0)
 		if !reflect.DeepEqual(p.To, wantTo) {
 			t.Errorf("got ping.To %v, want %v", p.To, wantTo)
 		}
@@ -442,11 +437,11 @@ func TestUDPv4_successfulPing(t *testing.T) {
 	if n.ID() != rid {
 		t.Errorf("node has wrong ID: got %v, want %v", n.ID(), rid)
 	}
-	if !n.IP().Equal(test.remoteaddr.IP) {
-		t.Errorf("node has wrong IP: got %v, want: %v", n.IP(), test.remoteaddr.IP)
+	if !n.IP().Equal(test.remoteaddr.Addr().AsSlice()) {
+		t.Errorf("node has wrong IP: got %v, want: %v", n.IP(), test.remoteaddr.Addr())
 	}
-	if n.UDP() != test.remoteaddr.Port {
-		t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port)
+	if n.UDP() != int(test.remoteaddr.Port()) {
+		t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port())
 	}
 	if n.TCP() != int(testRemote.TCP) {
 		t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP(), testRemote.TCP)
@@ -469,12 +464,12 @@ func TestUDPv4_EIP868(t *testing.T) {
 	// Perform endpoint proof and check for sequence number in packet tail.
 	test.packetIn(nil, &v4wire.Ping{Expiration: futureExp})
-	test.waitPacketOut(func(p *v4wire.Pong, addr *net.UDPAddr, hash []byte) {
+	test.waitPacketOut(func(p *v4wire.Pong, addr netip.AddrPort, hash []byte) {
 		if p.ENRSeq != wantNode.Seq() {
 			t.Errorf("wrong sequence number in pong: %d, want %d", p.ENRSeq, wantNode.Seq())
 		}
 	})
-	test.waitPacketOut(func(p *v4wire.Ping, addr *net.UDPAddr, hash []byte) {
+	test.waitPacketOut(func(p *v4wire.Ping, addr netip.AddrPort, hash []byte) {
 		if p.ENRSeq != wantNode.Seq() {
 			t.Errorf("wrong sequence number in ping: %d, want %d", p.ENRSeq, wantNode.Seq())
 		}
@@ -483,7 +478,7 @@ func TestUDPv4_EIP868(t *testing.T) {
 	// Request should work now.
 	test.packetIn(nil, &v4wire.ENRRequest{Expiration: futureExp})
-	test.waitPacketOut(func(p *v4wire.ENRResponse, addr *net.UDPAddr, hash []byte) {
+	test.waitPacketOut(func(p *v4wire.ENRResponse, addr netip.AddrPort, hash []byte) {
 		n, err := enode.New(enode.ValidSchemes, &p.Record)
 		if err != nil {
 			t.Fatalf("invalid record: %v", err)
@@ -584,7 +579,7 @@ type dgramPipe struct {
 }
 
 type dgram struct {
-	to   net.UDPAddr
+	to   netip.AddrPort
 	data []byte
 }
 
@@ -597,8 +592,8 @@ func newpipe() *dgramPipe {
 	}
 }
 
-// WriteToUDP queues a datagram.
-func (c *dgramPipe) WriteToUDP(b []byte, to *net.UDPAddr) (n int, err error) {
+// WriteToUDPAddrPort queues a datagram.
+func (c *dgramPipe) WriteToUDPAddrPort(b []byte, to netip.AddrPort) (n int, err error) {
 	msg := make([]byte, len(b))
 	copy(msg, b)
 	c.mu.Lock()
@@ -606,15 +601,15 @@ func (c *dgramPipe) WriteToUDP(b []byte, to *net.UDPAddr) (n int, err error) {
 	if c.closed {
 		return 0, errors.New("closed")
 	}
-	c.queue = append(c.queue, dgram{*to, b})
+	c.queue = append(c.queue, dgram{to, msg}) // queue the stable copy; callers may reuse b
 	c.cond.Signal()
 	return len(b), nil
 }
 
-// ReadFromUDP just hangs until the pipe is closed.
+func (c *dgramPipe) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { <-c.closing - return 0, nil, io.EOF + return 0, netip.AddrPort{}, io.EOF } func (c *dgramPipe) Close() error { diff --git a/p2p/discover/v4wire/v4wire.go b/p2p/discover/v4wire/v4wire.go index 9c59359fb2..958cca324d 100644 --- a/p2p/discover/v4wire/v4wire.go +++ b/p2p/discover/v4wire/v4wire.go @@ -25,6 +25,7 @@ import ( "fmt" "math/big" "net" + "net/netip" "time" "github.com/ethereum/go-ethereum/common/math" @@ -150,14 +151,15 @@ type Endpoint struct { } // NewEndpoint creates an endpoint. -func NewEndpoint(addr *net.UDPAddr, tcpPort uint16) Endpoint { - ip := net.IP{} - if ip4 := addr.IP.To4(); ip4 != nil { - ip = ip4 - } else if ip6 := addr.IP.To16(); ip6 != nil { - ip = ip6 +func NewEndpoint(addr netip.AddrPort, tcpPort uint16) Endpoint { + var ip net.IP + if addr.Addr().Is4() || addr.Addr().Is4In6() { + ip4 := addr.Addr().As4() + ip = ip4[:] + } else { + ip = addr.Addr().AsSlice() } - return Endpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort} + return Endpoint{IP: ip, UDP: addr.Port(), TCP: tcpPort} } type Packet interface { diff --git a/p2p/discover/v5_talk.go b/p2p/discover/v5_talk.go index c1f6787940..2246b47141 100644 --- a/p2p/discover/v5_talk.go +++ b/p2p/discover/v5_talk.go @@ -18,6 +18,7 @@ package discover import ( "net" + "net/netip" "sync" "time" @@ -70,7 +71,7 @@ func (t *talkSystem) register(protocol string, handler TalkRequestHandler) { } // handleRequest handles a talk request. -func (t *talkSystem) handleRequest(id enode.ID, addr *net.UDPAddr, req *v5wire.TalkRequest) { +func (t *talkSystem) handleRequest(id enode.ID, addr netip.AddrPort, req *v5wire.TalkRequest) { t.mutex.Lock() handler, ok := t.handlers[req.Protocol] t.mutex.Unlock() @@ -88,7 +89,8 @@ func (t *talkSystem) handleRequest(id enode.ID, addr *net.UDPAddr, req *v5wire.T case <-t.slots: go func() { defer func() { t.slots <- struct{}{} }() - respMessage := handler(id, addr, req.Message) + udpAddr := &net.UDPAddr{IP: addr.Addr().AsSlice(), Port: int(addr.Port())} + respMessage := handler(id, udpAddr, req.Message) resp := &v5wire.TalkResponse{ReqID: req.ReqID, Message: respMessage} t.transport.sendFromAnotherThread(id, addr, resp) }() diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index 20a8bccd05..9ba54b3d40 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "net" + "net/netip" "slices" "sync" "time" @@ -101,14 +102,14 @@ type UDPv5 struct { type sendRequest struct { destID enode.ID - destAddr *net.UDPAddr + destAddr netip.AddrPort msg v5wire.Packet } // callV5 represents a remote procedure call against another node. type callV5 struct { id enode.ID - addr *net.UDPAddr + addr netip.AddrPort node *enode.Node // This is required to perform handshakes. packet v5wire.Packet @@ -175,7 +176,7 @@ func newUDPv5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { cancelCloseCtx: cancelCloseCtx, } t.talk = newTalkSystem(t) - tab, err := newMeteredTable(t, t.db, cfg) + tab, err := newTable(t, t.db, cfg) if err != nil { return nil, err } @@ -233,7 +234,7 @@ func (t *UDPv5) AllNodes() []*enode.Node { for _, b := range &t.tab.buckets { for _, n := range b.entries { - nodes = append(nodes, unwrapNode(n)) + nodes = append(nodes, n.Node) } } return nodes @@ -266,7 +267,7 @@ func (t *UDPv5) TalkRequest(n *enode.Node, protocol string, request []byte) ([]b } // TalkRequestToID sends a talk request to a node and waits for a response. 
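// NOTE: illustrative usage, not part of the patch. Registered talk handlers
// keep their public *net.UDPAddr signature; the v5_talk.go hunk above converts
// the netip.AddrPort at the call site before invoking them. A hypothetical
// caller, assuming a running UDPv5 transport 'disc':
//
//	disc.RegisterTalkHandler("echo", func(id enode.ID, addr *net.UDPAddr, msg []byte) []byte {
//		return msg // echo the payload back to the requester
//	})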
-func (t *UDPv5) TalkRequestToID(id enode.ID, addr *net.UDPAddr, protocol string, request []byte) ([]byte, error) { +func (t *UDPv5) TalkRequestToID(id enode.ID, addr netip.AddrPort, protocol string, request []byte) ([]byte, error) { req := &v5wire.TalkRequest{Protocol: protocol, Message: request} resp := t.callToID(id, addr, v5wire.TalkResponseMsg, req) defer t.callDone(resp) @@ -314,26 +315,26 @@ func (t *UDPv5) newRandomLookup(ctx context.Context) *lookup { } func (t *UDPv5) newLookup(ctx context.Context, target enode.ID) *lookup { - return newLookup(ctx, t.tab, target, func(n *node) ([]*node, error) { + return newLookup(ctx, t.tab, target, func(n *enode.Node) ([]*enode.Node, error) { return t.lookupWorker(n, target) }) } // lookupWorker performs FINDNODE calls against a single node during lookup. -func (t *UDPv5) lookupWorker(destNode *node, target enode.ID) ([]*node, error) { +func (t *UDPv5) lookupWorker(destNode *enode.Node, target enode.ID) ([]*enode.Node, error) { var ( dists = lookupDistances(target, destNode.ID()) nodes = nodesByDistance{target: target} err error ) var r []*enode.Node - r, err = t.findnode(unwrapNode(destNode), dists) + r, err = t.findnode(destNode, dists) if errors.Is(err, errClosed) { return nil, err } for _, n := range r { if n.ID() != t.Self().ID() { - nodes.push(wrapNode(n), findnodeResultLimit) + nodes.push(n, findnodeResultLimit) } } return nodes.entries, err @@ -427,7 +428,7 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s if err != nil { return nil, err } - if err := netutil.CheckRelayIP(c.addr.IP, node.IP()); err != nil { + if err := netutil.CheckRelayIP(c.addr.Addr().AsSlice(), node.IP()); err != nil { return nil, err } if t.netrestrict != nil && !t.netrestrict.Contains(node.IP()) { @@ -452,14 +453,14 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s // callToNode sends the given call and sets up a handler for response packets (of message // type responseType). Responses are dispatched to the call's response channel. func (t *UDPv5) callToNode(n *enode.Node, responseType byte, req v5wire.Packet) *callV5 { - addr := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} + addr, _ := n.UDPEndpoint() c := &callV5{id: n.ID(), addr: addr, node: n} t.initCall(c, responseType, req) return c } // callToID is like callToNode, but for cases where the node record is not available. -func (t *UDPv5) callToID(id enode.ID, addr *net.UDPAddr, responseType byte, req v5wire.Packet) *callV5 { +func (t *UDPv5) callToID(id enode.ID, addr netip.AddrPort, responseType byte, req v5wire.Packet) *callV5 { c := &callV5{id: id, addr: addr} t.initCall(c, responseType, req) return c @@ -619,12 +620,12 @@ func (t *UDPv5) sendCall(c *callV5) { // sendResponse sends a response packet to the given node. // This doesn't trigger a handshake even if no keys are available. 
-func (t *UDPv5) sendResponse(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet) error { +func (t *UDPv5) sendResponse(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet) error { _, err := t.send(toID, toAddr, packet, nil) return err } -func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet) { +func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet) { select { case t.sendCh <- sendRequest{toID, toAddr, packet}: case <-t.closeCtx.Done(): @@ -632,7 +633,7 @@ func (t *UDPv5) sendFromAnotherThread(toID enode.ID, toAddr *net.UDPAddr, packet } // send sends a packet to the given node. -func (t *UDPv5) send(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet, c *v5wire.Whoareyou) (v5wire.Nonce, error) { +func (t *UDPv5) send(toID enode.ID, toAddr netip.AddrPort, packet v5wire.Packet, c *v5wire.Whoareyou) (v5wire.Nonce, error) { addr := toAddr.String() t.logcontext = append(t.logcontext[:0], "id", toID, "addr", addr) t.logcontext = packet.AppendLogInfo(t.logcontext) @@ -644,7 +645,7 @@ func (t *UDPv5) send(toID enode.ID, toAddr *net.UDPAddr, packet v5wire.Packet, c return nonce, err } - _, err = t.conn.WriteToUDP(enc, toAddr) + _, err = t.conn.WriteToUDPAddrPort(enc, toAddr) t.log.Trace(">> "+packet.Name(), t.logcontext...) return nonce, err } @@ -655,7 +656,7 @@ func (t *UDPv5) readLoop() { buf := make([]byte, maxPacketSize) for range t.readNextCh { - nbytes, from, err := t.conn.ReadFromUDP(buf) + nbytes, from, err := t.conn.ReadFromUDPAddrPort(buf) if netutil.IsTemporaryError(err) { // Ignore temporary read errors. t.log.Debug("Temporary UDP read error", "err", err) @@ -672,7 +673,7 @@ func (t *UDPv5) readLoop() { } // dispatchReadPacket sends a packet into the dispatch loop. -func (t *UDPv5) dispatchReadPacket(from *net.UDPAddr, content []byte) bool { +func (t *UDPv5) dispatchReadPacket(from netip.AddrPort, content []byte) bool { select { case t.packetInCh <- ReadPacket{content, from}: return true @@ -682,7 +683,7 @@ func (t *UDPv5) dispatchReadPacket(from *net.UDPAddr, content []byte) bool { } // handlePacket decodes and processes an incoming packet from the network. -func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { +func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr netip.AddrPort) error { addr := fromAddr.String() fromID, fromNode, packet, err := t.codec.Decode(rawpacket, addr) if err != nil { @@ -699,7 +700,7 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { } if fromNode != nil { // Handshake succeeded, add to table. - t.tab.addSeenNode(wrapNode(fromNode)) + t.tab.addInboundNode(fromNode) } if packet.Kind() != v5wire.WhoareyouPacket { // WHOAREYOU logged separately to report errors. @@ -712,13 +713,13 @@ func (t *UDPv5) handlePacket(rawpacket []byte, fromAddr *net.UDPAddr) error { } // handleCallResponse dispatches a response packet to the call waiting for it. 
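// NOTE: illustrative sketch, not part of the patch. The endpoint check in
// handleCallResponse below shrinks to a single == because netip.AddrPort is a
// comparable value type. The comparison is exact, though: an IPv4 address and
// its IPv4-in-IPv6 form are distinct, so addresses read from a dual-stack
// socket may need Addr().Unmap() before comparing:
//
//	a := netip.MustParseAddrPort("10.0.1.99:30303")
//	b := netip.MustParseAddrPort("[::ffff:10.0.1.99]:30303")
//	_ = a == b                       // false: the 4-in-6 form differs
//	_ = a.Addr() == b.Addr().Unmap() // true after unmapping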
-func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr *net.UDPAddr, p v5wire.Packet) bool {
+func (t *UDPv5) handleCallResponse(fromID enode.ID, fromAddr netip.AddrPort, p v5wire.Packet) bool {
 	ac := t.activeCallByNode[fromID]
 	if ac == nil || !bytes.Equal(p.RequestID(), ac.reqid) {
 		t.log.Debug(fmt.Sprintf("Unsolicited/late %s response", p.Name()), "id", fromID, "addr", fromAddr)
 		return false
 	}
-	if !fromAddr.IP.Equal(ac.addr.IP) || fromAddr.Port != ac.addr.Port {
+	if fromAddr != ac.addr {
 		t.log.Debug(fmt.Sprintf("%s from wrong endpoint", p.Name()), "id", fromID, "addr", fromAddr)
 		return false
 	}
@@ -743,7 +744,7 @@ func (t *UDPv5) getNode(id enode.ID) *enode.Node {
 }
 
 // handle processes incoming packets according to their message type.
-func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr *net.UDPAddr) {
+func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr netip.AddrPort) {
 	switch p := p.(type) {
 	case *v5wire.Unknown:
 		t.handleUnknown(p, fromID, fromAddr)
@@ -753,7 +754,9 @@ func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr *net.UDPAddr)
 		t.handlePing(p, fromID, fromAddr)
 	case *v5wire.Pong:
 		if t.handleCallResponse(fromID, fromAddr, p) {
-			t.localNode.UDPEndpointStatement(fromAddr, &net.UDPAddr{IP: p.ToIP, Port: int(p.ToPort)})
+			fromUDPAddr := &net.UDPAddr{IP: fromAddr.Addr().AsSlice(), Port: int(fromAddr.Port())}
+			toUDPAddr := &net.UDPAddr{IP: p.ToIP, Port: int(p.ToPort)}
+			t.localNode.UDPEndpointStatement(fromUDPAddr, toUDPAddr)
 		}
 	case *v5wire.Findnode:
 		t.handleFindnode(p, fromID, fromAddr)
@@ -767,7 +770,7 @@ func (t *UDPv5) handle(p v5wire.Packet, fromID enode.ID, fromAddr *net.UDPAddr)
 }
 
 // handleUnknown initiates a handshake by responding with WHOAREYOU.
-func (t *UDPv5) handleUnknown(p *v5wire.Unknown, fromID enode.ID, fromAddr *net.UDPAddr) {
+func (t *UDPv5) handleUnknown(p *v5wire.Unknown, fromID enode.ID, fromAddr netip.AddrPort) {
 	challenge := &v5wire.Whoareyou{Nonce: p.Nonce}
 	crand.Read(challenge.IDNonce[:])
 	if n := t.getNode(fromID); n != nil {
@@ -783,7 +786,7 @@ var (
 )
 
 // handleWhoareyou resends the active call as a handshake packet.
-func (t *UDPv5) handleWhoareyou(p *v5wire.Whoareyou, fromID enode.ID, fromAddr *net.UDPAddr) {
+func (t *UDPv5) handleWhoareyou(p *v5wire.Whoareyou, fromID enode.ID, fromAddr netip.AddrPort) {
 	c, err := t.matchWithCall(fromID, p.Nonce)
 	if err != nil {
 		t.log.Debug("Invalid "+p.Name(), "addr", fromAddr, "err", err)
@@ -817,32 +820,35 @@ func (t *UDPv5) matchWithCall(fromID enode.ID, nonce v5wire.Nonce) (*callV5, err
 }
 
 // handlePing sends a PONG response.
-func (t *UDPv5) handlePing(p *v5wire.Ping, fromID enode.ID, fromAddr *net.UDPAddr) {
-	remoteIP := fromAddr.IP
-	// Handle IPv4 mapped IPv6 addresses in the
-	// event the local node is binded to an
-	// ipv6 interface.
-	if remoteIP.To4() != nil {
-		remoteIP = remoteIP.To4()
+func (t *UDPv5) handlePing(p *v5wire.Ping, fromID enode.ID, fromAddr netip.AddrPort) {
+	var remoteIP net.IP
+	// Handle IPv4-mapped IPv6 addresses in the event the local node is bound
+	// to an IPv6 interface.
+	if fromAddr.Addr().Is4() || fromAddr.Addr().Is4In6() {
+		ip4 := fromAddr.Addr().As4()
+		remoteIP = ip4[:]
+	} else {
+		remoteIP = fromAddr.Addr().AsSlice()
 	}
 	t.sendResponse(fromID, fromAddr, &v5wire.Pong{
 		ReqID:  p.ReqID,
 		ToIP:   remoteIP,
-		ToPort: uint16(fromAddr.Port),
+		ToPort: fromAddr.Port(),
 		ENRSeq: t.localNode.Node().Seq(),
 	})
}
 
 // handleFindnode returns nodes to the requester.
-func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr *net.UDPAddr) { - nodes := t.collectTableNodes(fromAddr.IP, p.Distances, findnodeResultLimit) +func (t *UDPv5) handleFindnode(p *v5wire.Findnode, fromID enode.ID, fromAddr netip.AddrPort) { + nodes := t.collectTableNodes(fromAddr.Addr(), p.Distances, findnodeResultLimit) for _, resp := range packNodes(p.ReqID, nodes) { t.sendResponse(fromID, fromAddr, resp) } } // collectTableNodes creates a FINDNODE result set for the given distances. -func (t *UDPv5) collectTableNodes(rip net.IP, distances []uint, limit int) []*enode.Node { +func (t *UDPv5) collectTableNodes(rip netip.Addr, distances []uint, limit int) []*enode.Node { + ripSlice := rip.AsSlice() var bn []*enode.Node var nodes []*enode.Node var processed = make(map[uint]struct{}) @@ -857,7 +863,7 @@ func (t *UDPv5) collectTableNodes(rip net.IP, distances []uint, limit int) []*en for _, n := range t.tab.appendLiveNodes(dist, bn[:0]) { // Apply some pre-checks to avoid sending invalid nodes. // Note liveness is checked by appendLiveNodes. - if netutil.CheckRelayIP(rip, n.IP()) != nil { + if netutil.CheckRelayIP(ripSlice, n.IP()) != nil { continue } nodes = append(nodes, n) diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index 4373ea8184..1f8e972200 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -23,6 +23,7 @@ import ( "fmt" "math/rand" "net" + "net/netip" "reflect" "slices" "testing" @@ -103,7 +104,7 @@ func TestUDPv5_pingHandling(t *testing.T) { defer test.close() test.packetIn(&v5wire.Ping{ReqID: []byte("foo")}) - test.waitPacketOut(func(p *v5wire.Pong, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Pong, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, []byte("foo")) { t.Error("wrong request ID in response:", p.ReqID) } @@ -135,16 +136,16 @@ func TestUDPv5_unknownPacket(t *testing.T) { // Unknown packet from unknown node. test.packetIn(&v5wire.Unknown{Nonce: nonce}) - test.waitPacketOut(func(p *v5wire.Whoareyou, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Whoareyou, addr netip.AddrPort, _ v5wire.Nonce) { check(p, 0) }) // Make node known. n := test.getNode(test.remotekey, test.remoteaddr).Node() - test.table.addSeenNode(wrapNode(n)) + test.table.addFoundNode(n, false) test.packetIn(&v5wire.Unknown{Nonce: nonce}) - test.waitPacketOut(func(p *v5wire.Whoareyou, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Whoareyou, addr netip.AddrPort, _ v5wire.Nonce) { check(p, n.Seq()) }) } @@ -159,9 +160,9 @@ func TestUDPv5_findnodeHandling(t *testing.T) { nodes253 := nodesAtDistance(test.table.self().ID(), 253, 16) nodes249 := nodesAtDistance(test.table.self().ID(), 249, 4) nodes248 := nodesAtDistance(test.table.self().ID(), 248, 10) - fillTable(test.table, wrapNodes(nodes253), true) - fillTable(test.table, wrapNodes(nodes249), true) - fillTable(test.table, wrapNodes(nodes248), true) + fillTable(test.table, nodes253, true) + fillTable(test.table, nodes249, true) + fillTable(test.table, nodes248, true) // Requesting with distance zero should return the node's own record. 
test.packetIn(&v5wire.Findnode{ReqID: []byte{0}, Distances: []uint{0}}) @@ -199,7 +200,7 @@ func (test *udpV5Test) expectNodes(wantReqID []byte, wantTotal uint8, wantNodes } for { - test.waitPacketOut(func(p *v5wire.Nodes, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Nodes, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, wantReqID) { test.t.Fatalf("wrong request ID %v in response, want %v", p.ReqID, wantReqID) } @@ -238,7 +239,7 @@ func TestUDPv5_pingCall(t *testing.T) { _, err := test.udp.ping(remote) done <- err }() - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) {}) + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) {}) if err := <-done; err != errTimeout { t.Fatalf("want errTimeout, got %q", err) } @@ -248,7 +249,7 @@ func TestUDPv5_pingCall(t *testing.T) { _, err := test.udp.ping(remote) done <- err }() - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { test.packetInFrom(test.remotekey, test.remoteaddr, &v5wire.Pong{ReqID: p.ReqID}) }) if err := <-done; err != nil { @@ -260,8 +261,8 @@ func TestUDPv5_pingCall(t *testing.T) { _, err := test.udp.ping(remote) done <- err }() - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { - wrongAddr := &net.UDPAddr{IP: net.IP{33, 44, 55, 22}, Port: 10101} + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { + wrongAddr := netip.MustParseAddrPort("33.44.55.22:10101") test.packetInFrom(test.remotekey, wrongAddr, &v5wire.Pong{ReqID: p.ReqID}) }) if err := <-done; err != errTimeout { @@ -291,7 +292,7 @@ func TestUDPv5_findnodeCall(t *testing.T) { }() // Serve the responses: - test.waitPacketOut(func(p *v5wire.Findnode, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Findnode, addr netip.AddrPort, _ v5wire.Nonce) { if !reflect.DeepEqual(p.Distances, distances) { t.Fatalf("wrong distances in request: %v", p.Distances) } @@ -337,15 +338,15 @@ func TestUDPv5_callResend(t *testing.T) { }() // Ping answered by WHOAREYOU. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, nonce v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) }) // Ping should be re-sent. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { test.packetIn(&v5wire.Pong{ReqID: p.ReqID}) }) // Answer the other ping. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, _ v5wire.Nonce) { test.packetIn(&v5wire.Pong{ReqID: p.ReqID}) }) if err := <-done; err != nil { @@ -370,11 +371,11 @@ func TestUDPv5_multipleHandshakeRounds(t *testing.T) { }() // Ping answered by WHOAREYOU. - test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, nonce v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) }) // Ping answered by WHOAREYOU again. 
- test.waitPacketOut(func(p *v5wire.Ping, addr *net.UDPAddr, nonce v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Ping, addr netip.AddrPort, nonce v5wire.Nonce) { test.packetIn(&v5wire.Whoareyou{Nonce: nonce}) }) if err := <-done; err != errTimeout { @@ -401,7 +402,7 @@ func TestUDPv5_callTimeoutReset(t *testing.T) { }() // Serve two responses, slowly. - test.waitPacketOut(func(p *v5wire.Findnode, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Findnode, addr netip.AddrPort, _ v5wire.Nonce) { time.Sleep(respTimeout - 50*time.Millisecond) test.packetIn(&v5wire.Nodes{ ReqID: p.ReqID, @@ -439,7 +440,7 @@ func TestUDPv5_talkHandling(t *testing.T) { Protocol: "test", Message: []byte("test request"), }) - test.waitPacketOut(func(p *v5wire.TalkResponse, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkResponse, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, []byte("foo")) { t.Error("wrong request ID in response:", p.ReqID) } @@ -458,7 +459,7 @@ func TestUDPv5_talkHandling(t *testing.T) { Protocol: "wrong", Message: []byte("test request"), }) - test.waitPacketOut(func(p *v5wire.TalkResponse, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkResponse, addr netip.AddrPort, _ v5wire.Nonce) { if !bytes.Equal(p.ReqID, []byte("2")) { t.Error("wrong request ID in response:", p.ReqID) } @@ -485,7 +486,7 @@ func TestUDPv5_talkRequest(t *testing.T) { _, err := test.udp.TalkRequest(remote, "test", []byte("test request")) done <- err }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr *net.UDPAddr, _ v5wire.Nonce) {}) + test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) {}) if err := <-done; err != errTimeout { t.Fatalf("want errTimeout, got %q", err) } @@ -495,7 +496,7 @@ func TestUDPv5_talkRequest(t *testing.T) { _, err := test.udp.TalkRequest(remote, "test", []byte("test request")) done <- err }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) { if p.Protocol != "test" { t.Errorf("wrong protocol ID in talk request: %q", p.Protocol) } @@ -516,7 +517,7 @@ func TestUDPv5_talkRequest(t *testing.T) { _, err := test.udp.TalkRequestToID(remote.ID(), test.remoteaddr, "test", []byte("test request 2")) done <- err }() - test.waitPacketOut(func(p *v5wire.TalkRequest, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.TalkRequest, addr netip.AddrPort, _ v5wire.Nonce) { if p.Protocol != "test" { t.Errorf("wrong protocol ID in talk request: %q", p.Protocol) } @@ -583,13 +584,14 @@ func TestUDPv5_lookup(t *testing.T) { for d, nn := range lookupTestnet.dists { for i, key := range nn { n := lookupTestnet.node(d, i) - test.getNode(key, &net.UDPAddr{IP: n.IP(), Port: n.UDP()}) + addr, _ := n.UDPEndpoint() + test.getNode(key, addr) } } // Seed table with initial node. initialNode := lookupTestnet.node(256, 0) - fillTable(test.table, []*node{wrapNode(initialNode)}, true) + fillTable(test.table, []*enode.Node{initialNode}, true) // Start the lookup. resultC := make(chan []*enode.Node, 1) @@ -601,7 +603,7 @@ func TestUDPv5_lookup(t *testing.T) { // Answer lookup packets. 
asked := make(map[enode.ID]bool) for done := false; !done; { - done = test.waitPacketOut(func(p v5wire.Packet, to *net.UDPAddr, _ v5wire.Nonce) { + done = test.waitPacketOut(func(p v5wire.Packet, to netip.AddrPort, _ v5wire.Nonce) { recipient, key := lookupTestnet.nodeByAddr(to) switch p := p.(type) { case *v5wire.Ping: @@ -652,11 +654,8 @@ func TestUDPv5_PingWithIPV4MappedAddress(t *testing.T) { test := newUDPV5Test(t) defer test.close() - rawIP := net.IPv4(0xFF, 0x12, 0x33, 0xE5) - test.remoteaddr = &net.UDPAddr{ - IP: rawIP.To16(), - Port: 0, - } + rawIP := netip.AddrFrom4([4]byte{0xFF, 0x12, 0x33, 0xE5}) + test.remoteaddr = netip.AddrPortFrom(netip.AddrFrom16(rawIP.As16()), 0) remote := test.getNode(test.remotekey, test.remoteaddr).Node() done := make(chan struct{}, 1) @@ -665,14 +664,14 @@ func TestUDPv5_PingWithIPV4MappedAddress(t *testing.T) { test.udp.handlePing(&v5wire.Ping{ENRSeq: 1}, remote.ID(), test.remoteaddr) done <- struct{}{} }() - test.waitPacketOut(func(p *v5wire.Pong, addr *net.UDPAddr, _ v5wire.Nonce) { + test.waitPacketOut(func(p *v5wire.Pong, addr netip.AddrPort, _ v5wire.Nonce) { if len(p.ToIP) == net.IPv6len { t.Error("Received untruncated ip address") } if len(p.ToIP) != net.IPv4len { t.Errorf("Received ip address with incorrect length: %d", len(p.ToIP)) } - if !p.ToIP.Equal(rawIP) { + if !p.ToIP.Equal(rawIP.AsSlice()) { t.Errorf("Received incorrect ip address: wanted %s but received %s", rawIP.String(), p.ToIP.String()) } }) @@ -688,9 +687,9 @@ type udpV5Test struct { db *enode.DB udp *UDPv5 localkey, remotekey *ecdsa.PrivateKey - remoteaddr *net.UDPAddr + remoteaddr netip.AddrPort nodesByID map[enode.ID]*enode.LocalNode - nodesByIP map[string]*enode.LocalNode + nodesByIP map[netip.Addr]*enode.LocalNode } // testCodec is the packet encoding used by protocol tests. This codec does not perform encryption. @@ -750,9 +749,9 @@ func newUDPV5Test(t *testing.T) *udpV5Test { pipe: newpipe(), localkey: newkey(), remotekey: newkey(), - remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303}, + remoteaddr: netip.MustParseAddrPort("10.0.1.99:30303"), nodesByID: make(map[enode.ID]*enode.LocalNode), - nodesByIP: make(map[string]*enode.LocalNode), + nodesByIP: make(map[netip.Addr]*enode.LocalNode), } test.db, _ = enode.OpenDB("") ln := enode.NewLocalNode(test.db, test.localkey) @@ -777,8 +776,8 @@ func (test *udpV5Test) packetIn(packet v5wire.Packet) { test.packetInFrom(test.remotekey, test.remoteaddr, packet) } -// handles a packet as if it had been sent to the transport by the key/endpoint. -func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr *net.UDPAddr, packet v5wire.Packet) { +// packetInFrom handles a packet as if it had been sent to the transport by the key/endpoint. +func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr netip.AddrPort, packet v5wire.Packet) { test.t.Helper() ln := test.getNode(key, addr) @@ -793,22 +792,22 @@ func (test *udpV5Test) packetInFrom(key *ecdsa.PrivateKey, addr *net.UDPAddr, pa } // getNode ensures the test knows about a node at the given endpoint. 
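// NOTE: illustrative sketch, not part of the patch. nodesByIP is now keyed by
// netip.Addr rather than string(addr.IP): netip.Addr is a comparable value
// type, so it works as a map key directly, without the string round-trip.
// With 'ln' standing in for some *enode.LocalNode:
//
//	index := make(map[netip.Addr]*enode.LocalNode)
//	index[netip.MustParseAddr("10.0.1.99")] = ln
//	_, ok := index[netip.MustParseAddr("10.0.1.99")] // ok == true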
-func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr *net.UDPAddr) *enode.LocalNode { +func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr netip.AddrPort) *enode.LocalNode { id := encodePubkey(&key.PublicKey).id() ln := test.nodesByID[id] if ln == nil { db, _ := enode.OpenDB("") ln = enode.NewLocalNode(db, key) - ln.SetStaticIP(addr.IP) - ln.Set(enr.UDP(addr.Port)) + ln.SetStaticIP(addr.Addr().AsSlice()) + ln.Set(enr.UDP(addr.Port())) test.nodesByID[id] = ln } - test.nodesByIP[string(addr.IP)] = ln + test.nodesByIP[addr.Addr()] = ln return ln } // waitPacketOut waits for the next output packet and handles it using the given 'validate' -// function. The function must be of type func (X, *net.UDPAddr, v5wire.Nonce) where X is +// function. The function must be of type func (X, netip.AddrPort, v5wire.Nonce) where X is // assignable to packetV5. func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { test.t.Helper() @@ -824,7 +823,7 @@ func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { test.t.Fatalf("timed out waiting for %v", exptype) return false } - ln := test.nodesByIP[string(dgram.to.IP)] + ln := test.nodesByIP[dgram.to.Addr()] if ln == nil { test.t.Fatalf("attempt to send to non-existing node %v", &dgram.to) return false @@ -839,7 +838,7 @@ func (test *udpV5Test) waitPacketOut(validate interface{}) (closed bool) { test.t.Errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype) return false } - fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(&dgram.to), reflect.ValueOf(frame.AuthTag)}) + fn.Call([]reflect.Value{reflect.ValueOf(p), reflect.ValueOf(dgram.to), reflect.ValueOf(frame.AuthTag)}) return false } diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go index 6ad7f809a7..db7841c047 100644 --- a/p2p/enode/idscheme.go +++ b/p2p/enode/idscheme.go @@ -157,5 +157,5 @@ func SignNull(r *enr.Record, id ID) *Node { if err := r.SetSig(NullID{}, []byte{}); err != nil { panic(err) } - return &Node{r: *r, id: id} + return newNodeWithID(r, id) } diff --git a/p2p/enode/node.go b/p2p/enode/node.go index d7a1a9a156..cb4ac8d172 100644 --- a/p2p/enode/node.go +++ b/p2p/enode/node.go @@ -24,6 +24,7 @@ import ( "fmt" "math/bits" "net" + "net/netip" "strings" "github.com/ethereum/go-ethereum/p2p/enr" @@ -36,6 +37,10 @@ var errMissingPrefix = errors.New("missing 'enr:' prefix for base64-encoded reco type Node struct { r enr.Record id ID + // endpoint information + ip netip.Addr + udp uint16 + tcp uint16 } // New wraps a node record. The record must be valid according to the given @@ -44,11 +49,76 @@ func New(validSchemes enr.IdentityScheme, r *enr.Record) (*Node, error) { if err := r.VerifySignature(validSchemes); err != nil { return nil, err } - node := &Node{r: *r} - if n := copy(node.id[:], validSchemes.NodeAddr(&node.r)); n != len(ID{}) { - return nil, fmt.Errorf("invalid node ID length %d, need %d", n, len(ID{})) + var id ID + if n := copy(id[:], validSchemes.NodeAddr(r)); n != len(id) { + return nil, fmt.Errorf("invalid node ID length %d, need %d", n, len(id)) + } + return newNodeWithID(r, id), nil +} + +func newNodeWithID(r *enr.Record, id ID) *Node { + n := &Node{r: *r, id: id} + // Set the preferred endpoint. + // Here we decide between IPv4 and IPv6, choosing the 'most global' address. 
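// NOTE: worked example, not part of the patch. Under this rule, a record
// advertising both 192.168.2.2 (private, locality score 3) and
// 2001::ff00:0042:8329 (global, score 4) resolves to the IPv6 endpoint;
// the table-driven cases added to node_test.go below exercise exactly
// these combinations.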
+ var ip4 netip.Addr + var ip6 netip.Addr + n.Load((*enr.IPv4Addr)(&ip4)) + n.Load((*enr.IPv6Addr)(&ip6)) + valid4 := validIP(ip4) + valid6 := validIP(ip6) + switch { + case valid4 && valid6: + if localityScore(ip4) >= localityScore(ip6) { + n.setIP4(ip4) + } else { + n.setIP6(ip6) + } + case valid4: + n.setIP4(ip4) + case valid6: + n.setIP6(ip6) + } + return n +} + +// validIP reports whether 'ip' is a valid node endpoint IP address. +func validIP(ip netip.Addr) bool { + return ip.IsValid() && !ip.IsMulticast() +} + +func localityScore(ip netip.Addr) int { + switch { + case ip.IsUnspecified(): + return 0 + case ip.IsLoopback(): + return 1 + case ip.IsLinkLocalUnicast(): + return 2 + case ip.IsPrivate(): + return 3 + default: + return 4 + } +} + +func (n *Node) setIP4(ip netip.Addr) { + n.ip = ip + n.Load((*enr.UDP)(&n.udp)) + n.Load((*enr.TCP)(&n.tcp)) +} + +func (n *Node) setIP6(ip netip.Addr) { + if ip.Is4In6() { + n.setIP4(ip) + return + } + n.ip = ip + if err := n.Load((*enr.UDP6)(&n.udp)); err != nil { + n.Load((*enr.UDP)(&n.udp)) + } + if err := n.Load((*enr.TCP6)(&n.tcp)); err != nil { + n.Load((*enr.TCP)(&n.tcp)) } - return node, nil } // MustParse parses a node record or enode:// URL. It panics if the input is invalid. @@ -89,43 +159,45 @@ func (n *Node) Seq() uint64 { return n.r.Seq() } -// Incomplete returns true for nodes with no IP address. -func (n *Node) Incomplete() bool { - return n.IP() == nil -} - // Load retrieves an entry from the underlying record. func (n *Node) Load(k enr.Entry) error { return n.r.Load(k) } -// IP returns the IP address of the node. This prefers IPv4 addresses. +// IP returns the IP address of the node. func (n *Node) IP() net.IP { - var ( - ip4 enr.IPv4 - ip6 enr.IPv6 - ) - if n.Load(&ip4) == nil { - return net.IP(ip4) - } - if n.Load(&ip6) == nil { - return net.IP(ip6) - } - return nil + return net.IP(n.ip.AsSlice()) +} + +// IPAddr returns the IP address of the node. +func (n *Node) IPAddr() netip.Addr { + return n.ip } // UDP returns the UDP port of the node. func (n *Node) UDP() int { - var port enr.UDP - n.Load(&port) - return int(port) + return int(n.udp) } // TCP returns the TCP port of the node. func (n *Node) TCP() int { - var port enr.TCP - n.Load(&port) - return int(port) + return int(n.tcp) +} + +// UDPEndpoint returns the announced UDP endpoint. +func (n *Node) UDPEndpoint() (netip.AddrPort, bool) { + if !n.ip.IsValid() || n.ip.IsUnspecified() || n.udp == 0 { + return netip.AddrPort{}, false + } + return netip.AddrPortFrom(n.ip, n.udp), true +} + +// TCPEndpoint returns the announced TCP endpoint. +func (n *Node) TCPEndpoint() (netip.AddrPort, bool) { + if !n.ip.IsValid() || n.ip.IsUnspecified() || n.tcp == 0 { + return netip.AddrPort{}, false + } + return netip.AddrPortFrom(n.ip, n.tcp), true } // Pubkey returns the secp256k1 public key of the node, if present. @@ -147,16 +219,15 @@ func (n *Node) Record() *enr.Record { // ValidateComplete checks whether n has a valid IP and UDP port. // Deprecated: don't use this method. func (n *Node) ValidateComplete() error { - if n.Incomplete() { + if !n.ip.IsValid() { return errors.New("missing IP address") } - if n.UDP() == 0 { - return errors.New("missing UDP port") - } - ip := n.IP() - if ip.IsMulticast() || ip.IsUnspecified() { + if n.ip.IsMulticast() || n.ip.IsUnspecified() { return errors.New("invalid IP (multicast/unspecified)") } + if n.udp == 0 { + return errors.New("missing UDP port") + } // Validate the node key (on curve, etc.). 
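// NOTE: illustrative usage, not part of the patch. ValidateComplete closely
// mirrors the checks UDPEndpoint performs above; new callers in this diff
// prefer the two-value form over validating by hand ('conn' and 'packet'
// are hypothetical here):
//
//	if ap, ok := n.UDPEndpoint(); ok {
//		conn.WriteToUDPAddrPort(packet, ap)
//	}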
var key Secp256k1 return n.Load(&key) diff --git a/p2p/enode/node_test.go b/p2p/enode/node_test.go index d15859c477..56e196e82e 100644 --- a/p2p/enode/node_test.go +++ b/p2p/enode/node_test.go @@ -21,6 +21,7 @@ import ( "encoding/hex" "fmt" "math/big" + "net/netip" "testing" "testing/quick" @@ -64,6 +65,167 @@ func TestPythonInterop(t *testing.T) { } } +func TestNodeEndpoints(t *testing.T) { + id := HexID("00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc") + type endpointTest struct { + name string + node *Node + wantIP netip.Addr + wantUDP int + wantTCP int + } + tests := []endpointTest{ + { + name: "no-addr", + node: func() *Node { + var r enr.Record + return SignNull(&r, id) + }(), + }, + { + name: "udp-only", + node: func() *Node { + var r enr.Record + r.Set(enr.UDP(9000)) + return SignNull(&r, id) + }(), + }, + { + name: "tcp-only", + node: func() *Node { + var r enr.Record + r.Set(enr.TCP(9000)) + return SignNull(&r, id) + }(), + }, + { + name: "ipv4-only-loopback", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("127.0.0.1"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("127.0.0.1"), + }, + { + name: "ipv4-only-unspecified", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("0.0.0.0"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("0.0.0.0"), + }, + { + name: "ipv4-only", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("99.22.33.1"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("99.22.33.1"), + }, + { + name: "ipv6-only", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + }, + { + name: "ipv4-loopback-and-ipv6-global", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("127.0.0.1"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + wantUDP: 30306, + }, + { + name: "ipv4-unspecified-and-ipv6-loopback", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("0.0.0.0"))) + r.Set(enr.IPv6Addr(netip.MustParseAddr("::1"))) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("::1"), + }, + { + name: "ipv4-private-and-ipv6-global", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("192.168.2.2"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + wantUDP: 30306, + }, + { + name: "ipv4-local-and-ipv6-global", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("169.254.2.6"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("2001::ff00:0042:8329"), + wantUDP: 30306, + }, + { + name: "ipv4-private-and-ipv6-private", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("192.168.2.2"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("fd00::abcd:1"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("192.168.2.2"), + wantUDP: 
30304, + }, + { + name: "ipv4-private-and-ipv6-link-local", + node: func() *Node { + var r enr.Record + r.Set(enr.IPv4Addr(netip.MustParseAddr("192.168.2.2"))) + r.Set(enr.UDP(30304)) + r.Set(enr.IPv6Addr(netip.MustParseAddr("fe80::1"))) + r.Set(enr.UDP6(30306)) + return SignNull(&r, id) + }(), + wantIP: netip.MustParseAddr("192.168.2.2"), + wantUDP: 30304, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.wantIP != test.node.IPAddr() { + t.Errorf("node has wrong IP %v, want %v", test.node.IPAddr(), test.wantIP) + } + if test.wantUDP != test.node.UDP() { + t.Errorf("node has wrong UDP port %d, want %d", test.node.UDP(), test.wantUDP) + } + if test.wantTCP != test.node.TCP() { + t.Errorf("node has wrong TCP port %d, want %d", test.node.TCP(), test.wantTCP) + } + }) + } +} + func TestHexID(t *testing.T) { ref := ID{0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188} id1 := HexID("0x00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc") diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 6d55ce17f1..654d71d47b 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -26,6 +26,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rlp" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" @@ -242,13 +243,14 @@ func (db *DB) Node(id ID) *Node { } func mustDecodeNode(id, data []byte) *Node { - node := new(Node) - if err := rlp.DecodeBytes(data, &node.r); err != nil { + var r enr.Record + if err := rlp.DecodeBytes(data, &r); err != nil { panic(fmt.Errorf("p2p/enode: can't decode node %x in DB: %v", id, err)) } - // Restore node id cache. - copy(node.id[:], id) - return node + if len(id) != len(ID{}) { + panic(fmt.Errorf("invalid id length %d", len(id))) + } + return newNodeWithID(&r, ID(id)) } // UpdateNode inserts - potentially overwriting - a node into the peer database. diff --git a/p2p/enode/urlv4.go b/p2p/enode/urlv4.go index 0272eee987..a55dfa6632 100644 --- a/p2p/enode/urlv4.go +++ b/p2p/enode/urlv4.go @@ -181,7 +181,7 @@ func (n *Node) URLv4() string { nodeid = fmt.Sprintf("%s.%x", scheme, n.id[:]) } u := url.URL{Scheme: "enode"} - if n.Incomplete() { + if !n.ip.IsValid() { u.Host = nodeid } else { addr := net.TCPAddr{IP: n.IP(), Port: n.TCP()} diff --git a/p2p/enr/entries.go b/p2p/enr/entries.go index 9945a436c9..917e1becba 100644 --- a/p2p/enr/entries.go +++ b/p2p/enr/entries.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "net" + "net/netip" "github.com/ethereum/go-ethereum/rlp" ) @@ -167,6 +168,60 @@ func (v *IPv6) DecodeRLP(s *rlp.Stream) error { return nil } +// IPv4Addr is the "ip" key, which holds the IP address of the node. +type IPv4Addr netip.Addr + +func (v IPv4Addr) ENRKey() string { return "ip" } + +// EncodeRLP implements rlp.Encoder. +func (v IPv4Addr) EncodeRLP(w io.Writer) error { + addr := netip.Addr(v) + if !addr.Is4() { + return fmt.Errorf("address is not IPv4") + } + enc := rlp.NewEncoderBuffer(w) + bytes := addr.As4() + enc.WriteBytes(bytes[:]) + return enc.Flush() +} + +// DecodeRLP implements rlp.Decoder. +func (v *IPv4Addr) DecodeRLP(s *rlp.Stream) error { + var bytes [4]byte + if err := s.ReadBytes(bytes[:]); err != nil { + return err + } + *v = IPv4Addr(netip.AddrFrom4(bytes)) + return nil +} + +// IPv6Addr is the "ip6" key, which holds the IP address of the node. 
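+// Like IPv4Addr above, the value round-trips through RLP as the raw address
+// bytes (16 of them here). For example:
+//
+//	r.Set(enr.IPv6Addr(netip.MustParseAddr("2001::ff00:0042:8329")))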
+type IPv6Addr netip.Addr + +func (v IPv6Addr) ENRKey() string { return "ip6" } + +// EncodeRLP implements rlp.Encoder. +func (v IPv6Addr) EncodeRLP(w io.Writer) error { + addr := netip.Addr(v) + if !addr.Is6() { + return fmt.Errorf("address is not IPv6") + } + enc := rlp.NewEncoderBuffer(w) + bytes := addr.As16() + enc.WriteBytes(bytes[:]) + return enc.Flush() +} + +// DecodeRLP implements rlp.Decoder. +func (v *IPv6Addr) DecodeRLP(s *rlp.Stream) error { + var bytes [16]byte + if err := s.ReadBytes(bytes[:]); err != nil { + return err + } + *v = IPv6Addr(netip.AddrFrom16(bytes)) + return nil +} + // KeyError is an error related to a key. type KeyError struct { Key string diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go deleted file mode 100644 index 8052144465..0000000000 --- a/p2p/nodestate/nodestate.go +++ /dev/null @@ -1,1023 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package nodestate - -import ( - "errors" - "reflect" - "sync" - "time" - "unsafe" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/rlp" -) - -var ( - ErrInvalidField = errors.New("invalid field type") - ErrClosed = errors.New("already closed") -) - -type ( - // NodeStateMachine implements a network node-related event subscription system. - // It can assign binary state flags and fields of arbitrary type to each node and allows - // subscriptions to flag/field changes which can also modify further flags and fields, - // potentially triggering further subscriptions. An operation includes an initial change - // and all resulting subsequent changes and always ends in a consistent global state. - // It is initiated by a "top level" SetState/SetField call that blocks (also blocking other - // top-level functions) until the operation is finished. Callbacks making further changes - // should use the non-blocking SetStateSub/SetFieldSub functions. The tree of events - // resulting from the initial changes is traversed in a breadth-first order, ensuring for - // each subscription callback that all other callbacks caused by the same change triggering - // the current callback are processed before anything is triggered by the changes made in the - // current callback. In practice this logic ensures that all subscriptions "see" events in - // the logical order, callbacks are never called concurrently and "back and forth" effects - // are also possible. The state machine design should ensure that infinite event cycles - // cannot happen. - // The caller can also add timeouts assigned to a certain node and a subset of state flags. 
- // If the timeout elapses, the flags are reset. If all relevant flags are reset then the timer - // is dropped. State flags with no timeout are persisted in the database if the flag - // descriptor enables saving. If a node has no state flags set at any moment then it is discarded. - // Note: in order to avoid mutex deadlocks the callbacks should never lock a mutex that - // might be locked when the top level SetState/SetField functions are called. If a function - // potentially performs state/field changes then it is recommended to mention this fact in the - // function description, along with whether it should run inside an operation callback. - NodeStateMachine struct { - started, closed bool - lock sync.Mutex - clock mclock.Clock - db ethdb.KeyValueStore - dbNodeKey []byte - nodes map[enode.ID]*nodeInfo - offlineCallbackList []offlineCallback - opFlag bool // an operation has started - opWait *sync.Cond // signaled when the operation ends - opPending []func() // pending callback list of the current operation - - // Registered state flags or fields. Modifications are allowed - // only when the node state machine has not been started. - setup *Setup - fields []*fieldInfo - saveFlags bitMask - - // Installed callbacks. Modifications are allowed only when the - // node state machine has not been started. - stateSubs []stateSub - - // Testing hooks, only for testing purposes. - saveNodeHook func(*nodeInfo) - } - - // Flags represents a set of flags from a certain setup - Flags struct { - mask bitMask - setup *Setup - } - - // Field represents a field from a certain setup - Field struct { - index int - setup *Setup - } - - // flagDefinition describes a node state flag. Each registered instance is automatically - // mapped to a bit of the 64 bit node states. - // If persistent is true then the node is saved when state machine is shutdown. - flagDefinition struct { - name string - persistent bool - } - - // fieldDefinition describes an optional node field of the given type. The contents - // of the field are only retained for each node as long as at least one of the - // state flags is set. - fieldDefinition struct { - name string - ftype reflect.Type - encode func(interface{}) ([]byte, error) - decode func([]byte) (interface{}, error) - } - - // Setup contains the list of flags and fields used by the application - Setup struct { - Version uint - flags []flagDefinition - fields []fieldDefinition - } - - // bitMask describes a node state or state mask. It represents a subset - // of node flags with each bit assigned to a flag index (LSB represents flag 0). - bitMask uint64 - - // StateCallback is a subscription callback which is called when one of the - // state flags that is included in the subscription state mask is changed. - // Note: oldState and newState are also masked with the subscription mask so only - // the relevant bits are included. - StateCallback func(n *enode.Node, oldState, newState Flags) - - // FieldCallback is a subscription callback which is called when the value of - // a specific field is changed. 
- FieldCallback func(n *enode.Node, state Flags, oldValue, newValue interface{}) - - // nodeInfo contains node state, fields and state timeouts - nodeInfo struct { - node *enode.Node - state bitMask - timeouts []*nodeStateTimeout - fields []interface{} - fieldCount int - db, dirty bool - } - - nodeInfoEnc struct { - Enr enr.Record - Version uint - State bitMask - Fields [][]byte - } - - stateSub struct { - mask bitMask - callback StateCallback - } - - nodeStateTimeout struct { - mask bitMask - timer mclock.Timer - } - - fieldInfo struct { - fieldDefinition - subs []FieldCallback - } - - offlineCallback struct { - node *nodeInfo - state bitMask - fields []interface{} - } -) - -// offlineState is a special state that is assumed to be set before a node is loaded from -// the database and after it is shut down. -const offlineState = bitMask(1) - -// NewFlag creates a new node state flag -func (s *Setup) NewFlag(name string) Flags { - if s.flags == nil { - s.flags = []flagDefinition{{name: "offline"}} - } - f := Flags{mask: bitMask(1) << uint(len(s.flags)), setup: s} - s.flags = append(s.flags, flagDefinition{name: name}) - return f -} - -// NewPersistentFlag creates a new persistent node state flag -func (s *Setup) NewPersistentFlag(name string) Flags { - if s.flags == nil { - s.flags = []flagDefinition{{name: "offline"}} - } - f := Flags{mask: bitMask(1) << uint(len(s.flags)), setup: s} - s.flags = append(s.flags, flagDefinition{name: name, persistent: true}) - return f -} - -// OfflineFlag returns the system-defined offline flag belonging to the given setup -func (s *Setup) OfflineFlag() Flags { - return Flags{mask: offlineState, setup: s} -} - -// NewField creates a new node state field -func (s *Setup) NewField(name string, ftype reflect.Type) Field { - f := Field{index: len(s.fields), setup: s} - s.fields = append(s.fields, fieldDefinition{ - name: name, - ftype: ftype, - }) - return f -} - -// NewPersistentField creates a new persistent node field -func (s *Setup) NewPersistentField(name string, ftype reflect.Type, encode func(interface{}) ([]byte, error), decode func([]byte) (interface{}, error)) Field { - f := Field{index: len(s.fields), setup: s} - s.fields = append(s.fields, fieldDefinition{ - name: name, - ftype: ftype, - encode: encode, - decode: decode, - }) - return f -} - -// flagOp implements binary flag operations and also checks whether the operands belong to the same setup -func flagOp(a, b Flags, trueIfA, trueIfB, trueIfBoth bool) Flags { - if a.setup == nil { - if a.mask != 0 { - panic("Node state flags have no setup reference") - } - a.setup = b.setup - } - if b.setup == nil { - if b.mask != 0 { - panic("Node state flags have no setup reference") - } - b.setup = a.setup - } - if a.setup != b.setup { - panic("Node state flags belong to a different setup") - } - res := Flags{setup: a.setup} - if trueIfA { - res.mask |= a.mask & ^b.mask - } - if trueIfB { - res.mask |= b.mask & ^a.mask - } - if trueIfBoth { - res.mask |= a.mask & b.mask - } - return res -} - -// And returns the set of flags present in both a and b -func (a Flags) And(b Flags) Flags { return flagOp(a, b, false, false, true) } - -// AndNot returns the set of flags present in a but not in b -func (a Flags) AndNot(b Flags) Flags { return flagOp(a, b, true, false, false) } - -// Or returns the set of flags present in either a or b -func (a Flags) Or(b Flags) Flags { return flagOp(a, b, true, true, true) } - -// Xor returns the set of flags present in either a or b but not both -func (a Flags) Xor(b Flags) Flags { 
return flagOp(a, b, true, true, false) } - -// HasAll returns true if b is a subset of a -func (a Flags) HasAll(b Flags) bool { return flagOp(a, b, false, true, false).mask == 0 } - -// HasNone returns true if a and b have no shared flags -func (a Flags) HasNone(b Flags) bool { return flagOp(a, b, false, false, true).mask == 0 } - -// Equals returns true if a and b have the same flags set -func (a Flags) Equals(b Flags) bool { return flagOp(a, b, true, true, false).mask == 0 } - -// IsEmpty returns true if a has no flags set -func (a Flags) IsEmpty() bool { return a.mask == 0 } - -// MergeFlags merges multiple sets of state flags -func MergeFlags(list ...Flags) Flags { - if len(list) == 0 { - return Flags{} - } - res := list[0] - for i := 1; i < len(list); i++ { - res = res.Or(list[i]) - } - return res -} - -// String returns a list of the names of the flags specified in the bit mask -func (f Flags) String() string { - if f.mask == 0 { - return "[]" - } - s := "[" - comma := false - for index, flag := range f.setup.flags { - if f.mask&(bitMask(1)<<uint(index)) != 0 { - if comma { - s = s + ", " - } - s = s + flag.name - comma = true - } - } - s = s + "]" - return s -} - -// NewNodeStateMachine creates a new node state machine. -// If db is not nil then the node states, fields and active timeouts are persisted. -// Persistence can be enabled or disabled for each state flag and field. -func NewNodeStateMachine(db ethdb.KeyValueStore, dbKey []byte, clock mclock.Clock, setup *Setup) *NodeStateMachine { - if setup.flags == nil { - panic("No state flags defined") - } - if len(setup.flags) > 8*int(unsafe.Sizeof(bitMask(0))) { - panic("Too many node state flags") - } - ns := &NodeStateMachine{ - db: db, - dbNodeKey: dbKey, - clock: clock, - setup: setup, - nodes: make(map[enode.ID]*nodeInfo), - fields: make([]*fieldInfo, len(setup.fields)), - } - ns.opWait = sync.NewCond(&ns.lock) - stateNameMap := make(map[string]int, len(setup.flags)) - for index, flag := range setup.flags { - if _, ok := stateNameMap[flag.name]; ok { - panic("Node state flag name collision: " + flag.name) - } - stateNameMap[flag.name] = index - if flag.persistent { - ns.saveFlags |= bitMask(1) << uint(index) - } - } - fieldNameMap := make(map[string]int, len(setup.fields)) - for index, field := range setup.fields { - if _, ok := fieldNameMap[field.name]; ok { - panic("Node field name collision: " + field.name) - } - ns.fields[index] = &fieldInfo{fieldDefinition: field} - fieldNameMap[field.name] = index - } - return ns -} - -// stateMask checks whether the set of flags belongs to the same setup and returns its internal bit mask -func (ns *NodeStateMachine) stateMask(flags Flags) bitMask { - if flags.setup != ns.setup && flags.mask != 0 { - panic("Node state flags belong to a different setup") - } - return flags.mask -} - -// fieldIndex checks whether the field belongs to the same setup and returns its internal index -func (ns *NodeStateMachine) fieldIndex(field Field) int { - if field.setup != ns.setup { - panic("Node field belongs to a different setup") - } - return field.index -} - -// SubscribeState adds a node state subscription. The callback is called while the state -// machine mutex is not held and it is allowed to make further state updates using the -// non-blocking SetStateSub/SetFieldSub functions. All callbacks of an operation are running -// from the thread/goroutine of the initial caller and parallel operations are not permitted. -// Therefore the callback is never called concurrently. It is the responsibility of the -// implemented state logic to avoid deadlocks and to reach a stable state in a finite amount -// of steps. -// State subscriptions should be installed before loading the node database or making the -// first state update.
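For orientation, a minimal sketch of how this removed subscription API was driven (identifiers as defined in the deleted package; db, clock and node are assumed to be supplied by the caller):

	setup := &Setup{}
	flag := setup.NewFlag("connected")
	ns := NewNodeStateMachine(db, []byte("ns:"), clock, setup)
	ns.SubscribeState(flag, func(n *enode.Node, oldState, newState Flags) {
		// Runs as part of the operation that changed 'flag'; further
		// changes must use the non-blocking SetStateSub/SetFieldSub.
	})
	ns.Start()                          // subscriptions must precede Start
	ns.SetState(node, flag, Flags{}, 0) // set 'flag', no reset, no timeout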
-func (ns *NodeStateMachine) SubscribeState(flags Flags, callback StateCallback) { - ns.lock.Lock() - defer ns.lock.Unlock() - - if ns.started { - panic("state machine already started") - } - ns.stateSubs = append(ns.stateSubs, stateSub{ns.stateMask(flags), callback}) -} - -// SubscribeField adds a node field subscription. Same rules apply as for SubscribeState. -func (ns *NodeStateMachine) SubscribeField(field Field, callback FieldCallback) { - ns.lock.Lock() - defer ns.lock.Unlock() - - if ns.started { - panic("state machine already started") - } - f := ns.fields[ns.fieldIndex(field)] - f.subs = append(f.subs, callback) -} - -// newNode creates a new nodeInfo -func (ns *NodeStateMachine) newNode(n *enode.Node) *nodeInfo { - return &nodeInfo{node: n, fields: make([]interface{}, len(ns.fields))} -} - -// checkStarted checks whether the state machine has already been started and panics otherwise. -func (ns *NodeStateMachine) checkStarted() { - if !ns.started { - panic("state machine not started yet") - } -} - -// Start starts the state machine, enabling state and field operations and disabling -// further subscriptions. -func (ns *NodeStateMachine) Start() { - ns.lock.Lock() - if ns.started { - panic("state machine already started") - } - ns.started = true - if ns.db != nil { - ns.loadFromDb() - } - - ns.opStart() - ns.offlineCallbacks(true) - ns.opFinish() - ns.lock.Unlock() -} - -// Stop stops the state machine and saves its state if a database was supplied -func (ns *NodeStateMachine) Stop() { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if !ns.opStart() { - panic("already closed") - } - for _, node := range ns.nodes { - fields := make([]interface{}, len(node.fields)) - copy(fields, node.fields) - ns.offlineCallbackList = append(ns.offlineCallbackList, offlineCallback{node, node.state, fields}) - } - if ns.db != nil { - ns.saveToDb() - } - ns.offlineCallbacks(false) - ns.closed = true - ns.opFinish() -} - -// loadFromDb loads persisted node states from the database -func (ns *NodeStateMachine) loadFromDb() { - it := ns.db.NewIterator(ns.dbNodeKey, nil) - for it.Next() { - var id enode.ID - if len(it.Key()) != len(ns.dbNodeKey)+len(id) { - log.Error("Node state db entry with invalid length", "found", len(it.Key()), "expected", len(ns.dbNodeKey)+len(id)) - continue - } - copy(id[:], it.Key()[len(ns.dbNodeKey):]) - ns.decodeNode(id, it.Value()) - } -} - -type dummyIdentity enode.ID - -func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil } -func (id dummyIdentity) NodeAddr(r *enr.Record) []byte { return id[:] } - -// decodeNode decodes a node database entry and adds it to the node set if successful -func (ns *NodeStateMachine) decodeNode(id enode.ID, data []byte) { - var enc nodeInfoEnc - if err := rlp.DecodeBytes(data, &enc); err != nil { - log.Error("Failed to decode node info", "id", id, "error", err) - return - } - n, _ := enode.New(dummyIdentity(id), &enc.Enr) - node := ns.newNode(n) - node.db = true - - if enc.Version != ns.setup.Version { - log.Debug("Removing stored node with unknown version", "current", ns.setup.Version, "stored", enc.Version) - ns.deleteNode(id) - return - } - if len(enc.Fields) > len(ns.setup.fields) { - log.Error("Invalid node field count", "id", id, "stored", len(enc.Fields)) - return - } - // Resolve persisted node fields - for i, encField := range enc.Fields { - if len(encField) == 0 { - continue - } - if decode := ns.fields[i].decode; decode != nil { - if field, err := decode(encField); err == nil { - 
node.fields[i] = field - node.fieldCount++ - } else { - log.Error("Failed to decode node field", "id", id, "field name", ns.fields[i].name, "error", err) - return - } - } else { - log.Error("Cannot decode node field", "id", id, "field name", ns.fields[i].name) - return - } - } - // It's a compatible node record, add it to set. - ns.nodes[id] = node - node.state = enc.State - fields := make([]interface{}, len(node.fields)) - copy(fields, node.fields) - ns.offlineCallbackList = append(ns.offlineCallbackList, offlineCallback{node, node.state, fields}) - log.Debug("Loaded node state", "id", id, "state", Flags{mask: enc.State, setup: ns.setup}) -} - -// saveNode saves the given node info to the database -func (ns *NodeStateMachine) saveNode(id enode.ID, node *nodeInfo) error { - if ns.db == nil { - return nil - } - - storedState := node.state & ns.saveFlags - for _, t := range node.timeouts { - storedState &= ^t.mask - } - enc := nodeInfoEnc{ - Enr: *node.node.Record(), - Version: ns.setup.Version, - State: storedState, - Fields: make([][]byte, len(ns.fields)), - } - log.Debug("Saved node state", "id", id, "state", Flags{mask: enc.State, setup: ns.setup}) - lastIndex := -1 - for i, f := range node.fields { - if f == nil { - continue - } - encode := ns.fields[i].encode - if encode == nil { - continue - } - blob, err := encode(f) - if err != nil { - return err - } - enc.Fields[i] = blob - lastIndex = i - } - if storedState == 0 && lastIndex == -1 { - if node.db { - node.db = false - ns.deleteNode(id) - } - node.dirty = false - return nil - } - enc.Fields = enc.Fields[:lastIndex+1] - data, err := rlp.EncodeToBytes(&enc) - if err != nil { - return err - } - if err := ns.db.Put(append(ns.dbNodeKey, id[:]...), data); err != nil { - return err - } - node.dirty, node.db = false, true - - if ns.saveNodeHook != nil { - ns.saveNodeHook(node) - } - return nil -} - -// deleteNode removes a node info from the database -func (ns *NodeStateMachine) deleteNode(id enode.ID) { - ns.db.Delete(append(ns.dbNodeKey, id[:]...)) -} - -// saveToDb saves the persistent flags and fields of all nodes that have been changed -func (ns *NodeStateMachine) saveToDb() { - for id, node := range ns.nodes { - if node.dirty { - err := ns.saveNode(id, node) - if err != nil { - log.Error("Failed to save node", "id", id, "error", err) - } - } - } -} - -// updateEnode updates the enode entry belonging to the given node if it already exists -func (ns *NodeStateMachine) updateEnode(n *enode.Node) (enode.ID, *nodeInfo) { - id := n.ID() - node := ns.nodes[id] - if node != nil && n.Seq() > node.node.Seq() { - node.node = n - node.dirty = true - } - return id, node -} - -// Persist saves the persistent state and fields of the given node immediately -func (ns *NodeStateMachine) Persist(n *enode.Node) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if id, node := ns.updateEnode(n); node != nil && node.dirty { - err := ns.saveNode(id, node) - if err != nil { - log.Error("Failed to save node", "id", id, "error", err) - } - return err - } - return nil -} - -// SetState updates the given node state flags and blocks until the operation is finished. -// If a flag with a timeout is set again, the operation removes or replaces the existing timeout. 
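The timeout-replacement rule described above is exercised by the deleted TestDuplicatedFlags further down; in sketch form (clock being the simulated clock used in those tests):

	ns.SetState(node, flag, Flags{}, time.Second)   // arm a 1s timeout
	ns.SetState(node, flag, Flags{}, 2*time.Second) // replaces it: 2s now
	clock.Run(2 * time.Second)                      // 'flag' is reset here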
-func (ns *NodeStateMachine) SetState(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - if !ns.opStart() { - return ErrClosed - } - ns.setState(n, setFlags, resetFlags, timeout) - ns.opFinish() - return nil -} - -// SetStateSub updates the given node state flags without blocking (should be called -// from a subscription/operation callback). -func (ns *NodeStateMachine) SetStateSub(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.opCheck() - ns.setState(n, setFlags, resetFlags, timeout) -} - -func (ns *NodeStateMachine) setState(n *enode.Node, setFlags, resetFlags Flags, timeout time.Duration) { - ns.checkStarted() - set, reset := ns.stateMask(setFlags), ns.stateMask(resetFlags) - id, node := ns.updateEnode(n) - if node == nil { - if set == 0 { - return - } - node = ns.newNode(n) - ns.nodes[id] = node - } - oldState := node.state - newState := (node.state & (^reset)) | set - changed := oldState ^ newState - node.state = newState - - // Remove the timeout callbacks for all reset and set flags, - // even they are not existent(it's noop). - ns.removeTimeouts(node, set|reset) - - // Register the timeout callback if required - if timeout != 0 && set != 0 { - ns.addTimeout(n, set, timeout) - } - if newState == oldState { - return - } - if newState == 0 && node.fieldCount == 0 { - delete(ns.nodes, id) - if node.db { - ns.deleteNode(id) - } - } else { - if changed&ns.saveFlags != 0 { - node.dirty = true - } - } - callback := func() { - for _, sub := range ns.stateSubs { - if changed&sub.mask != 0 { - sub.callback(n, Flags{mask: oldState & sub.mask, setup: ns.setup}, Flags{mask: newState & sub.mask, setup: ns.setup}) - } - } - } - ns.opPending = append(ns.opPending, callback) -} - -// opCheck checks whether an operation is active -func (ns *NodeStateMachine) opCheck() { - if !ns.opFlag { - panic("Operation has not started") - } -} - -// opStart waits until other operations are finished and starts a new one -func (ns *NodeStateMachine) opStart() bool { - for ns.opFlag { - ns.opWait.Wait() - } - if ns.closed { - return false - } - ns.opFlag = true - return true -} - -// opFinish finishes the current operation by running all pending callbacks. -// Callbacks resulting from a state/field change performed in a previous callback are always -// put at the end of the pending list and therefore processed after all callbacks resulting -// from the previous state/field change. -func (ns *NodeStateMachine) opFinish() { - for len(ns.opPending) != 0 { - list := ns.opPending - ns.lock.Unlock() - for _, cb := range list { - cb() - } - ns.lock.Lock() - ns.opPending = ns.opPending[len(list):] - } - ns.opPending = nil - ns.opFlag = false - ns.opWait.Broadcast() -} - -// Operation calls the given function as an operation callback. This allows the caller -// to start an operation with multiple initial changes. The same rules apply as for -// subscription callbacks. 
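A sketch of batching several initial changes into a single operation (the nodes, flag and field are assumed to be registered beforehand):

	err := ns.Operation(func() {
		ns.SetStateSub(n1, dialed, Flags{}, 0)
		ns.SetFieldSub(n2, addressField, "10.0.0.1:30303")
	})
	if err == ErrClosed {
		// the state machine had already been stopped
	}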
-func (ns *NodeStateMachine) Operation(fn func()) error { - ns.lock.Lock() - started := ns.opStart() - ns.lock.Unlock() - if !started { - return ErrClosed - } - fn() - ns.lock.Lock() - ns.opFinish() - ns.lock.Unlock() - return nil -} - -// offlineCallbacks calls state update callbacks at startup or shutdown -func (ns *NodeStateMachine) offlineCallbacks(start bool) { - for _, cb := range ns.offlineCallbackList { - cb := cb - callback := func() { - for _, sub := range ns.stateSubs { - offState := offlineState & sub.mask - onState := cb.state & sub.mask - if offState == onState { - continue - } - if start { - sub.callback(cb.node.node, Flags{mask: offState, setup: ns.setup}, Flags{mask: onState, setup: ns.setup}) - } else { - sub.callback(cb.node.node, Flags{mask: onState, setup: ns.setup}, Flags{mask: offState, setup: ns.setup}) - } - } - for i, f := range cb.fields { - if f == nil || ns.fields[i].subs == nil { - continue - } - for _, fsub := range ns.fields[i].subs { - if start { - fsub(cb.node.node, Flags{mask: offlineState, setup: ns.setup}, nil, f) - } else { - fsub(cb.node.node, Flags{mask: offlineState, setup: ns.setup}, f, nil) - } - } - } - } - ns.opPending = append(ns.opPending, callback) - } - ns.offlineCallbackList = nil -} - -// AddTimeout adds a node state timeout associated to the given state flag(s). -// After the specified time interval, the relevant states will be reset. -func (ns *NodeStateMachine) AddTimeout(n *enode.Node, flags Flags, timeout time.Duration) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if ns.closed { - return ErrClosed - } - ns.addTimeout(n, ns.stateMask(flags), timeout) - return nil -} - -// addTimeout adds a node state timeout associated to the given state flag(s). -func (ns *NodeStateMachine) addTimeout(n *enode.Node, mask bitMask, timeout time.Duration) { - _, node := ns.updateEnode(n) - if node == nil { - return - } - mask &= node.state - if mask == 0 { - return - } - ns.removeTimeouts(node, mask) - t := &nodeStateTimeout{mask: mask} - t.timer = ns.clock.AfterFunc(timeout, func() { - ns.lock.Lock() - defer ns.lock.Unlock() - - if !ns.opStart() { - return - } - ns.setState(n, Flags{}, Flags{mask: t.mask, setup: ns.setup}, 0) - ns.opFinish() - }) - node.timeouts = append(node.timeouts, t) - if mask&ns.saveFlags != 0 { - node.dirty = true - } -} - -// removeTimeouts removes node state timeouts associated to the given state flag(s). -// If a timeout was associated to multiple flags which are not all included in the -// specified remove mask then only the included flags are de-associated and the timer -// stays active. -func (ns *NodeStateMachine) removeTimeouts(node *nodeInfo, mask bitMask) { - for i := 0; i < len(node.timeouts); i++ { - t := node.timeouts[i] - match := t.mask & mask - if match == 0 { - continue - } - t.mask -= match - if t.mask != 0 { - continue - } - t.timer.Stop() - node.timeouts[i] = node.timeouts[len(node.timeouts)-1] - node.timeouts = node.timeouts[:len(node.timeouts)-1] - i-- - if match&ns.saveFlags != 0 { - node.dirty = true - } - } -} - -// GetField retrieves the given field of the given node. Note that when used in a -// subscription callback the result can be out of sync with the state change represented -// by the callback parameters so extra safety checks might be necessary. 
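Per that caveat, a read from inside a callback can already observe later queued changes; a small sketch:

	ns.SubscribeField(field, func(n *enode.Node, state Flags, oldValue, newValue interface{}) {
		// May already return a value newer than newValue if another queued
		// callback changed it within the same operation.
		_ = ns.GetField(n, field)
	})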
-func (ns *NodeStateMachine) GetField(n *enode.Node, field Field) interface{} { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if ns.closed { - return nil - } - if _, node := ns.updateEnode(n); node != nil { - return node.fields[ns.fieldIndex(field)] - } - return nil -} - -// GetState retrieves the current state of the given node. Note that when used in a -// subscription callback the result can be out of sync with the state change represented -// by the callback parameters so extra safety checks might be necessary. -func (ns *NodeStateMachine) GetState(n *enode.Node) Flags { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if ns.closed { - return Flags{} - } - if _, node := ns.updateEnode(n); node != nil { - return Flags{mask: node.state, setup: ns.setup} - } - return Flags{} -} - -// SetField sets the given field of the given node and blocks until the operation is finished -func (ns *NodeStateMachine) SetField(n *enode.Node, field Field, value interface{}) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - if !ns.opStart() { - return ErrClosed - } - err := ns.setField(n, field, value) - ns.opFinish() - return err -} - -// SetFieldSub sets the given field of the given node without blocking (should be called -// from a subscription/operation callback). -func (ns *NodeStateMachine) SetFieldSub(n *enode.Node, field Field, value interface{}) error { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.opCheck() - return ns.setField(n, field, value) -} - -func (ns *NodeStateMachine) setField(n *enode.Node, field Field, value interface{}) error { - ns.checkStarted() - id, node := ns.updateEnode(n) - if node == nil { - if value == nil { - return nil - } - node = ns.newNode(n) - ns.nodes[id] = node - } - fieldIndex := ns.fieldIndex(field) - f := ns.fields[fieldIndex] - if value != nil && reflect.TypeOf(value) != f.ftype { - log.Error("Invalid field type", "type", reflect.TypeOf(value), "required", f.ftype) - return ErrInvalidField - } - oldValue := node.fields[fieldIndex] - if value == oldValue { - return nil - } - if oldValue != nil { - node.fieldCount-- - } - if value != nil { - node.fieldCount++ - } - node.fields[fieldIndex] = value - if node.state == 0 && node.fieldCount == 0 { - delete(ns.nodes, id) - if node.db { - ns.deleteNode(id) - } - } else { - if f.encode != nil { - node.dirty = true - } - } - state := node.state - callback := func() { - for _, cb := range f.subs { - cb(n, Flags{mask: state, setup: ns.setup}, oldValue, value) - } - } - ns.opPending = append(ns.opPending, callback) - return nil -} - -// ForEach calls the callback for each node having all of the required and none of the -// disabled flags set. -// Note that this callback is not an operation callback but ForEach can be called from an -// Operation callback or Operation can also be called from a ForEach callback if necessary. 
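Usage sketch for ForEach (connected and banned assumed to be flags from the same Setup):

	ns.ForEach(connected, banned, func(n *enode.Node, state Flags) {
		// n has 'connected' set and 'banned' unset; the callback runs
		// outside the state machine lock, over a snapshot of the matches.
	})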
-func (ns *NodeStateMachine) ForEach(requireFlags, disableFlags Flags, cb func(n *enode.Node, state Flags)) { - ns.lock.Lock() - ns.checkStarted() - type callback struct { - node *enode.Node - state bitMask - } - require, disable := ns.stateMask(requireFlags), ns.stateMask(disableFlags) - var callbacks []callback - for _, node := range ns.nodes { - if node.state&require == require && node.state&disable == 0 { - callbacks = append(callbacks, callback{node.node, node.state & (require | disable)}) - } - } - ns.lock.Unlock() - for _, c := range callbacks { - cb(c.node, Flags{mask: c.state, setup: ns.setup}) - } -} - -// GetNode returns the enode currently associated with the given ID -func (ns *NodeStateMachine) GetNode(id enode.ID) *enode.Node { - ns.lock.Lock() - defer ns.lock.Unlock() - - ns.checkStarted() - if node := ns.nodes[id]; node != nil { - return node.node - } - return nil -} - -// AddLogMetrics adds logging and/or metrics for nodes entering, exiting and currently -// being in a given set specified by required and disabled state flags -func (ns *NodeStateMachine) AddLogMetrics(requireFlags, disableFlags Flags, name string, inMeter, outMeter metrics.Meter, gauge metrics.Gauge) { - var count int64 - ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState Flags) { - oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags) - newMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags) - if newMatch == oldMatch { - return - } - - if newMatch { - count++ - if name != "" { - log.Debug("Node entered", "set", name, "id", n.ID(), "count", count) - } - if inMeter != nil { - inMeter.Mark(1) - } - } else { - count-- - if name != "" { - log.Debug("Node left", "set", name, "id", n.ID(), "count", count) - } - if outMeter != nil { - outMeter.Mark(1) - } - } - if gauge != nil { - gauge.Update(count) - } - }) -} diff --git a/p2p/nodestate/nodestate_test.go b/p2p/nodestate/nodestate_test.go deleted file mode 100644 index d06ad755e2..0000000000 --- a/p2p/nodestate/nodestate_test.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
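The deleted AddLogMetrics helper above tied the same require/disable matching to meters and a gauge; a sketch of how callers wired it up (flag names assumed, constructors from the metrics package):

	ns.AddLogMetrics(connected, banned, "connectable",
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge())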
- -package nodestate - -import ( - "errors" - "fmt" - "reflect" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/rlp" -) - -func testSetup(flagPersist []bool, fieldType []reflect.Type) (*Setup, []Flags, []Field) { - setup := &Setup{} - flags := make([]Flags, len(flagPersist)) - for i, persist := range flagPersist { - if persist { - flags[i] = setup.NewPersistentFlag(fmt.Sprintf("flag-%d", i)) - } else { - flags[i] = setup.NewFlag(fmt.Sprintf("flag-%d", i)) - } - } - fields := make([]Field, len(fieldType)) - for i, ftype := range fieldType { - switch ftype { - case reflect.TypeOf(uint64(0)): - fields[i] = setup.NewPersistentField(fmt.Sprintf("field-%d", i), ftype, uint64FieldEnc, uint64FieldDec) - case reflect.TypeOf(""): - fields[i] = setup.NewPersistentField(fmt.Sprintf("field-%d", i), ftype, stringFieldEnc, stringFieldDec) - default: - fields[i] = setup.NewField(fmt.Sprintf("field-%d", i), ftype) - } - } - return setup, flags, fields -} - -func testNode(b byte) *enode.Node { - r := &enr.Record{} - r.SetSig(dummyIdentity{b}, []byte{42}) - n, _ := enode.New(dummyIdentity{b}, r) - return n -} - -func TestCallback(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{false, false, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - set0 := make(chan struct{}, 1) - set1 := make(chan struct{}, 1) - set2 := make(chan struct{}, 1) - ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { set0 <- struct{}{} }) - ns.SubscribeState(flags[1], func(n *enode.Node, oldState, newState Flags) { set1 <- struct{}{} }) - ns.SubscribeState(flags[2], func(n *enode.Node, oldState, newState Flags) { set2 <- struct{}{} }) - - ns.Start() - - ns.SetState(testNode(1), flags[0], Flags{}, 0) - ns.SetState(testNode(1), flags[1], Flags{}, time.Second) - ns.SetState(testNode(1), flags[2], Flags{}, 2*time.Second) - - for i := 0; i < 3; i++ { - select { - case <-set0: - case <-set1: - case <-set2: - case <-time.After(time.Second): - t.Fatalf("failed to invoke callback") - } - } -} - -func TestPersistentFlags(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{true, true, true, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - saveNode := make(chan *nodeInfo, 5) - ns.saveNodeHook = func(node *nodeInfo) { - saveNode <- node - } - - ns.Start() - - ns.SetState(testNode(1), flags[0], Flags{}, time.Second) // state with timeout should not be saved - ns.SetState(testNode(2), flags[1], Flags{}, 0) - ns.SetState(testNode(3), flags[2], Flags{}, 0) - ns.SetState(testNode(4), flags[3], Flags{}, 0) - ns.SetState(testNode(5), flags[0], Flags{}, 0) - ns.Persist(testNode(5)) - select { - case <-saveNode: - case <-time.After(time.Second): - t.Fatalf("Timeout") - } - ns.Stop() - - for i := 0; i < 2; i++ { - select { - case <-saveNode: - case <-time.After(time.Second): - t.Fatalf("Timeout") - } - } - select { - case <-saveNode: - t.Fatalf("Unexpected saveNode") - case <-time.After(time.Millisecond * 100): - } -} - -func TestSetField(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf("")}) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - 
saveNode := make(chan *nodeInfo, 1) - ns.saveNodeHook = func(node *nodeInfo) { - saveNode <- node - } - - ns.Start() - - // Set field before setting state - ns.SetField(testNode(1), fields[0], "hello world") - field := ns.GetField(testNode(1), fields[0]) - if field == nil { - t.Fatalf("Field should be set before setting states") - } - ns.SetField(testNode(1), fields[0], nil) - field = ns.GetField(testNode(1), fields[0]) - if field != nil { - t.Fatalf("Field should be unset") - } - // Set field after setting state - ns.SetState(testNode(1), flags[0], Flags{}, 0) - ns.SetField(testNode(1), fields[0], "hello world") - field = ns.GetField(testNode(1), fields[0]) - if field == nil { - t.Fatalf("Field should be set after setting states") - } - if err := ns.SetField(testNode(1), fields[0], 123); err == nil { - t.Fatalf("Invalid field should be rejected") - } - // Dirty node should be written back - ns.Stop() - select { - case <-saveNode: - case <-time.After(time.Second): - t.Fatalf("Timeout") - } -} - -func TestSetState(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{false, false, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - type change struct{ old, new Flags } - set := make(chan change, 1) - ns.SubscribeState(flags[0].Or(flags[1]), func(n *enode.Node, oldState, newState Flags) { - set <- change{ - old: oldState, - new: newState, - } - }) - - ns.Start() - - check := func(expectOld, expectNew Flags, expectChange bool) { - if expectChange { - select { - case c := <-set: - if !c.old.Equals(expectOld) { - t.Fatalf("Old state mismatch") - } - if !c.new.Equals(expectNew) { - t.Fatalf("New state mismatch") - } - case <-time.After(time.Second): - } - return - } - select { - case <-set: - t.Fatalf("Unexpected change") - case <-time.After(time.Millisecond * 100): - return - } - } - ns.SetState(testNode(1), flags[0], Flags{}, 0) - check(Flags{}, flags[0], true) - - ns.SetState(testNode(1), flags[1], Flags{}, 0) - check(flags[0], flags[0].Or(flags[1]), true) - - ns.SetState(testNode(1), flags[2], Flags{}, 0) - check(Flags{}, Flags{}, false) - - ns.SetState(testNode(1), Flags{}, flags[0], 0) - check(flags[0].Or(flags[1]), flags[1], true) - - ns.SetState(testNode(1), Flags{}, flags[1], 0) - check(flags[1], Flags{}, true) - - ns.SetState(testNode(1), Flags{}, flags[2], 0) - check(Flags{}, Flags{}, false) - - ns.SetState(testNode(1), flags[0].Or(flags[1]), Flags{}, time.Second) - check(Flags{}, flags[0].Or(flags[1]), true) - clock.Run(time.Second) - check(flags[0].Or(flags[1]), Flags{}, true) -} - -func uint64FieldEnc(field interface{}) ([]byte, error) { - if u, ok := field.(uint64); ok { - enc, err := rlp.EncodeToBytes(&u) - return enc, err - } - return nil, errors.New("invalid field type") -} - -func uint64FieldDec(enc []byte) (interface{}, error) { - var u uint64 - err := rlp.DecodeBytes(enc, &u) - return u, err -} - -func stringFieldEnc(field interface{}) ([]byte, error) { - if s, ok := field.(string); ok { - return []byte(s), nil - } - return nil, errors.New("invalid field type") -} - -func stringFieldDec(enc []byte) (interface{}, error) { - return string(enc), nil -} - -func TestPersistentFields(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf(uint64(0)), reflect.TypeOf("")}) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - ns.Start() - ns.SetState(testNode(1), flags[0], Flags{}, 0) - 
ns.SetField(testNode(1), fields[0], uint64(100)) - ns.SetField(testNode(1), fields[1], "hello world") - ns.Stop() - - ns2 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - ns2.Start() - field0 := ns2.GetField(testNode(1), fields[0]) - if !reflect.DeepEqual(field0, uint64(100)) { - t.Fatalf("Field changed") - } - field1 := ns2.GetField(testNode(1), fields[1]) - if !reflect.DeepEqual(field1, "hello world") { - t.Fatalf("Field changed") - } - - s.Version++ - ns3 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - ns3.Start() - if ns3.GetField(testNode(1), fields[0]) != nil { - t.Fatalf("Old field version should have been discarded") - } -} - -func TestFieldSub(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, fields := testSetup([]bool{true}, []reflect.Type{reflect.TypeOf(uint64(0))}) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - var ( - lastState Flags - lastOldValue, lastNewValue interface{} - ) - ns.SubscribeField(fields[0], func(n *enode.Node, state Flags, oldValue, newValue interface{}) { - lastState, lastOldValue, lastNewValue = state, oldValue, newValue - }) - check := func(state Flags, oldValue, newValue interface{}) { - if !lastState.Equals(state) || lastOldValue != oldValue || lastNewValue != newValue { - t.Fatalf("Incorrect field sub callback (expected [%v %v %v], got [%v %v %v])", state, oldValue, newValue, lastState, lastOldValue, lastNewValue) - } - } - ns.Start() - ns.SetState(testNode(1), flags[0], Flags{}, 0) - ns.SetField(testNode(1), fields[0], uint64(100)) - check(flags[0], nil, uint64(100)) - ns.Stop() - check(s.OfflineFlag(), uint64(100), nil) - - ns2 := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - ns2.SubscribeField(fields[0], func(n *enode.Node, state Flags, oldValue, newValue interface{}) { - lastState, lastOldValue, lastNewValue = state, oldValue, newValue - }) - ns2.Start() - check(s.OfflineFlag(), nil, uint64(100)) - ns2.SetState(testNode(1), Flags{}, flags[0], 0) - ns2.SetField(testNode(1), fields[0], nil) - check(Flags{}, uint64(100), nil) - ns2.Stop() -} - -func TestDuplicatedFlags(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{true}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - type change struct{ old, new Flags } - set := make(chan change, 1) - ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { - set <- change{oldState, newState} - }) - - ns.Start() - defer ns.Stop() - - check := func(expectOld, expectNew Flags, expectChange bool) { - if expectChange { - select { - case c := <-set: - if !c.old.Equals(expectOld) { - t.Fatalf("Old state mismatch") - } - if !c.new.Equals(expectNew) { - t.Fatalf("New state mismatch") - } - case <-time.After(time.Second): - } - return - } - select { - case <-set: - t.Fatalf("Unexpected change") - case <-time.After(time.Millisecond * 100): - return - } - } - ns.SetState(testNode(1), flags[0], Flags{}, time.Second) - check(Flags{}, flags[0], true) - ns.SetState(testNode(1), flags[0], Flags{}, 2*time.Second) // extend the timeout to 2s - check(Flags{}, flags[0], false) - - clock.Run(2 * time.Second) - check(flags[0], Flags{}, true) -} - -func TestCallbackOrder(t *testing.T) { - mdb, clock := rawdb.NewMemoryDatabase(), &mclock.Simulated{} - - s, flags, _ := testSetup([]bool{false, false, false, false}, nil) - ns := NewNodeStateMachine(mdb, []byte("-ns"), clock, s) - - ns.SubscribeState(flags[0], func(n *enode.Node, oldState, newState Flags) { - if 
newState.Equals(flags[0]) { - ns.SetStateSub(n, flags[1], Flags{}, 0) - ns.SetStateSub(n, flags[2], Flags{}, 0) - } - }) - ns.SubscribeState(flags[1], func(n *enode.Node, oldState, newState Flags) { - if newState.Equals(flags[1]) { - ns.SetStateSub(n, flags[3], Flags{}, 0) - } - }) - lastState := Flags{} - ns.SubscribeState(MergeFlags(flags[1], flags[2], flags[3]), func(n *enode.Node, oldState, newState Flags) { - if !oldState.Equals(lastState) { - t.Fatalf("Wrong callback order") - } - lastState = newState - }) - - ns.Start() - defer ns.Stop() - - ns.SetState(testNode(1), flags[0], Flags{}, 0) -} diff --git a/p2p/server.go b/p2p/server.go index 5b9a4aa71f..13eebed3f4 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "net" + "net/netip" "slices" "sync" "sync/atomic" @@ -190,8 +191,8 @@ type Server struct { nodedb *enode.DB localnode *enode.LocalNode - ntab *discover.UDPv4 - DiscV5 *discover.UDPv5 + discv4 *discover.UDPv4 + discv5 *discover.UDPv5 discmix *enode.FairMix dialsched *dialScheduler @@ -400,6 +401,16 @@ func (srv *Server) Self() *enode.Node { return ln.Node() } +// DiscoveryV4 returns the discovery v4 instance, if configured. +func (srv *Server) DiscoveryV4() *discover.UDPv4 { + return srv.discv4 +} + +// DiscoveryV5 returns the discovery v5 instance, if configured. +func (srv *Server) DiscoveryV5() *discover.UDPv5 { + return srv.discv5 +} + // Stop terminates the server and all active peer connections. // It blocks until all active connections have been closed. func (srv *Server) Stop() { @@ -425,11 +436,11 @@ type sharedUDPConn struct { unhandled chan discover.ReadPacket } -// ReadFromUDP implements discover.UDPConn -func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { +// ReadFromUDPAddrPort implements discover.UDPConn +func (s *sharedUDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) { packet, ok := <-s.unhandled if !ok { - return 0, nil, errors.New("connection was closed") + return 0, netip.AddrPort{}, errors.New("connection was closed") } l := len(packet.Data) if l > len(b) { @@ -547,13 +558,13 @@ func (srv *Server) setupDiscovery() error { ) // If both versions of discovery are running, setup a shared // connection, so v5 can read unhandled messages from v4. - if srv.DiscoveryV4 && srv.DiscoveryV5 { + if srv.Config.DiscoveryV4 && srv.Config.DiscoveryV5 { unhandled = make(chan discover.ReadPacket, 100) sconn = &sharedUDPConn{conn, unhandled} } // Start discovery services. 
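	// Note: v4 starts first; with both protocols enabled it owns the socket
	// and hands packets it cannot decode to v5 via the sharedUDPConn above.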
- if srv.DiscoveryV4 { + if srv.Config.DiscoveryV4 { cfg := discover.Config{ PrivateKey: srv.PrivateKey, NetRestrict: srv.NetRestrict, @@ -565,17 +576,17 @@ func (srv *Server) setupDiscovery() error { if err != nil { return err } - srv.ntab = ntab + srv.discv4 = ntab srv.discmix.AddSource(ntab.RandomNodes()) } - if srv.DiscoveryV5 { + if srv.Config.DiscoveryV5 { cfg := discover.Config{ PrivateKey: srv.PrivateKey, NetRestrict: srv.NetRestrict, Bootnodes: srv.BootstrapNodesV5, Log: srv.log, } - srv.DiscV5, err = discover.ListenV5(sconn, srv.localnode, cfg) + srv.discv5, err = discover.ListenV5(sconn, srv.localnode, cfg) if err != nil { return err } @@ -602,8 +613,8 @@ func (srv *Server) setupDialScheduler() { dialer: srv.Dialer, clock: srv.clock, } - if srv.ntab != nil { - config.resolver = srv.ntab + if srv.discv4 != nil { + config.resolver = srv.discv4 } if config.dialer == nil { config.dialer = tcpDialer{&net.Dialer{Timeout: defaultDialTimeout}} @@ -799,11 +810,11 @@ running: srv.log.Trace("P2P networking is spinning down") // Terminate discovery. If there is a running lookup it will terminate soon. - if srv.ntab != nil { - srv.ntab.Close() + if srv.discv4 != nil { + srv.discv4.Close() } - if srv.DiscV5 != nil { - srv.DiscV5.Close() + if srv.discv5 != nil { + srv.discv5.Close() } // Disconnect all peers. for _, p := range peers { diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go index f34315f170..e18aaacc33 100644 --- a/p2p/simulations/adapters/types.go +++ b/p2p/simulations/adapters/types.go @@ -42,7 +42,6 @@ import ( // // - SimNode, an in-memory node in the same process // - ExecNode, a child process node -// - DockerNode, a node running in a Docker container type Node interface { // Addr returns the node's address (e.g. an Enode URL) Addr() []byte diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go index 70b35ad777..b0b8f22fdb 100644 --- a/p2p/simulations/examples/ping-pong.go +++ b/p2p/simulations/examples/ping-pong.go @@ -33,7 +33,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/simulations/adapters" ) -var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim", "exec" or "docker")`) +var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim" or "exec")`) // main() starts a simulation network which contains nodes running a simple // ping-pong protocol diff --git a/params/config.go b/params/config.go index d5d93b8255..e7fabfca77 100644 --- a/params/config.go +++ b/params/config.go @@ -597,6 +597,11 @@ func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) } +// IsEIP4762 returns whether eip 4762 has been activated at given block. +func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool { + return c.IsVerkle(num, time) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
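The IsEIP4762 helper added above is a plain alias for Verkle activation, so the two predicates can never disagree; a minimal sketch (cfg, num and time assumed from context):

	if cfg.IsEIP4762(num, time) != cfg.IsVerkle(num, time) {
		panic("unreachable: IsEIP4762 aliases IsVerkle")
	}

Accordingly, in the Rules construction below, IsEIP2929 and IsEIP4762 become mutually exclusive once Verkle activates.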
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { @@ -931,6 +936,7 @@ type Rules struct { ChainID *big.Int ArbOSVersion uint64 IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool + IsEIP2929, IsEIP4762 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, IsPrague bool @@ -945,6 +951,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64, curren } // disallow setting Merge out of order isMerge = isMerge && c.IsLondon(num) + isVerkle := isMerge && c.IsVerkle(num, timestamp) return Rules{ IsArbitrum: c.IsArbitrum(), IsStylus: c.IsArbitrum() && currentArbosVersion >= ArbosVersion_Stylus, @@ -959,11 +966,13 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64, curren IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), IsBerlin: c.IsBerlin(num), + IsEIP2929: c.IsBerlin(num) && !isVerkle, IsLondon: c.IsLondon(num), IsMerge: isMerge, IsShanghai: isMerge && c.IsShanghai(num, timestamp, currentArbosVersion), IsCancun: isMerge && c.IsCancun(num, timestamp, currentArbosVersion), IsPrague: isMerge && c.IsPrague(num, timestamp), - IsVerkle: isMerge && c.IsVerkle(num, timestamp), + IsVerkle: isVerkle, + IsEIP4762: isVerkle, } } diff --git a/params/protocol_params.go b/params/protocol_params.go index 2539a8c28c..9d99f9a1b8 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -86,6 +86,7 @@ const ( LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. Create2Gas uint64 = 32000 // Once per CREATE2 operation + CreateNGasEip4762 uint64 = 1000 // Once per CREATEn operations post-verkle SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. @@ -188,6 +189,10 @@ var ( // BeaconRootsAddress is the address where historical beacon roots are stored as per EIP-4788 BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02") + + // BeaconRootsCode is the code where historical beacon roots are stored as per EIP-4788 + BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500") + // SystemAddress is where the system-transaction is sent from as per EIP-4788 SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe") ) diff --git a/params/verkle_params.go b/params/verkle_params.go new file mode 100644 index 0000000000..93d4f7cd64 --- /dev/null +++ b/params/verkle_params.go @@ -0,0 +1,36 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package params + +// Verkle tree EIP: costs associated with witness accesses +var ( + WitnessBranchReadCost uint64 = 1900 + WitnessChunkReadCost uint64 = 200 + WitnessBranchWriteCost uint64 = 3000 + WitnessChunkWriteCost uint64 = 500 + WitnessChunkFillCost uint64 = 6200 +) + +// ClearVerkleWitnessCosts sets all witness costs to 0, which is necessary +// for historical block replay simulations. +func ClearVerkleWitnessCosts() { + WitnessBranchReadCost = 0 + WitnessChunkReadCost = 0 + WitnessBranchWriteCost = 0 + WitnessChunkWriteCost = 0 + WitnessChunkFillCost = 0 +} diff --git a/params/version.go b/params/version.go index 0220cb6a6b..62140f05c7 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 14 // Minor version component of the current release - VersionPatch = 3 // Patch version component of the current release + VersionPatch = 4 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/tests/init.go b/tests/init.go index e333587a07..c85e714c00 100644 --- a/tests/init.go +++ b/tests/init.go @@ -212,7 +212,7 @@ var Forks = map[string]*params.ChainConfig{ LondonBlock: big.NewInt(0), ArrowGlacierBlock: big.NewInt(0), }, - "ArrowGlacierToMergeAtDiffC0000": { + "ArrowGlacierToParisAtDiffC0000": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -246,6 +246,23 @@ var Forks = map[string]*params.ChainConfig{ ArrowGlacierBlock: big.NewInt(0), GrayGlacierBlock: big.NewInt(0), }, + "Paris": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + }, "Merge": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), @@ -281,7 +298,7 @@ var Forks = map[string]*params.ChainConfig{ TerminalTotalDifficulty: big.NewInt(0), ShanghaiTime: u64(0), }, - "MergeToShanghaiAtTime15k": { + "ParisToShanghaiAtTime15k": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), diff --git a/tests/state_test.go b/tests/state_test.go index 605531d35d..52425ee216 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -54,14 +54,6 @@ func initMatcher(st *testMatcher) { // Uses 1GB RAM per tested fork st.skipLoad(`^stStaticCall/static_Call1MB`) - // These tests fail as of https://github.com/ethereum/go-ethereum/pull/28666, since we - // no longer delete "leftover storage" when deploying a contract.
- st.skipLoad(`^stSStoreTest/InitCollision\.json`) - st.skipLoad(`^stRevertTest/RevertInCreateInInit\.json`) - st.skipLoad(`^stExtCodeHash/dynamicAccountOverwriteEmpty\.json`) - st.skipLoad(`^stCreate2/create2collisionStorage\.json`) - st.skipLoad(`^stCreate2/RevertInCreateInInitCreate2\.json`) - // Broken tests: // EOF is not part of cancun st.skipLoad(`^stEOF/`) diff --git a/tests/testdata b/tests/testdata index fa51c5c164..faf33b4714 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit fa51c5c164f79140730ccb8fe26a46c3d3994338 +Subproject commit faf33b471465d3c6cdc3d04fbd690895f78d33f2 diff --git a/trie/iterator.go b/trie/iterator.go index 83ccc0740f..fa01611063 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -135,7 +135,7 @@ type nodeIteratorState struct { node node // Trie node being iterated parent common.Hash // Hash of the first full ancestor node (nil if current is the root) index int // Child to be processed next - pathlen int // Length of the path to this node + pathlen int // Length of the path to the parent node } type nodeIterator struct { @@ -145,7 +145,7 @@ type nodeIterator struct { err error // Failure set in case of an internal error in the iterator resolver NodeResolver // optional node resolver for avoiding disk hits - pool []*nodeIteratorState // local pool for iteratorstates + pool []*nodeIteratorState // local pool for iterator states } // errIteratorEnd is stored in nodeIterator.err when iteration is done. @@ -304,6 +304,7 @@ func (it *nodeIterator) seek(prefix []byte) error { // The path we're looking for is the hex encoded key without terminator. key := keybytesToHex(prefix) key = key[:len(key)-1] + // Move forward until we're just before the closest match to key. for { state, parentIndex, path, err := it.peekSeek(key) @@ -311,7 +312,7 @@ func (it *nodeIterator) seek(prefix []byte) error { return errIteratorEnd } else if err != nil { return seekError{prefix, err} - } else if bytes.Compare(path, key) >= 0 { + } else if reachedPath(path, key) { return nil } it.push(state, parentIndex, path) @@ -339,7 +340,6 @@ func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, er // If we're skipping children, pop the current node first it.pop() } - // Continue iteration to the next child for len(it.stack) > 0 { parent := it.stack[len(it.stack)-1] @@ -372,7 +372,6 @@ func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []by // If we're skipping children, pop the current node first it.pop() } - // Continue iteration to the next child for len(it.stack) > 0 { parent := it.stack[len(it.stack)-1] @@ -449,16 +448,18 @@ func (it *nodeIterator) findChild(n *fullNode, index int, ancestor common.Hash) state *nodeIteratorState childPath []byte ) - for ; index < len(n.Children); index++ { + for ; index < len(n.Children); index = nextChildIndex(index) { if n.Children[index] != nil { child = n.Children[index] hash, _ := child.cache() + state = it.getFromPool() state.hash = common.BytesToHash(hash) state.node = child state.parent = ancestor state.index = -1 state.pathlen = len(path) + childPath = append(childPath, path...) childPath = append(childPath, byte(index)) return child, state, childPath, index @@ -471,8 +472,8 @@ func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Has switch node := parent.node.(type) { case *fullNode: // Full node, move to the first non-nil child. 
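+ // Children of a full node are visited in pre-order: the embedded value slot (16) comes before child 0, hence nextChildIndex rather than a bare index+1 (see nextChildIndex below).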
- if child, state, path, index := it.findChild(node, parent.index+1, ancestor); child != nil { - parent.index = index - 1 + if child, state, path, index := it.findChild(node, nextChildIndex(parent.index), ancestor); child != nil { + parent.index = prevChildIndex(index) return state, path, true } case *shortNode: @@ -498,23 +499,23 @@ func (it *nodeIterator) nextChildAt(parent *nodeIteratorState, ancestor common.H switch n := parent.node.(type) { case *fullNode: // Full node, move to the first non-nil child before the desired key position - child, state, path, index := it.findChild(n, parent.index+1, ancestor) + child, state, path, index := it.findChild(n, nextChildIndex(parent.index), ancestor) if child == nil { // No more children in this fullnode return parent, it.path, false } // If the child we found is already past the seek position, just return it. - if bytes.Compare(path, key) >= 0 { - parent.index = index - 1 + if reachedPath(path, key) { + parent.index = prevChildIndex(index) return state, path, true } // The child is before the seek position. Try advancing for { - nextChild, nextState, nextPath, nextIndex := it.findChild(n, index+1, ancestor) + nextChild, nextState, nextPath, nextIndex := it.findChild(n, nextChildIndex(index), ancestor) // If we run out of children, or skipped past the target, return the // previous one - if nextChild == nil || bytes.Compare(nextPath, key) >= 0 { - parent.index = index - 1 + if nextChild == nil || reachedPath(nextPath, key) { + parent.index = prevChildIndex(index) return state, path, true } // We found a better child closer to the target @@ -541,7 +542,7 @@ func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path [] it.path = path it.stack = append(it.stack, state) if parentIndex != nil { - *parentIndex++ + *parentIndex = nextChildIndex(*parentIndex) } } @@ -550,8 +551,54 @@ func (it *nodeIterator) pop() { it.path = it.path[:last.pathlen] it.stack[len(it.stack)-1] = nil it.stack = it.stack[:len(it.stack)-1] - // last is now unused - it.putInPool(last) + + it.putInPool(last) // last is now unused +} + +// reachedPath normalizes a path by truncating a terminator if present, and +// returns true if it is greater than or equal to the target. Using this, +// the path of a value node embedded in a full node will compare less than the +// full node's children. +func reachedPath(path, target []byte) bool { + if hasTerm(path) { + path = path[:len(path)-1] + } + return bytes.Compare(path, target) >= 0 +} + +// A value embedded in a full node occupies the last slot (16) of the array of +// children. In order to produce a pre-order traversal when iterating children, +// we jump to this last slot first, then go back to iterate the child nodes (and +// skip the last slot at the end): + +// prevChildIndex returns the index of a child in a full node which precedes +// the given index when performing a pre-order traversal. +func prevChildIndex(index int) int { + switch index { + case 0: // We jumped back to iterate the children, from the value slot + return 16 + case 16: // We jumped to the embedded value slot at the end, from the placeholder index + return -1 + case 17: // We skipped the value slot after iterating all the children + return 15 + default: // We are iterating the children in sequence + return index - 1 + } +} + +// nextChildIndex returns the index of a child in a full node which follows +// the given index when performing a pre-order traversal.
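+// For example, starting from the placeholder index -1, a full node with an +// embedded value is visited value-first: -1 -> 16, then 0, 1, ..., 15, and +// finally 17, which marks the end of the children.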
+func nextChildIndex(index int) int { + switch index { + case -1: // Jump from the placeholder index to the embedded value slot + return 16 + case 15: // Skip the value slot after iterating the children + return 17 + case 16: // From the embedded value slot, jump back to iterate the children + return 0 + default: // Iterate children in sequence + return index + 1 + } } func compareNodes(a, b NodeIterator) int { diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 41e83f6cb6..a8f3c4413e 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -182,14 +182,14 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) { type kvs struct{ k, v string } var testdata1 = []kvs{ + {"bar", "b"}, {"barb", "ba"}, {"bard", "bc"}, {"bars", "bb"}, - {"bar", "b"}, {"fab", "z"}, + {"foo", "a"}, {"food", "ab"}, {"foos", "aa"}, - {"foo", "a"}, } var testdata2 = []kvs{ @@ -218,7 +218,7 @@ func TestIteratorSeek(t *testing.T) { // Seek to a non-existent key. it = NewIterator(trie.MustNodeIterator([]byte("barc"))) - if err := checkIteratorOrder(testdata1[1:], it); err != nil { + if err := checkIteratorOrder(testdata1[2:], it); err != nil { t.Fatal(err) } @@ -227,6 +227,12 @@ func TestIteratorSeek(t *testing.T) { if err := checkIteratorOrder(nil, it); err != nil { t.Fatal(err) } + + // Seek to a key for which a prefixing key exists. + it = NewIterator(trie.MustNodeIterator([]byte("food"))) + if err := checkIteratorOrder(testdata1[6:], it); err != nil { + t.Fatal(err) + } } func checkIteratorOrder(want []kvs, it *Iterator) error { @@ -311,16 +317,16 @@ func TestUnionIterator(t *testing.T) { all := []struct{ k, v string }{ {"aardvark", "c"}, + {"bar", "b"}, {"barb", "ba"}, {"barb", "bd"}, {"bard", "bc"}, {"bars", "bb"}, {"bars", "be"}, - {"bar", "b"}, {"fab", "z"}, + {"foo", "a"}, {"food", "ab"}, {"foos", "aa"}, - {"foo", "a"}, {"jars", "d"}, } @@ -512,7 +518,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin rawdb.WriteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, barNodeBlob, triedb.Scheme()) } // Check that iteration produces the right set of values. - if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil { + if err := checkIteratorOrder(testdata1[3:], NewIterator(it)); err != nil { t.Fatal(err) } } diff --git a/trie/secure_trie.go b/trie/secure_trie.go index efd4dfb5d3..e38d5ac4dc 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -284,3 +284,8 @@ func (t *StateTrie) getSecKeyCache() map[string][]byte { } return t.secKeyCache } + +// IsVerkle indicates whether the trie is a verkle tree; a StateTrie never is. +func (t *StateTrie) IsVerkle() bool { + return false +} diff --git a/trie/sync.go b/trie/sync.go index f6b20b2240..3b7caae5b1 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -22,6 +22,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -149,15 +150,42 @@ type CodeSyncResult struct { // nodeOp represents an operation on a trie node. It either represents the // deletion of a specific node or a write persisting a retrieved node. type nodeOp struct { + del bool // flags whether the op is a delete operation owner common.Hash // identifier of the trie (empty for account trie) path []byte // path from the root to the specified node.
blob []byte // the content of the node (nil for deletion) hash common.Hash // hash of the node content (empty for node deletion) } -// isDelete indicates if the operation is a database deletion. -func (op *nodeOp) isDelete() bool { - return len(op.blob) == 0 +// valid checks whether the node operation is valid. +func (op *nodeOp) valid() bool { + if op.del && len(op.blob) != 0 { + return false + } + if !op.del && len(op.blob) == 0 { + return false + } + return true +} + +// string returns the node operation in string representation. +func (op *nodeOp) string() string { + var node string + if op.owner == (common.Hash{}) { + node = fmt.Sprintf("node: (%v)", op.path) + } else { + node = fmt.Sprintf("node: (%x-%v)", op.owner, op.path) + } + var blobHex string + if len(op.blob) == 0 { + blobHex = "nil" + } else { + blobHex = hexutil.Encode(op.blob) + } + if op.del { + return fmt.Sprintf("del %s %s %s", node, blobHex, op.hash.Hex()) + } + return fmt.Sprintf("write %s %s %s", node, blobHex, op.hash.Hex()) } // syncMemBatch is an in-memory buffer of successfully downloaded but not yet @@ -220,6 +248,7 @@ func (batch *syncMemBatch) delNode(owner common.Hash, path []byte) { batch.size += common.HashLength + uint64(len(path)) } batch.nodes = append(batch.nodes, nodeOp{ + del: true, owner: owner, path: path, }) @@ -428,7 +457,10 @@ func (s *Sync) Commit(dbw ethdb.Batch) error { storage int ) for _, op := range s.membatch.nodes { - if op.isDelete() { + if !op.valid() { + return fmt.Errorf("invalid op, %s", op.string()) + } + if op.del { // node deletion is only supported in path mode. if op.owner == (common.Hash{}) { rawdb.DeleteAccountTrieNode(dbw, op.path) diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go index 328b2d2527..2a4a632d49 100644 --- a/trie/utils/verkle.go +++ b/trie/utils/verkle.go @@ -23,7 +23,7 @@ import ( "github.com/crate-crypto/go-ipa/bandersnatch/fr" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/metrics" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) @@ -219,7 +219,7 @@ func CodeChunkKey(address []byte, chunk *uint256.Int) []byte { return GetTreeKey(address, treeIndex, subIndex) } -func storageIndex(bytes []byte) (*uint256.Int, byte) { +func StorageIndex(bytes []byte) (*uint256.Int, byte) { // If the storage slot is in the header, we need to add the header offset. var key uint256.Int key.SetBytes(bytes) @@ -245,7 +245,7 @@ // StorageSlotKey returns the verkle tree key of the storage slot for the // specified account. func StorageSlotKey(address []byte, storageKey []byte) []byte { - treeIndex, subIndex := storageIndex(storageKey) + treeIndex, subIndex := StorageIndex(storageKey) return GetTreeKey(address, treeIndex, subIndex) } @@ -296,7 +296,7 @@ func CodeChunkKeyWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256 // slot for the specified account. The difference from StorageSlotKey is that the // address evaluation is already computed, to minimize the computational overhead.
func StorageSlotKeyWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { - treeIndex, subIndex := storageIndex(storageKey) + treeIndex, subIndex := StorageIndex(storageKey) return GetTreeKeyWithEvaluatedAddress(evaluated, treeIndex, subIndex) } diff --git a/trie/utils/verkle_test.go b/trie/utils/verkle_test.go index 28b059c379..c29504a6d0 100644 --- a/trie/utils/verkle_test.go +++ b/trie/utils/verkle_test.go @@ -20,7 +20,7 @@ import ( "bytes" "testing" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/trie/verkle.go b/trie/verkle.go index 01d813d9ec..bb0c54857f 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/utils" "github.com/ethereum/go-ethereum/triedb/database" - "github.com/gballet/go-verkle" + "github.com/ethereum/go-verkle" "github.com/holiman/uint256" ) diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 05a28aa1ef..bd6aeaa6ab 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -190,7 +190,7 @@ func (db *Database) repairHistory() error { // all of them. Fix the tests first. return nil } - freezer, err := rawdb.NewStateFreezer(ancient, false) + freezer, err := rawdb.NewStateFreezer(ancient, db.readOnly) if err != nil { log.Crit("Failed to open state history freezer", "err", err) }
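Note: with storageIndex exported as StorageIndex above, code outside trie/utils (e.g. witness gas accounting for EIP-4762) can map a raw storage slot to its verkle tree position. A minimal usage sketch, not part of the diff; the address and slot values are illustrative only:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/utils"
)

func main() {
	addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
	slot := common.HexToHash("0x01")

	// Split the raw slot into its tree index and sub-index. Small slot
	// numbers fall in the account header group via the header offset.
	treeIndex, subIndex := utils.StorageIndex(slot.Bytes())
	fmt.Println("tree index:", treeIndex, "sub index:", subIndex)

	// Full verkle tree key for (address, slot), combining both parts.
	key := utils.StorageSlotKey(addr.Bytes(), slot.Bytes())
	fmt.Printf("tree key: %x\n", key)
}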