From da863bf04cd820069528705c3ef4c834dcd53d20 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 20 Oct 2023 11:16:07 -0800 Subject: [PATCH 001/128] Fixed memory leak and deadcode. (#4539) --- consensus/consensus.go | 2 +- consensus/consensus_service.go | 8 +++++--- consensus/consensus_v2.go | 8 +------- consensus/downloader.go | 31 ++++++++----------------------- 4 files changed, 15 insertions(+), 34 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index 09bdef51ae..b396f6eadd 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -296,7 +296,7 @@ func New( // viewID has to be initialized as the height of // the blockchain during initialization as it was // displayed on explorer as Height right now - consensus.SetCurBlockViewID(0) + consensus.setCurBlockViewID(0) consensus.SlashChan = make(chan slash.Record) consensus.readySignal = make(chan ProposalType) consensus.commitSigChannel = make(chan []byte) diff --git a/consensus/consensus_service.go b/consensus/consensus_service.go index 23f0b47519..cd15333a01 100644 --- a/consensus/consensus_service.go +++ b/consensus/consensus_service.go @@ -514,12 +514,14 @@ func (consensus *Consensus) setViewIDs(height uint64) { // SetCurBlockViewID set the current view ID func (consensus *Consensus) SetCurBlockViewID(viewID uint64) uint64 { - return consensus.current.SetCurBlockViewID(viewID) + consensus.mutex.Lock() + defer consensus.mutex.Unlock() + return consensus.setCurBlockViewID(viewID) } // SetCurBlockViewID set the current view ID -func (consensus *Consensus) setCurBlockViewID(viewID uint64) { - consensus.current.SetCurBlockViewID(viewID) +func (consensus *Consensus) setCurBlockViewID(viewID uint64) uint64 { + return consensus.current.SetCurBlockViewID(viewID) } // SetViewChangingID set the current view change ID diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index bc0d185bdf..5beb54ed72 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -323,9 +323,7 @@ func (consensus *Consensus) Start( consensus.mutex.Unlock() }() - if consensus.dHelper != nil { - consensus.dHelper.start() - } + consensus.dHelper.start() } func (consensus *Consensus) StartChannel() { @@ -448,10 +446,6 @@ func (consensus *Consensus) BlockChannel(newBlock *types.Block) { Msg("[ConsensusMainLoop] STARTING CONSENSUS") consensus.announce(newBlock) }) - - if consensus.dHelper != nil { - consensus.dHelper.start() - } } // LastMileBlockIter is the iterator to iterate over the last mile blocks in consensus cache. 
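The consensus_service.go hunk above follows the usual Go exported/unexported locking split: SetCurBlockViewID acquires consensus.mutex and delegates to setCurBlockViewID, which assumes the lock is already held. Internal call sites — like the constructor fix in consensus.go — use the lowercase form, and any path that already owns the non-reentrant mutex must do the same to avoid self-deadlock. A minimal standalone sketch of the pattern, with hypothetical names:

package example

import "sync"

// Counter illustrates the split used in the hunk above: Set takes the
// lock itself; set assumes the caller already holds it.
type Counter struct {
	mu  sync.Mutex
	val uint64
}

// Set is safe for external callers: it acquires the mutex and delegates.
func (c *Counter) Set(v uint64) uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.set(v)
}

// set must only be called with c.mu held (or before the object is
// shared, as in a constructor), mirroring consensus.setCurBlockViewID.
func (c *Counter) set(v uint64) uint64 {
	c.val = v
	return c.val
}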
diff --git a/consensus/downloader.go b/consensus/downloader.go index 26755bbd25..1fdc131e7a 100644 --- a/consensus/downloader.go +++ b/consensus/downloader.go @@ -39,7 +39,7 @@ func newDownloadHelper(c *Consensus, d downloader) *downloadHelper { finishedCh := make(chan struct{}, 1) finishedSub := d.SubscribeDownloadFinished(finishedCh) - return &downloadHelper{ + out := &downloadHelper{ c: c, d: d, startedCh: startedCh, @@ -47,16 +47,12 @@ func newDownloadHelper(c *Consensus, d downloader) *downloadHelper { startedSub: startedSub, finishedSub: finishedSub, } + go out.downloadStartedLoop() + go out.downloadFinishedLoop() + return out } func (dh *downloadHelper) start() { - go dh.downloadStartedLoop() - go dh.downloadFinishedLoop() -} - -func (dh *downloadHelper) close() { - dh.startedSub.Unsubscribe() - dh.finishedSub.Unsubscribe() } func (dh *downloadHelper) downloadStartedLoop() { @@ -107,21 +103,10 @@ func (consensus *Consensus) AddConsensusLastMile() error { } func (consensus *Consensus) spinUpStateSync() { - if consensus.dHelper != nil { - consensus.dHelper.d.DownloadAsync() - consensus.current.SetMode(Syncing) - for _, v := range consensus.consensusTimeout { - v.Stop() - } - } else { - select { - case consensus.BlockNumLowChan <- struct{}{}: - consensus.current.SetMode(Syncing) - for _, v := range consensus.consensusTimeout { - v.Stop() - } - default: - } + consensus.dHelper.d.DownloadAsync() + consensus.current.SetMode(Syncing) + for _, v := range consensus.consensusTimeout { + v.Stop() } } From a65f92d0aeb95c944f84651a2afec72e44754953 Mon Sep 17 00:00:00 2001 From: UncertainBadg3r <139782199+UncertainBadg3r@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:19:55 -0400 Subject: [PATCH 002/128] Fix typo in README test steps (#4541) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Casey Gardiner <117784577+ONECasey@users.noreply.github.com> Co-authored-by: Max <82761650+MaxMustermann2@users.noreply.github.com> Co-authored-by: Soph <35721420+sophoah@users.noreply.github.com> Co-authored-by: Konstantin <355847+Frozen@users.noreply.github.com> Co-authored-by: Nita Neou (Soph) Co-authored-by: Diego Nava Co-authored-by: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Co-authored-by: Gheis Mohammadi Co-authored-by: “GheisMohammadi” <36589218+GheisMohammadi@users.noreply.github.com> Co-authored-by: Adam Androulidakis <37982984+adsorptionenthalpy@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0f2aa11f96..998832b352 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ make debug-kill To keep things consistent, we have a docker image to run all tests. **These are the same tests ran on the pull request checks**. -Note that all testing docker container binds a couple of ports to the host machine for your convince. The ports are: +Note that all test Docker containers bind several ports to the host machine for your convenience. The ports are: * `9500` - Shard 0 RPC for a validator * `9501` - Shard 1 RPC for a validator * `9599` - Shard 0 RPC for an explorer From 5faeb5f79a5a48cddf5576a8a7302729f89f2ac0 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Mon, 23 Oct 2023 08:20:27 -0800 Subject: [PATCH 003/128] Call `UpdateConsensusInformation` after bingo. 
(#4542) --- node/node_handler.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/node/node_handler.go b/node/node_handler.go index 92c3396d4b..eeaf90f2d7 100644 --- a/node/node_handler.go +++ b/node/node_handler.go @@ -350,6 +350,9 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error { Int("numStakingTxns", len(newBlock.StakingTransactions())). Uint32("numSignatures", numSignatures). Msg("BINGO !!! Reached Consensus") + if node.Consensus.Mode() == consensus.Syncing { + node.Consensus.SetMode(node.Consensus.UpdateConsensusInformation()) + } node.Consensus.UpdateValidatorMetrics(float64(numSignatures), float64(newBlock.NumberU64())) From d49715e8f731f70c1b4ec6614adf6309d5ca394d Mon Sep 17 00:00:00 2001 From: Gheis Mohammadi Date: Wed, 25 Oct 2023 07:05:20 +0800 Subject: [PATCH 004/128] add new functions to p2p stream client for sharing the full states (#4540) * add new functions to p2p stream client for sharing the full states * remove extra comments, add bytes checking * add client tests for new p2p stream client functions * rename new client functions * complete tests for new functions of p2p stream client --- consensus/engine/consensus_engine.go | 8 + core/blockchain.go | 5 + core/blockchain_impl.go | 5 + core/blockchain_stub.go | 9 + go.mod | 68 +- go.sum | 121 +- hmy/downloader/adapter_test.go | 4 + internal/chain/engine_test.go | 6 + p2p/stream/protocols/sync/chain.go | 295 +++ p2p/stream/protocols/sync/chain_test.go | 174 ++ p2p/stream/protocols/sync/client.go | 417 ++++ p2p/stream/protocols/sync/client_test.go | 326 ++++ p2p/stream/protocols/sync/const.go | 33 + p2p/stream/protocols/sync/message/compose.go | 128 ++ p2p/stream/protocols/sync/message/msg.pb.go | 1802 ++++++++++++++---- p2p/stream/protocols/sync/message/msg.proto | 70 + p2p/stream/protocols/sync/message/parse.go | 64 + p2p/stream/protocols/sync/stream.go | 170 ++ p2p/stream/protocols/sync/stream_test.go | 108 ++ test/chain/chain/chain_makers.go | 6 + 20 files changed, 3372 insertions(+), 447 deletions(-) diff --git a/consensus/engine/consensus_engine.go b/consensus/engine/consensus_engine.go index 5ac2c776b0..37c4ea5901 100644 --- a/consensus/engine/consensus_engine.go +++ b/consensus/engine/consensus_engine.go @@ -4,9 +4,11 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/consensus/reward" "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/core/state/snapshot" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/crypto/bls" "github.com/harmony-one/harmony/internal/params" @@ -23,6 +25,9 @@ type ChainReader interface { // Config retrieves the blockchain's chain configuration. Config() *params.ChainConfig + // TrieDB returns trie database + TrieDB() *trie.Database + // TrieNode retrieves a blob of data associated with a trie node // either from ephemeral in-memory cache, or from persistent storage. TrieNode(hash common.Hash) ([]byte, error) @@ -62,6 +67,9 @@ type ChainReader interface { // GetBlock retrieves a block from the database by hash and number. GetBlock(hash common.Hash, number uint64) *types.Block + // Snapshots returns the blockchain snapshot tree. + Snapshots() *snapshot.Tree + // ReadShardState retrieves sharding state given the epoch number. // This api reads the shard state cached or saved on the chaindb. // Thus, only should be used to read the shard state of the current chain. 
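The two methods added to ChainReader here — TrieDB and Snapshots — are the hooks the new snap-style handlers later in this patch rely on: one opens Merkle tries for proving, the other iterates flat state. A minimal sketch of a consumer against the post-patch interface; countAccounts is a hypothetical helper for illustration, not part of the change:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/harmony-one/harmony/consensus/engine"
)

// countAccounts checks that the state for root is locally present via
// TrieDB, then walks the flat account state via the snapshot tree —
// the same pair of steps getAccountRange performs further down.
func countAccounts(chain engine.ChainReader, root, origin common.Hash) (int, error) {
	// Opening the account trie doubles as an availability check.
	if _, err := trie.New(trie.StateTrieID(root), chain.TrieDB()); err != nil {
		return 0, err
	}
	it, err := chain.Snapshots().AccountIterator(root, origin)
	if err != nil {
		return 0, err
	}
	defer it.Release()
	n := 0
	for it.Next() {
		// it.Hash() and it.Account() expose each flat-state entry.
		n++
	}
	return n, it.Error()
}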
diff --git a/core/blockchain.go b/core/blockchain.go index 24272a91ef..0adc96925e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/consensus/engine" "github.com/harmony-one/harmony/consensus/reward" @@ -62,6 +63,10 @@ type BlockChain interface { State() (*state.DB, error) // StateAt returns a new mutable state based on a particular point in time. StateAt(root common.Hash) (*state.DB, error) + // Snapshots returns the blockchain snapshot tree. + Snapshots() *snapshot.Tree + // TrieDB returns trie database + TrieDB() *trie.Database // HasBlock checks if a block is fully present in the database or not. HasBlock(hash common.Hash, number uint64) bool // HasState checks if state trie is fully present in the database or not. diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 3b5bc6bb10..e9eca1f4cd 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1122,6 +1122,11 @@ func (bc *BlockChainImpl) GetUnclesInChain(b *types.Block, length int) []*block. return uncles } +// TrieDB returns trie database +func (bc *BlockChainImpl) TrieDB() *trie.Database { + return bc.stateCache.TrieDB() +} + // TrieNode retrieves a blob of data associated with a trie node (or code hash) // either from ephemeral in-memory cache, or from persistent storage. func (bc *BlockChainImpl) TrieNode(hash common.Hash) ([]byte, error) { diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index 804b48a00a..e9ef10ce94 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/consensus/engine" "github.com/harmony-one/harmony/consensus/reward" @@ -64,6 +65,14 @@ func (a Stub) StateAt(common.Hash) (*state.DB, error) { return nil, errors.Errorf("method StateAt not implemented for %s", a.Name) } +func (a Stub) Snapshots() *snapshot.Tree { + return nil +} + +func (a Stub) TrieDB() *trie.Database { + return nil +} + func (a Stub) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, errors.Errorf("method TrieNode not implemented for %s", a.Name) } diff --git a/go.mod b/go.mod index 8644ba7bd4..b1cf4fbc27 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/coinbase/rosetta-sdk-go v0.7.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 - github.com/ethereum/go-ethereum v1.11.2 + github.com/ethereum/go-ethereum v1.13.4 github.com/go-redis/redis/v8 v8.11.5 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 @@ -38,7 +38,7 @@ require ( github.com/pborman/uuid v1.2.0 github.com/pelletier/go-toml v1.9.5 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.17.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rjeczalik/notify v0.9.2 github.com/rs/cors v1.7.0 @@ -52,14 +52,14 @@ require ( github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee go.uber.org/ratelimit v0.1.0 go.uber.org/zap v1.24.0 - golang.org/x/crypto v0.9.0 - golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.2.0 - golang.org/x/sys v0.8.0 // 
indirect + golang.org/x/crypto v0.14.0 + golang.org/x/net v0.17.0 // indirect + golang.org/x/sync v0.4.0 + golang.org/x/sys v0.13.0 // indirect golang.org/x/time v0.3.0 - golang.org/x/tools v0.9.3 // indirect + golang.org/x/tools v0.14.0 // indirect google.golang.org/grpc v1.55.0 - google.golang.org/protobuf v1.30.0 + google.golang.org/protobuf v1.31.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 @@ -76,27 +76,27 @@ require ( require ( github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect - github.com/BurntSushi/toml v1.2.0 // indirect - github.com/DataDog/zstd v1.5.2 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/DataDog/zstd v1.5.5 // indirect github.com/OpenPeeDeeP/depguard v1.0.1 // indirect github.com/VictoriaMetrics/metrics v1.23.1 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.5.0 // indirect + github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/bombsimon/wsl/v2 v2.0.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3 // indirect - github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/deckarep/golang-set/v2 v2.3.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/deckarep/golang-set/v2 v2.3.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/ristretto v0.0.3 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -109,10 +109,10 @@ require ( github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect - github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/getsentry/sentry-go v0.25.0 // indirect github.com/go-critic/go-critic v0.4.0 // indirect github.com/go-lintpack/lintpack v0.5.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect @@ -126,7 +126,7 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 // indirect @@ 
-152,8 +152,8 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect - github.com/holiman/uint256 v1.2.2 // indirect - github.com/huin/goupnp v1.1.0 // indirect + github.com/holiman/uint256 v1.2.3 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect @@ -167,7 +167,7 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/compress v1.17.1 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -188,7 +188,7 @@ require ( github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.18 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/dns v1.1.53 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect @@ -220,10 +220,10 @@ require ( github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/prometheus/tsdb v0.10.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/tsdb v0.7.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-19 v0.3.3 // indirect github.com/quic-go/qtls-go1-20 v0.2.3 // indirect @@ -231,7 +231,7 @@ require ( github.com/quic-go/webtransport-go v0.5.2 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/sirupsen/logrus v1.9.0 // indirect @@ -245,8 +245,8 @@ require ( github.com/subosito/gotenv v1.4.1 // indirect github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 // indirect github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/tommy-muehle/go-mnd v1.1.1 // indirect github.com/torquem-ch/mdbx-go v0.27.10 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect @@ -256,16 +256,16 @@ require ( github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect 
go.opencensus.io v0.24.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/dig v1.16.1 // indirect go.uber.org/fx v1.19.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect - golang.org/x/mod v0.10.0 // indirect - golang.org/x/term v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect diff --git a/go.sum b/go.sum index d5200066de..115ec6ebab 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= @@ -71,8 +72,9 @@ github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= @@ -138,8 +140,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bits-and-blooms/bitset v1.5.0 h1:NpE8frKRLGHIcEzkR+gZhiioW1+WbYV6fKwD6ZIpQT8= -github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= 
+github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= @@ -204,8 +206,9 @@ github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD9 github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= -github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= @@ -214,8 +217,9 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lg github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3 h1:S4re5MXHfznkOlgkgUfh9ptgaG2esdH95IuJWwP0fM0= github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3/go.mod h1:9lRMC4XN3/BLPtIp6kAKwIaHu369NOf2rMucPzipz50= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/coinbase/rosetta-sdk-go v0.7.0 h1:lmTO/JEpCvZgpbkOITL95rA80CPKb5CtMzLaqF2mCNg= @@ -258,13 +262,13 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= -github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= +github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= 
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= @@ -352,8 +356,9 @@ github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIX github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= -github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= +github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -393,8 +398,9 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -518,8 +524,9 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy 
v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= @@ -682,13 +689,13 @@ github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk= -github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= +github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= -github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= @@ -816,8 +823,8 @@ github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -939,8 +946,8 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9 github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -1147,15 +1154,17 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1166,8 +1175,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= 
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1176,11 +1185,11 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= -github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= @@ -1209,8 +1218,9 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -1363,12 +1373,12 @@ github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiff github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= 
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tommy-muehle/go-mnd v1.1.1 h1:4D0wuPKjOTiK2garzuPGGvm4zZ/wLYDOH8TJSABC7KU= github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= @@ -1445,8 +1455,9 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= @@ -1546,8 +1557,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1564,8 +1575,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d 
h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1597,8 +1608,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1673,8 +1684,8 @@ golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1707,8 +1718,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1826,15 +1837,17 @@ 
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1847,8 +1860,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1946,8 +1959,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= -golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2093,8 +2106,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/hmy/downloader/adapter_test.go b/hmy/downloader/adapter_test.go index 692ed8ad77..4bc023b5cc 100644 --- a/hmy/downloader/adapter_test.go +++ b/hmy/downloader/adapter_test.go @@ -8,11 +8,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/consensus/engine" "github.com/harmony-one/harmony/consensus/reward" "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/core/state/snapshot" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/crypto/bls" "github.com/harmony-one/harmony/internal/params" @@ -88,7 +90,9 @@ func (bc *testBlockChain) changeBlockNumber(val uint64) { func (bc *testBlockChain) ShardID() uint32 { return 0 } func (bc *testBlockChain) ReadShardState(epoch *big.Int) (*shard.State, error) { return nil, nil } +func (bc *testBlockChain) Snapshots() *snapshot.Tree { return nil } func (bc *testBlockChain) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, nil } +func (bc *testBlockChain) TrieDB() *trie.Database { return nil } func (bc *testBlockChain) Config() *params.ChainConfig { return nil } func (bc *testBlockChain) WriteCommitSig(blockNum uint64, lastCommits []byte) error { return nil } func (bc *testBlockChain) GetHeader(hash common.Hash, number uint64) *block.Header { return nil } diff --git a/internal/chain/engine_test.go b/internal/chain/engine_test.go index 7654d9d6cd..530cbdc019 100644 --- a/internal/chain/engine_test.go +++ b/internal/chain/engine_test.go @@ -5,6 +5,7 @@ import ( "math/big" "testing" + "github.com/ethereum/go-ethereum/trie" bls_core "github.com/harmony-one/bls/ffi/go/bls" "github.com/harmony-one/harmony/block" blockfactory "github.com/harmony-one/harmony/block/factory" @@ -21,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/harmony-one/harmony/core/rawdb" "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/core/state/snapshot" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/params" ) @@ -328,6 +330,7 @@ func (bc *fakeBlockChain) ContractCode(hash common.Hash) ([]byte, 
error) func (bc *fakeBlockChain) ValidatorCode(hash common.Hash) ([]byte, error) { return []byte{}, nil } func (bc *fakeBlockChain) ShardID() uint32 { return 0 } func (bc *fakeBlockChain) ReadShardState(epoch *big.Int) (*shard.State, error) { return nil, nil } +func (bc *fakeBlockChain) TrieDB() *trie.Database { return nil } func (bc *fakeBlockChain) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, nil } func (bc *fakeBlockChain) WriteCommitSig(blockNum uint64, lastCommits []byte) error { return nil } func (bc *fakeBlockChain) GetHeaderByNumber(number uint64) *block.Header { return nil } @@ -353,6 +356,9 @@ func (bc *fakeBlockChain) Config() *params.ChainConfig { func (cr *fakeBlockChain) StateAt(root common.Hash) (*state.DB, error) { return nil, nil } +func (cr *fakeBlockChain) Snapshots() *snapshot.Tree { + return nil +} func (bc *fakeBlockChain) ReadValidatorSnapshot(addr common.Address) (*staking.ValidatorSnapshot, error) { return nil, nil } diff --git a/p2p/stream/protocols/sync/chain.go b/p2p/stream/protocols/sync/chain.go index a095fffc1f..efabd9307c 100644 --- a/p2p/stream/protocols/sync/chain.go +++ b/p2p/stream/protocols/sync/chain.go @@ -1,12 +1,21 @@ package sync import ( + Bytes "bytes" + "fmt" + "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/consensus/engine" + "github.com/harmony-one/harmony/core/state" "github.com/harmony-one/harmony/core/types" shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding" + "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils/keylocker" + "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message" "github.com/pkg/errors" ) @@ -18,6 +27,10 @@ type chainHelper interface { getBlocksByHashes(hs []common.Hash) ([]*types.Block, error) getNodeData(hs []common.Hash) ([][]byte, error) getReceipts(hs []common.Hash) ([]types.Receipts, error) + getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) + getStorageRanges(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.StoragesData, [][]byte, error) + getByteCodes(hs []common.Hash, bytes uint64) ([][]byte, error) + getTrieNodes(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, start time.Time) ([][]byte, error) } type chainHelperImpl struct { @@ -182,3 +195,285 @@ func (ch *chainHelperImpl) getReceipts(hs []common.Hash) ([]types.Receipts, erro } return receipts, nil } + +// getAccountRangeRequest +func (ch *chainHelperImpl) getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) { + if bytes > softResponseLimit { + bytes = softResponseLimit + } + // Retrieve the requested state and bail out if non existent + tr, err := trie.New(trie.StateTrieID(root), ch.chain.TrieDB()) + if err != nil { + return nil, nil, err + } + it, err := ch.chain.Snapshots().AccountIterator(root, origin) + if err != nil { + return nil, nil, err + } + // Iterate over the requested range and pile accounts up + var ( + accounts []*message.AccountData + size uint64 + last common.Hash + ) + for it.Next() { + hash, account := it.Hash(), common.CopyBytes(it.Account()) + + // Track the returned interval for the Merkle proofs + last = hash + + // Assemble the reply item + size += 
+		accounts = append(accounts, &message.AccountData{
+			Hash: hash[:],
+			Body: account,
+		})
+		// If we've exceeded the request threshold, abort
+		if Bytes.Compare(hash[:], limit[:]) >= 0 {
+			break
+		}
+		if size > bytes {
+			break
+		}
+	}
+	it.Release()
+
+	// Generate the Merkle proofs for the first and last account
+	proof := light.NewNodeSet()
+	if err := tr.Prove(origin[:], 0, proof); err != nil {
+		utils.Logger().Warn().Err(err).Interface("origin", origin).Msg("Failed to prove account range")
+		return nil, nil, err
+	}
+	if last != (common.Hash{}) {
+		if err := tr.Prove(last[:], 0, proof); err != nil {
+			utils.Logger().Warn().Err(err).Interface("last", last).Msg("Failed to prove account range")
+			return nil, nil, err
+		}
+	}
+	var proofs [][]byte
+	for _, blob := range proof.NodeList() {
+		proofs = append(proofs, blob)
+	}
+	return accounts, proofs, nil
+}
+
+// getStorageRanges serves a storage-range request for the given accounts, returning slots plus boundary Merkle proofs.
+func (ch *chainHelperImpl) getStorageRanges(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.StoragesData, [][]byte, error) {
+	if bytes > softResponseLimit {
+		bytes = softResponseLimit
+	}
+
+	// Calculate the hard limit at which to abort, even if mid storage trie
+	hardLimit := uint64(float64(bytes) * (1 + stateLookupSlack))
+
+	// Retrieve storage ranges until the packet limit is reached
+	var (
+		slots  []*message.StoragesData
+		proofs [][]byte
+		size   uint64
+	)
+	for _, account := range accounts {
+		// If we've exceeded the requested data limit, abort without opening
+		// a new storage range (that we'd need to prove due to exceeded size)
+		if size >= bytes {
+			break
+		}
+		// The first account might start from a different origin and end sooner
+		// Retrieve the requested state and bail out if non-existent
+		it, err := ch.chain.Snapshots().StorageIterator(root, account, origin)
+		if err != nil {
+			return nil, nil, err
+		}
+		// Iterate over the requested range and pile slots up
+		var (
+			storage []*message.StorageData
+			last    common.Hash
+			abort   bool
+		)
+		for it.Next() {
+			if size >= hardLimit {
+				abort = true
+				break
+			}
+			hash, slot := it.Hash(), common.CopyBytes(it.Slot())
+
+			// Track the returned interval for the Merkle proofs
+			last = hash
+
+			// Assemble the reply item
+			size += uint64(common.HashLength + len(slot))
+			storage = append(storage, &message.StorageData{
+				Hash: hash[:],
+				Body: slot,
+			})
+			// If we've exceeded the request threshold, abort
+			if Bytes.Compare(hash[:], limit[:]) >= 0 {
+				break
+			}
+		}
+
+		if len(storage) > 0 {
+			storages := &message.StoragesData{
+				Data: storage,
+			}
+			slots = append(slots, storages)
+		}
+		it.Release()
+
+		// Generate the Merkle proofs for the first and last storage slot, but
+		// only if the response was capped. If the entire storage trie is included
+		// in the response, no need for any proofs.
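+		// A proof is attached whenever the request started mid-range (non-zero
+		// origin) or the slot loop above aborted early, i.e. the reply is partial.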
+		if origin != (common.Hash{}) || (abort && len(storage) > 0) {
+			// Request started at a non-zero hash or was capped prematurely, add
+			// the endpoint Merkle proofs
+			accTrie, err := trie.NewStateTrie(trie.StateTrieID(root), ch.chain.TrieDB())
+			if err != nil {
+				return nil, nil, err
+			}
+			acc, err := accTrie.TryGetAccountByHash(account)
+			if err != nil || acc == nil {
+				return nil, nil, err
+			}
+			id := trie.StorageTrieID(root, account, acc.Root)
+			stTrie, err := trie.NewStateTrie(id, ch.chain.TrieDB())
+			if err != nil {
+				return nil, nil, err
+			}
+			proof := light.NewNodeSet()
+			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
+				utils.Logger().Warn().Err(err).Interface("origin", origin).Msg("Failed to prove storage range")
+				return nil, nil, err
+			}
+			if last != (common.Hash{}) {
+				if err := stTrie.Prove(last[:], 0, proof); err != nil {
+					utils.Logger().Warn().Err(err).Interface("last", last).Msg("Failed to prove storage range")
+					return nil, nil, err
+				}
+			}
+			for _, blob := range proof.NodeList() {
+				proofs = append(proofs, blob)
+			}
+			// Proof terminates the reply as proofs are only added if a node
+			// refuses to serve more data (exception when a contract fetch is
+			// finishing, but that's that).
+			break
+		}
+	}
+	return slots, proofs, nil
+}
+
+// getByteCodes returns the contract bytecodes for the requested code hashes, up to the byte budget.
+func (ch *chainHelperImpl) getByteCodes(hashes []common.Hash, bytes uint64) ([][]byte, error) {
+	if bytes > softResponseLimit {
+		bytes = softResponseLimit
+	}
+	if len(hashes) > maxCodeLookups {
+		hashes = hashes[:maxCodeLookups]
+	}
+	// Retrieve bytecodes until the packet size limit is reached
+	var (
+		codes      [][]byte
+		totalBytes uint64
+	)
+	for _, hash := range hashes {
+		if hash == state.EmptyCodeHash {
+			// Peers should not request the empty code, but if they do, at
+			// least send them back a correct response without db lookups
+			codes = append(codes, []byte{})
+		} else if blob, err := ch.chain.ContractCode(hash); err == nil { // TODO: double-check whether ContractCodeWithPrefix is needed here
+			codes = append(codes, blob)
+			totalBytes += uint64(len(blob))
+		}
+		if totalBytes > bytes {
+			break
+		}
+	}
+	return codes, nil
+}
+
+// getTrieNodes returns the trie nodes referenced by the given path sets, bounded by size, lookup and time limits.
+func (ch *chainHelperImpl) getTrieNodes(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, start time.Time) ([][]byte, error) {
+	if bytes > softResponseLimit {
+		bytes = softResponseLimit
+	}
+	// Make sure we have the state associated with the request
+	triedb := ch.chain.TrieDB()
+
+	accTrie, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
+	if err != nil {
+		// We don't have the requested state available, bail out
+		return nil, nil
+	}
+	// The 'snap' might be nil, in which case we cannot serve storage slots.
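+	// If so, account lookups below fall back to the account trie instead, at a
+	// higher estimated cost per lookup.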
+	snap := ch.chain.Snapshots().Snapshot(root)
+	// Retrieve trie nodes until the packet size limit is reached
+	var (
+		nodes      [][]byte
+		totalBytes uint64
+		loads      int // Trie hash expansions to count database reads
+	)
+	for _, p := range paths {
+		switch len(p.Pathset) {
+		case 0:
+			// Ensure we penalize invalid requests
+			return nil, fmt.Errorf("zero-item pathset requested")
+
+		case 1:
+			// If we're only retrieving an account trie node, fetch it directly
+			blob, resolved, err := accTrie.TryGetNode(p.Pathset[0])
+			loads += resolved // always account database reads, even for failures
+			if err != nil {
+				break
+			}
+			nodes = append(nodes, blob)
+			totalBytes += uint64(len(blob))
+
+		default:
+			var stRoot common.Hash
+			// Storage slots requested, open the storage trie and retrieve from there
+			if snap == nil {
+				// We don't have the requested state snapshotted yet (or it is stale),
+				// but can look up the account via the trie instead.
+				account, err := accTrie.TryGetAccountByHash(common.BytesToHash(p.Pathset[0]))
+				loads += 8 // We don't know the exact cost of lookup, this is an estimate
+				if err != nil || account == nil {
+					break
+				}
+				stRoot = account.Root
+			} else {
+				account, err := snap.Account(common.BytesToHash(p.Pathset[0]))
+				loads++ // always account database reads, even for failures
+				if err != nil || account == nil {
+					break
+				}
+				stRoot = common.BytesToHash(account.Root)
+			}
+			id := trie.StorageTrieID(root, common.BytesToHash(p.Pathset[0]), stRoot)
+			stTrie, err := trie.NewStateTrie(id, triedb)
+			loads++ // always account database reads, even for failures
+			if err != nil {
+				break
+			}
+			for _, path := range p.Pathset[1:] {
+				blob, resolved, err := stTrie.TryGetNode(path)
+				loads += resolved // always account database reads, even for failures
+				if err != nil {
+					break
+				}
+				nodes = append(nodes, blob)
+				totalBytes += uint64(len(blob))
+
+				// Sanity check limits to avoid DoS on the store trie loads
+				if totalBytes > bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+					break
+				}
+			}
+		}
+		// Abort request processing if we've exceeded our limits
+		if totalBytes > bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+			break
+		}
+	}
+	return nodes, nil
+}
diff --git a/p2p/stream/protocols/sync/chain_test.go b/p2p/stream/protocols/sync/chain_test.go
index 8d478e2b3f..414492054c 100644
--- a/p2p/stream/protocols/sync/chain_test.go
+++ b/p2p/stream/protocols/sync/chain_test.go
@@ -6,12 +6,14 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/rlp"
 	protobuf "github.com/golang/protobuf/proto"
 	"github.com/harmony-one/harmony/block"
 	"github.com/harmony-one/harmony/core/types"
+	"github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
 	syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
 )
@@ -60,6 +63,26 @@ func (tch *testChainHelper) getReceipts(hs []common.Hash) ([]types.Receipts, err
 	return receipts, nil
 }
 
+func (ch *testChainHelper) getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) {
+	testAccountRanges, testProofs := makeTestAccountRanges(2)
+	return testAccountRanges, testProofs, nil
+}
+
+func (ch *testChainHelper) getStorageRanges(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.StoragesData, [][]byte, error) {
+	testSlots, testProofs := makeTestStorageRanges(2)
+	return testSlots, 
testProofs, nil +} + +func (ch *testChainHelper) getByteCodes(hs []common.Hash, bytes uint64) ([][]byte, error) { + testByteCodes := makeTestByteCodes(2) + return testByteCodes, nil +} + +func (ch *testChainHelper) getTrieNodes(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, start time.Time) ([][]byte, error) { + testTrieNodes := makeTestTrieNodes(2) + return testTrieNodes, nil +} + func checkGetReceiptsResult(b []byte, hs []common.Hash) error { var msg = &syncpb.Message{} if err := protobuf.Unmarshal(b, msg); err != nil { @@ -156,6 +179,57 @@ func makeTestReceipts(n int, nPerBlock int) []*types.Receipt { return receipts } +func makeTestAccountRanges(n int) ([]*message.AccountData, [][]byte) { + accounts := make([]*message.AccountData, n) + proofs := make([][]byte, n) + for i := 0; i < n; i++ { + accounts[i] = &message.AccountData{ + Hash: numberToHash(uint64(i * 2)).Bytes(), + Body: numberToHash(uint64(i*2 + 1)).Bytes(), + } + } + for i := 0; i < n; i++ { + proofs[i] = numberToHash(uint64(i)).Bytes() + } + return accounts, proofs +} + +func makeTestStorageRanges(n int) ([]*message.StoragesData, [][]byte) { + slots := make([]*message.StoragesData, n) + proofs := make([][]byte, n) + for i := 0; i < n; i++ { + slots[i] = &message.StoragesData{ + Data: make([]*syncpb.StorageData, 2), + } + for j := 0; j < 2; j++ { + slots[i].Data[j] = &message.StorageData{ + Hash: numberToHash(uint64(i * 2)).Bytes(), + Body: numberToHash(uint64(i*2 + 1)).Bytes(), + } + } + } + for i := 0; i < n; i++ { + proofs[i] = numberToHash(uint64(i)).Bytes() + } + return slots, proofs +} + +func makeTestByteCodes(n int) [][]byte { + byteCodes := make([][]byte, n) + for i := 0; i < n; i++ { + byteCodes[i] = numberToHash(uint64(i)).Bytes() + } + return byteCodes +} + +func makeTestTrieNodes(n int) [][]byte { + trieNodes := make([][]byte, n) + for i := 0; i < n; i++ { + trieNodes[i] = numberToHash(uint64(i)).Bytes() + } + return trieNodes +} + func decodeBlocksBytes(bbs [][]byte) ([]*types.Block, error) { blocks := make([]*types.Block, 0, len(bbs)) @@ -169,6 +243,19 @@ func decodeBlocksBytes(bbs [][]byte) ([]*types.Block, error) { return blocks, nil } +func decodeHashBytes(hs [][]byte) ([]common.Hash, error) { + hashes := make([]common.Hash, 0) + + for _, h := range hs { + var hash common.Hash + if err := rlp.DecodeBytes(h, &hash); err != nil { + return nil, err + } + hashes = append(hashes, hash) + } + return hashes, nil +} + func checkBlockNumberResult(b []byte) error { var msg = &syncpb.Message{} if err := protobuf.Unmarshal(b, msg); err != nil { @@ -230,3 +317,90 @@ func checkBlocksByHashesResult(b []byte, hs []common.Hash) error { } return nil } + +func checkAccountRangeResult(bytes uint64, b []byte) error { + var msg = &syncpb.Message{} + if err := protobuf.Unmarshal(b, msg); err != nil { + return err + } + gbResp, err := msg.GetAccountRangesResponse() + if err != nil { + return err + } + if len(gbResp.Accounts) == 0 { + return errors.New("nil response from GetAccountRanges") + } + if len(gbResp.Proof) != len(gbResp.Accounts) { + return errors.New("unexpected proofs") + } + if len(b) > int(bytes) { + return errors.New("unexpected data bytes") + } + return nil +} + +func checkStorageRangesResult(accounts []common.Hash, bytes uint64, b []byte) error { + var msg = &syncpb.Message{} + if err := protobuf.Unmarshal(b, msg); err != nil { + return err + } + gbResp, err := msg.GetStorageRangesResponse() + if err != nil { + return err + } + if len(gbResp.Slots) == 0 { + return errors.New("nil response from 
GetStorageRanges") + } + if len(gbResp.Slots) != len(gbResp.Proof) { + return errors.New("unexpected proofs") + } + sz := unsafe.Sizeof(gbResp.Slots) + if sz > uintptr(bytes) { + return errors.New("unexpected slot bytes") + } + return nil +} + +func checkByteCodesResult(hs []common.Hash, bytes uint64, b []byte) error { + var msg = &syncpb.Message{} + if err := protobuf.Unmarshal(b, msg); err != nil { + return err + } + gbResp, err := msg.GetByteCodesResponse() + if err != nil { + return err + } + if len(gbResp.Codes) == 0 { + return errors.New("nil response from GetByteCodes") + } + if len(gbResp.Codes) != len(hs) { + return errors.New("unexpected byte codes") + } + sz := len(hs) * common.HashLength + if sz > int(bytes) { + return errors.New("unexpected data bytes") + } + return nil +} + +func checkTrieNodesResult(hs []common.Hash, bytes uint64, b []byte) error { + var msg = &syncpb.Message{} + if err := protobuf.Unmarshal(b, msg); err != nil { + return err + } + gbResp, err := msg.GetTrieNodesResponse() + if err != nil { + return err + } + if len(gbResp.Nodes) == 0 { + return errors.New("nil response from checkGetTrieNodes") + } + if len(gbResp.Nodes) != len(hs) { + return errors.New("unexpected byte codes") + } + sz := len(hs) * common.HashLength + if sz > int(bytes) { + return errors.New("unexpected data bytes") + } + return nil +} diff --git a/p2p/stream/protocols/sync/client.go b/p2p/stream/protocols/sync/client.go index 0b8a2a2fd9..9024142cef 100644 --- a/p2p/stream/protocols/sync/client.go +++ b/p2p/stream/protocols/sync/client.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" protobuf "github.com/golang/protobuf/proto" "github.com/harmony-one/harmony/core/types" + "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message" syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/pkg/errors" @@ -181,6 +182,120 @@ func (p *Protocol) GetNodeData(ctx context.Context, hs []common.Hash, opts ...Op return } +// GetAccountRange do getAccountRange through sync stream protocol. +// returns the accounts along with proofs as result, target stream id, and error +func (p *Protocol) GetAccountRange(ctx context.Context, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (accounts []*message.AccountData, proof []common.Hash, stid sttypes.StreamID, err error) { + timer := p.doMetricClientRequest("getAccountRange") + defer p.doMetricPostClientRequest("getAccountRange", err, timer) + + if bytes == 0 { + err = fmt.Errorf("zero account ranges bytes requested") + return + } + if bytes > softResponseLimit { + err = fmt.Errorf("requested bytes exceed limit") + return + } + req := newGetAccountRangeRequest(root, origin, limit, bytes) + resp, stid, err := p.rm.DoRequest(ctx, req, opts...) + if err != nil { + return + } + accounts, proof, err = req.getAccountRangeFromResponse(resp) + return +} + +// GetStorageRanges do getStorageRanges through sync stream protocol. 
+// It returns the slots along with proofs, the target stream id, and an error.
+func (p *Protocol) GetStorageRanges(ctx context.Context, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (slots []*message.StorageData, proof []common.Hash, stid sttypes.StreamID, err error) {
+	timer := p.doMetricClientRequest("getStorageRanges")
+	defer p.doMetricPostClientRequest("getStorageRanges", err, timer)
+
+	if bytes == 0 {
+		err = fmt.Errorf("zero storage ranges bytes requested")
+		return
+	}
+	if bytes > softResponseLimit {
+		err = fmt.Errorf("requested bytes exceed limit")
+		return
+	}
+	if len(accounts) > GetStorageRangesRequestCap {
+		err = fmt.Errorf("number of requested accounts exceeds limit")
+		return
+	}
+	req := newGetStorageRangesRequest(root, accounts, origin, limit, bytes)
+	resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+	if err != nil {
+		return
+	}
+	var storages []*message.StoragesData
+	storages, proof, err = req.getStorageRangesFromResponse(resp)
+	if err != nil {
+		return
+	}
+	// Flatten the per-account storage sets into a single slot list
+	slots = make([]*message.StorageData, 0)
+	for _, storage := range storages {
+		slots = append(slots, storage.Data...)
+	}
+	return
+}
+
+// GetByteCodes does getByteCodes through the sync stream protocol.
+// It returns the codes, the target stream id, and an error.
+func (p *Protocol) GetByteCodes(ctx context.Context, hs []common.Hash, bytes uint64, opts ...Option) (codes [][]byte, stid sttypes.StreamID, err error) {
+	timer := p.doMetricClientRequest("getByteCodes")
+	defer p.doMetricPostClientRequest("getByteCodes", err, timer)
+
+	if bytes == 0 {
+		err = fmt.Errorf("zero bytecode bytes requested")
+		return
+	}
+	if bytes > softResponseLimit {
+		err = fmt.Errorf("requested bytes exceed limit")
+		return
+	}
+	if len(hs) > GetByteCodesRequestCap {
+		err = fmt.Errorf("number of requested hashes exceeds limit")
+		return
+	}
+	req := newGetByteCodesRequest(hs, bytes)
+	resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+	if err != nil {
+		return
+	}
+	codes, err = req.getByteCodesFromResponse(resp)
+	return
+}
+
+// GetTrieNodes does getTrieNodes through the sync stream protocol.
+// It returns the nodes, the target stream id, and an error.
+func (p *Protocol) GetTrieNodes(ctx context.Context, root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, opts ...Option) (nodes [][]byte, stid sttypes.StreamID, err error) {
+	timer := p.doMetricClientRequest("getTrieNodes")
+	defer p.doMetricPostClientRequest("getTrieNodes", err, timer)
+
+	if bytes == 0 {
+		err = fmt.Errorf("zero trie nodes bytes requested")
+		return
+	}
+	if bytes > softResponseLimit {
+		err = fmt.Errorf("requested bytes exceed limit")
+		return
+	}
+	if len(paths) > GetTrieNodesRequestCap {
+		err = fmt.Errorf("number of requested paths exceeds limit")
+		return
+	}
+	req := newGetTrieNodesRequest(root, paths, bytes)
+	resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+ if err != nil { + return + } + nodes, err = req.getTrieNodesFromResponse(resp) + return +} + // getBlocksByNumberRequest is the request for get block by numbers which implements // sttypes.Request interface type getBlocksByNumberRequest struct { @@ -571,3 +686,305 @@ func (req *getReceiptsRequest) parseGetReceiptsBytes(resp *syncResponse) ([]type } return receipts, nil } + +// getAccountRangeRequest is the request for get account ranges which implements +// sttypes.Request interface +type getAccountRangeRequest struct { + root common.Hash + origin common.Hash + limit common.Hash + bytes uint64 + pbReq *syncpb.Request +} + +func newGetAccountRangeRequest(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *getAccountRangeRequest { + pbReq := syncpb.MakeGetAccountRangeRequest(root, origin, limit, bytes) + return &getAccountRangeRequest{ + root: root, + origin: origin, + limit: limit, + bytes: bytes, + pbReq: pbReq, + } +} + +func (req *getAccountRangeRequest) ReqID() uint64 { + return req.pbReq.GetReqId() +} + +func (req *getAccountRangeRequest) SetReqID(val uint64) { + req.pbReq.ReqId = val +} + +func (req *getAccountRangeRequest) String() string { + ss := make([]string, 0, 4) + ss = append(ss, req.root.String()) + ss = append(ss, req.origin.String()) + ss = append(ss, req.limit.String()) + ss = append(ss, fmt.Sprint(req.bytes)) + rqStr := strings.Join(ss, ",") + return fmt.Sprintf("REQUEST [GetAccountRange: %s]", rqStr) +} + +func (req *getAccountRangeRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool { + return target.Version.GreaterThanOrEqual(MinVersion) +} + +func (req *getAccountRangeRequest) Encode() ([]byte, error) { + msg := syncpb.MakeMessageFromRequest(req.pbReq) + return protobuf.Marshal(msg) +} + +// []*message.AccountData, []common.Hash +func (req *getAccountRangeRequest) getAccountRangeFromResponse(resp sttypes.Response) ([]*message.AccountData, []common.Hash, error) { + sResp, ok := resp.(*syncResponse) + if !ok || sResp == nil { + return nil, nil, errors.New("not sync response") + } + return req.parseGetAccountRangeResponse(sResp) +} + +func (req *getAccountRangeRequest) parseGetAccountRangeResponse(resp *syncResponse) ([]*message.AccountData, []common.Hash, error) { + if errResp := resp.pb.GetErrorResponse(); errResp != nil { + return nil, nil, errors.New(errResp.Error) + } + grResp := resp.pb.GetGetAccountRangeResponse() + if grResp == nil { + return nil, nil, errors.New("response not GetAccountRange") + } + proofs := make([]common.Hash, 0) + for _, proofBytes := range grResp.Proof { + var proof common.Hash + if err := rlp.DecodeBytes(proofBytes, &proof); err != nil { + return nil, nil, errors.Wrap(err, "[GetAccountRangeResponse]") + } + proofs = append(proofs, proof) + } + return grResp.Accounts, proofs, nil +} + +// getStorageRangesRequest is the request for get storage ranges which implements +// sttypes.Request interface +type getStorageRangesRequest struct { + root common.Hash + accounts []common.Hash + origin common.Hash + limit common.Hash + bytes uint64 + pbReq *syncpb.Request +} + +func newGetStorageRangesRequest(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *getStorageRangesRequest { + pbReq := syncpb.MakeGetStorageRangesRequest(root, accounts, origin, limit, bytes) + return &getStorageRangesRequest{ + root: root, + accounts: accounts, + origin: origin, + limit: limit, + bytes: bytes, + pbReq: pbReq, + } +} + +func (req *getStorageRangesRequest) ReqID() uint64 { + return 
req.pbReq.GetReqId() +} + +func (req *getStorageRangesRequest) SetReqID(val uint64) { + req.pbReq.ReqId = val +} + +func (req *getStorageRangesRequest) String() string { + ss := make([]string, 0, 4) + ss = append(ss, req.root.String()) + for _, acc := range req.accounts { + ss = append(ss, acc.String()) + } + ss = append(ss, req.origin.String()) + ss = append(ss, req.limit.String()) + ss = append(ss, fmt.Sprint(req.bytes)) + rqStr := strings.Join(ss, ",") + return fmt.Sprintf("REQUEST [GetStorageRanges: %s]", rqStr) +} + +func (req *getStorageRangesRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool { + return target.Version.GreaterThanOrEqual(MinVersion) +} + +func (req *getStorageRangesRequest) Encode() ([]byte, error) { + msg := syncpb.MakeMessageFromRequest(req.pbReq) + return protobuf.Marshal(msg) +} + +// []*message.AccountData, []common.Hash +func (req *getStorageRangesRequest) getStorageRangesFromResponse(resp sttypes.Response) ([]*message.StoragesData, []common.Hash, error) { + sResp, ok := resp.(*syncResponse) + if !ok || sResp == nil { + return nil, nil, errors.New("not sync response") + } + return req.parseGetStorageRangesResponse(sResp) +} + +func (req *getStorageRangesRequest) parseGetStorageRangesResponse(resp *syncResponse) ([]*message.StoragesData, []common.Hash, error) { + if errResp := resp.pb.GetErrorResponse(); errResp != nil { + return nil, nil, errors.New(errResp.Error) + } + grResp := resp.pb.GetGetStorageRangesResponse() + if grResp == nil { + return nil, nil, errors.New("response not GetStorageRanges") + } + proofs := make([]common.Hash, 0) + for _, proofBytes := range grResp.Proof { + var proof common.Hash + if err := rlp.DecodeBytes(proofBytes, &proof); err != nil { + return nil, nil, errors.Wrap(err, "[GetStorageRangesResponse]") + } + proofs = append(proofs, proof) + } + return grResp.Slots, proofs, nil +} + +// getByteCodesRequest is the request for get code bytes which implements +// sttypes.Request interface +type getByteCodesRequest struct { + hashes []common.Hash + bytes uint64 + pbReq *syncpb.Request +} + +func newGetByteCodesRequest(hashes []common.Hash, bytes uint64) *getByteCodesRequest { + pbReq := syncpb.MakeGetByteCodesRequest(hashes, bytes) + return &getByteCodesRequest{ + hashes: hashes, + bytes: bytes, + pbReq: pbReq, + } +} + +func (req *getByteCodesRequest) ReqID() uint64 { + return req.pbReq.GetReqId() +} + +func (req *getByteCodesRequest) SetReqID(val uint64) { + req.pbReq.ReqId = val +} + +func (req *getByteCodesRequest) String() string { + ss := make([]string, 0, 4) + for _, h := range req.hashes { + ss = append(ss, h.String()) + } + ss = append(ss, fmt.Sprint(req.bytes)) + rqStr := strings.Join(ss, ",") + return fmt.Sprintf("REQUEST [GetByteCodes: %s]", rqStr) +} + +func (req *getByteCodesRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool { + return target.Version.GreaterThanOrEqual(MinVersion) +} + +func (req *getByteCodesRequest) Encode() ([]byte, error) { + msg := syncpb.MakeMessageFromRequest(req.pbReq) + return protobuf.Marshal(msg) +} + +func (req *getByteCodesRequest) getByteCodesFromResponse(resp sttypes.Response) ([][]byte, error) { + sResp, ok := resp.(*syncResponse) + if !ok || sResp == nil { + return nil, errors.New("not sync response") + } + return req.parseGetByteCodesResponse(sResp) +} + +func (req *getByteCodesRequest) parseGetByteCodesResponse(resp *syncResponse) ([][]byte, error) { + if errResp := resp.pb.GetErrorResponse(); errResp != nil { + return nil, errors.New(errResp.Error) + } + grResp := 
resp.pb.GetGetByteCodesResponse() + if grResp == nil { + return nil, errors.New("response not GetByteCodes") + } + codes := make([][]byte, 0) + for _, codeBytes := range grResp.Codes { + var code []byte + if err := rlp.DecodeBytes(codeBytes, &code); err != nil { + return nil, errors.Wrap(err, "[GetByteCodesResponse]") + } + codes = append(codes, code) + } + return codes, nil +} + +// getTrieNodesRequest is the request for get trie nodes which implements +// sttypes.Request interface +type getTrieNodesRequest struct { + root common.Hash + paths []*message.TrieNodePathSet + bytes uint64 + pbReq *syncpb.Request +} + +func newGetTrieNodesRequest(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64) *getTrieNodesRequest { + pbReq := syncpb.MakeGetTrieNodesRequest(root, paths, bytes) + return &getTrieNodesRequest{ + root: root, + paths: paths, + bytes: bytes, + pbReq: pbReq, + } +} + +func (req *getTrieNodesRequest) ReqID() uint64 { + return req.pbReq.GetReqId() +} + +func (req *getTrieNodesRequest) SetReqID(val uint64) { + req.pbReq.ReqId = val +} + +func (req *getTrieNodesRequest) String() string { + ss := make([]string, 0, 4) + ss = append(ss, req.root.String()) + for _, p := range req.paths { + ss = append(ss, p.String()) + } + ss = append(ss, fmt.Sprint(req.bytes)) + rqStr := strings.Join(ss, ",") + return fmt.Sprintf("REQUEST [GetTrieNodes: %s]", rqStr) +} + +func (req *getTrieNodesRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool { + return target.Version.GreaterThanOrEqual(MinVersion) +} + +func (req *getTrieNodesRequest) Encode() ([]byte, error) { + msg := syncpb.MakeMessageFromRequest(req.pbReq) + return protobuf.Marshal(msg) +} + +func (req *getTrieNodesRequest) getTrieNodesFromResponse(resp sttypes.Response) ([][]byte, error) { + sResp, ok := resp.(*syncResponse) + if !ok || sResp == nil { + return nil, errors.New("not sync response") + } + return req.parseGetTrieNodesResponse(sResp) +} + +func (req *getTrieNodesRequest) parseGetTrieNodesResponse(resp *syncResponse) ([][]byte, error) { + if errResp := resp.pb.GetErrorResponse(); errResp != nil { + return nil, errors.New(errResp.Error) + } + grResp := resp.pb.GetGetTrieNodesResponse() + if grResp == nil { + return nil, errors.New("response not GetTrieNodes") + } + nodes := make([][]byte, 0) + for _, codeBytes := range grResp.Nodes { + var code []byte + if err := rlp.DecodeBytes(codeBytes, &code); err != nil { + return nil, errors.Wrap(err, "[GetTrieNodesResponse]") + } + nodes = append(nodes, code) + } + return nodes, nil +} diff --git a/p2p/stream/protocols/sync/client_test.go b/p2p/stream/protocols/sync/client_test.go index edfa126d0b..611afd7610 100644 --- a/p2p/stream/protocols/sync/client_test.go +++ b/p2p/stream/protocols/sync/client_test.go @@ -25,6 +25,8 @@ var ( _ sttypes.Request = &getBlockNumberRequest{} _ sttypes.Request = &getReceiptsRequest{} _ sttypes.Response = &syncResponse{&syncpb.Response{}} + // MaxHash represents the maximum possible hash value. 
+ MaxHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") ) var ( @@ -67,6 +69,69 @@ var ( testNodeDataResponse = syncpb.MakeGetNodeDataResponse(0, [][]byte{testNodeDataBytes}) + account1 = common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844") + account2 = common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844") + resAccounts = []common.Hash{account1, account2} + + accountsData = []*message.AccountData{ + &syncpb.AccountData{ + Hash: account1[:], + Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(), + }, + &syncpb.AccountData{ + Hash: account2[:], + Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(), + }, + } + + slots = []*syncpb.StoragesData{ + &syncpb.StoragesData{ + Data: []*syncpb.StorageData{ + &syncpb.StorageData{ + Hash: account1[:], + Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(), + }, + }, + }, + &syncpb.StoragesData{ + Data: []*syncpb.StorageData{ + &syncpb.StorageData{ + Hash: account2[:], + Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(), + }, + }, + }, + } + + proofBytes1, _ = rlp.EncodeToBytes(account1) + proofBytes2, _ = rlp.EncodeToBytes(account2) + proof = [][]byte{proofBytes1, proofBytes2} + + codeBytes1, _ = rlp.EncodeToBytes(account1) + codeBytes2, _ = rlp.EncodeToBytes(account2) + testByteCodes = [][]byte{codeBytes1, codeBytes2} + dataNodeBytes1, _ = rlp.EncodeToBytes(numberToHash(1).Bytes()) + dataNodeBytes2, _ = rlp.EncodeToBytes(numberToHash(2).Bytes()) + testTrieNodes = [][]byte{dataNodeBytes1, dataNodeBytes2} + testPathSet = [][]byte{numberToHash(19850928).Bytes(), numberToHash(13640607).Bytes()} + + testPaths = []*syncpb.TrieNodePathSet{ + &syncpb.TrieNodePathSet{ + Pathset: testPathSet, + }, + &syncpb.TrieNodePathSet{ + Pathset: testPathSet, + }, + } + + testAccountRangeResponse = syncpb.MakeGetAccountRangeResponse(0, accountsData, proof) + + testStorageRangesResponse = syncpb.MakeGetStorageRangesResponse(0, slots, proof) + + testByteCodesResponse = syncpb.MakeGetByteCodesResponse(0, testByteCodes) + + testTrieNodesResponse = syncpb.MakeGetTrieNodesResponse(0, testTrieNodes) + testErrorResponse = syncpb.MakeErrorResponse(0, errors.New("test error")) ) @@ -428,6 +493,267 @@ func TestProtocol_GetNodeData(t *testing.T) { } } +func TestProtocol_GetAccountRange(t *testing.T) { + var ( + root = numberToHash(1985082913640607) + ffHash = MaxHash + zero = common.Hash{} + ) + + tests := []struct { + getResponse getResponseFn + expErr error + expStID sttypes.StreamID + }{ + { + getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) { + return &syncResponse{ + pb: testAccountRangeResponse, + }, makeTestStreamID(0) + }, + expErr: nil, + expStID: makeTestStreamID(0), + }, + { + getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) { + return &syncResponse{ + pb: testBlockResponse, + }, makeTestStreamID(0) + }, + expErr: errors.New("response not GetAccountRange"), + expStID: makeTestStreamID(0), + }, + { + getResponse: nil, + expErr: errors.New("get response error"), + expStID: "", + }, + { + getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) { + return &syncResponse{ + pb: testErrorResponse, + }, makeTestStreamID(0) + }, + expErr: errors.New("test error"), + expStID: makeTestStreamID(0), + 
},
+	}
+
+	for i, test := range tests {
+		protocol := makeTestProtocol(test.getResponse)
+		accounts, proof, stid, err := protocol.GetAccountRange(context.Background(), root, zero, ffHash, uint64(100))
+
+		if assErr := assertError(err, test.expErr); assErr != nil {
+			t.Errorf("Test %v: %v", i, assErr)
+			continue
+		}
+		if stid != test.expStID {
+			t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID)
+		}
+		if test.expErr == nil {
+			if len(accounts) != len(proof) {
+				t.Errorf("Test %v: accounts size (%d) not equal to proof size (%d)", i, len(accounts), len(proof))
+			}
+		}
+	}
+}
+
+func TestProtocol_GetStorageRanges(t *testing.T) {
+	var (
+		root         = numberToHash(1985082913640607)
+		firstKey     = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+		secondKey    = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
+		testAccounts = []common.Hash{secondKey, firstKey}
+		ffHash       = MaxHash
+		zero         = common.Hash{}
+	)
+
+	tests := []struct {
+		getResponse getResponseFn
+		expErr      error
+		expStID     sttypes.StreamID
+	}{
+		{
+			getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+				return &syncResponse{
+					pb: testStorageRangesResponse,
+				}, makeTestStreamID(0)
+			},
+			expErr:  nil,
+			expStID: makeTestStreamID(0),
+		},
+		{
+			getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+				return &syncResponse{
+					pb: testBlockResponse,
+				}, makeTestStreamID(0)
+			},
+			expErr:  errors.New("response not GetStorageRanges"),
+			expStID: makeTestStreamID(0),
+		},
+		{
+			getResponse: nil,
+			expErr:      errors.New("get response error"),
+			expStID:     "",
+		},
+		{
+			getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+				return &syncResponse{
+					pb: testErrorResponse,
+				}, makeTestStreamID(0)
+			},
+			expErr:  errors.New("test error"),
+			expStID: makeTestStreamID(0),
+		},
+	}
+
+	for i, test := range tests {
+		protocol := makeTestProtocol(test.getResponse)
+		slots, proof, stid, err := protocol.GetStorageRanges(context.Background(), root, testAccounts, zero, ffHash, uint64(100))
+
+		if assErr := assertError(err, test.expErr); assErr != nil {
+			t.Errorf("Test %v: %v", i, assErr)
+			continue
+		}
+		if stid != test.expStID {
+			t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID)
+		}
+		if test.expErr == nil {
+			if len(slots) != len(testAccounts) {
+				t.Errorf("Test %v: slots size not equal to accounts size", i)
+			}
+			if len(slots) != len(proof) {
+				t.Errorf("Test %v: slots size not equal to proof size", i)
+			}
+		}
+	}
+}
+
+func TestProtocol_GetByteCodes(t *testing.T) {
+	tests := []struct {
+		getResponse getResponseFn
+		expErr      error
+		expStID     sttypes.StreamID
+	}{
+		{
+			getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+				return &syncResponse{
+					pb: testByteCodesResponse,
+				}, makeTestStreamID(0)
+			},
+			expErr:  nil,
+			expStID: makeTestStreamID(0),
+		},
+		{
+			getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+				return &syncResponse{
+					pb: testBlockResponse,
+				}, makeTestStreamID(0)
+			},
+			expErr:  errors.New("response not GetByteCodes"),
+			expStID: makeTestStreamID(0),
+		},
+		{
+			getResponse: nil,
+			expErr:      errors.New("get response error"),
+			expStID:     "",
+		},
+		{
+			getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+				return &syncResponse{
+					pb: testErrorResponse,
+				}, 
makeTestStreamID(0) + }, + expErr: errors.New("test error"), + expStID: makeTestStreamID(0), + }, + } + + for i, test := range tests { + protocol := makeTestProtocol(test.getResponse) + codes, stid, err := protocol.GetByteCodes(context.Background(), []common.Hash{numberToHash(19850829)}, uint64(500)) + + if assErr := assertError(err, test.expErr); assErr != nil { + t.Errorf("Test %v: %v", i, assErr) + continue + } + if stid != test.expStID { + t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID) + } + if test.expErr == nil { + if len(codes) != 2 { + t.Errorf("Test %v: size not 2", i) + } + } + } +} + +func TestProtocol_GetTrieNodes(t *testing.T) { + var ( + root = numberToHash(1985082913640607) + ) + + tests := []struct { + getResponse getResponseFn + expErr error + expStID sttypes.StreamID + }{ + { + getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) { + return &syncResponse{ + pb: testTrieNodesResponse, + }, makeTestStreamID(0) + }, + expErr: nil, + expStID: makeTestStreamID(0), + }, + { + getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) { + return &syncResponse{ + pb: testBlockResponse, + }, makeTestStreamID(0) + }, + expErr: errors.New("response not GetTrieNodes"), + expStID: makeTestStreamID(0), + }, + { + getResponse: nil, + expErr: errors.New("get response error"), + expStID: "", + }, + { + getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) { + return &syncResponse{ + pb: testErrorResponse, + }, makeTestStreamID(0) + }, + expErr: errors.New("test error"), + expStID: makeTestStreamID(0), + }, + } + + for i, test := range tests { + protocol := makeTestProtocol(test.getResponse) + nodes, stid, err := protocol.GetTrieNodes(context.Background(), root, testPaths, uint64(500)) + + if assErr := assertError(err, test.expErr); assErr != nil { + t.Errorf("Test %v: %v", i, assErr) + continue + } + if stid != test.expStID { + t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID) + } + if test.expErr == nil { + if len(nodes) != 2 { + t.Errorf("Test %v: size not 2", i) + } + } + } +} + type getResponseFn func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) type testHostRequestManager struct { diff --git a/p2p/stream/protocols/sync/const.go b/p2p/stream/protocols/sync/const.go index b4cf4410af..d606a46751 100644 --- a/p2p/stream/protocols/sync/const.go +++ b/p2p/stream/protocols/sync/const.go @@ -25,6 +25,39 @@ const ( // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types. GetReceiptsCap = 128 + // GetStorageRangesRequestCap is the cap of request of single GetStorageRanges request + // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types. + GetStorageRangesRequestCap = 256 + + // GetByteCodesRequestCap is the cap of request of single GetByteCodes request + // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types. + GetByteCodesRequestCap = 128 + + // GetTrieNodesRequestCap is the cap of request of single GetTrieNodes request + // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types. + GetTrieNodesRequestCap = 128 + + // stateLookupSlack defines the ratio by how much a state response can exceed + // the requested limit in order to try and avoid breaking up contracts into + // multiple packages and proving them. 
+ stateLookupSlack = 0.1 + + // softResponseLimit is the target maximum size of replies to data retrievals. + softResponseLimit = 2 * 1024 * 1024 + + // maxCodeLookups is the maximum number of bytecodes to serve. This number is + // there to limit the number of disk lookups. + maxCodeLookups = 1024 + + // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This + // number is there to limit the number of disk lookups. + maxTrieNodeLookups = 1024 + + // maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes. + // If we spend too much time, then it's a fairly high chance of timing out + // at the remote side, which means all the work is in vain. + maxTrieNodeTimeSpent = 5 * time.Second + // MaxStreamFailures is the maximum allowed failures before stream gets removed MaxStreamFailures = 5 diff --git a/p2p/stream/protocols/sync/message/compose.go b/p2p/stream/protocols/sync/message/compose.go index 2c0c367098..3be09da5b7 100644 --- a/p2p/stream/protocols/sync/message/compose.go +++ b/p2p/stream/protocols/sync/message/compose.go @@ -68,6 +68,60 @@ func MakeGetReceiptsRequest(hashes []common.Hash) *Request { } } +// MakeGetAccountRangeRequest makes the GetAccountRange request +func MakeGetAccountRangeRequest(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *Request { + return &Request{ + Request: &Request_GetAccountRangeRequest{ + GetAccountRangeRequest: &GetAccountRangeRequest{ + Root: root[:], + Origin: origin[:], + Limit: limit[:], + Bytes: bytes, + }, + }, + } +} + +// MakeGetStorageRangesRequest makes the GetStorageRanges request +func MakeGetStorageRangesRequest(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *Request { + return &Request{ + Request: &Request_GetStorageRangesRequest{ + GetStorageRangesRequest: &GetStorageRangesRequest{ + Root: root[:], + Accounts: hashesToBytes(accounts), + Origin: origin[:], + Limit: limit[:], + Bytes: bytes, + }, + }, + } +} + +// MakeGetByteCodesRequest makes the GetByteCodes request +func MakeGetByteCodesRequest(hashes []common.Hash, bytes uint64) *Request { + return &Request{ + Request: &Request_GetByteCodesRequest{ + GetByteCodesRequest: &GetByteCodesRequest{ + Hashes: hashesToBytes(hashes), + Bytes: bytes, + }, + }, + } +} + +// MakeGetTrieNodesRequest makes the GetTrieNodes request +func MakeGetTrieNodesRequest(root common.Hash, paths []*TrieNodePathSet, bytes uint64) *Request { + return &Request{ + Request: &Request_GetTrieNodesRequest{ + GetTrieNodesRequest: &GetTrieNodesRequest{ + Root: root[:], + Paths: paths, + Bytes: bytes, + }, + }, + } +} + // MakeErrorResponse makes the error response func MakeErrorResponseMessage(rid uint64, err error) *Message { resp := MakeErrorResponse(rid, err) @@ -196,6 +250,80 @@ func MakeGetReceiptsResponse(rid uint64, receipts map[uint64]*Receipts) *Respons } } +// MakeGetAccountRangeResponseMessage makes the GetAccountRangeResponse of Message type +func MakeGetAccountRangeResponseMessage(rid uint64, accounts []*AccountData, proof [][]byte) *Message { + resp := MakeGetAccountRangeResponse(rid, accounts, proof) + return makeMessageFromResponse(resp) +} + +// MakeGetAccountRangeResponse make the GetAccountRangeResponse of Response type +func MakeGetAccountRangeResponse(rid uint64, accounts []*AccountData, proof [][]byte) *Response { + return &Response{ + ReqId: rid, + Response: &Response_GetAccountRangeResponse{ + GetAccountRangeResponse: &GetAccountRangeResponse{ + Accounts: accounts, + Proof: proof, + 
}, + }, + } +} + +// MakeGetStorageRangesResponseMessage makes the GetStorageRangesResponse of Message type +func MakeGetStorageRangesResponseMessage(rid uint64, slots []*StoragesData, proof [][]byte) *Message { + resp := MakeGetStorageRangesResponse(rid, slots, proof) + return makeMessageFromResponse(resp) +} + +// MakeGetStorageRangesResponse make the GetStorageRangesResponse of Response type +func MakeGetStorageRangesResponse(rid uint64, slots []*StoragesData, proof [][]byte) *Response { + return &Response{ + ReqId: rid, + Response: &Response_GetStorageRangesResponse{ + GetStorageRangesResponse: &GetStorageRangesResponse{ + Slots: slots, + Proof: proof, + }, + }, + } +} + +// MakeGetByteCodesResponseMessage makes the GetByteCodesResponse of Message type +func MakeGetByteCodesResponseMessage(rid uint64, codes [][]byte) *Message { + resp := MakeGetByteCodesResponse(rid, codes) + return makeMessageFromResponse(resp) +} + +// MakeGetByteCodesResponse make the GetByteCodesResponse of Response type +func MakeGetByteCodesResponse(rid uint64, codes [][]byte) *Response { + return &Response{ + ReqId: rid, + Response: &Response_GetByteCodesResponse{ + GetByteCodesResponse: &GetByteCodesResponse{ + Codes: codes, + }, + }, + } +} + +// MakeGetTrieNodesResponseMessage makes the GetTrieNodesResponse of Message type +func MakeGetTrieNodesResponseMessage(rid uint64, nodes [][]byte) *Message { + resp := MakeGetTrieNodesResponse(rid, nodes) + return makeMessageFromResponse(resp) +} + +// MakeGetTrieNodesResponse make the GetTrieNodesResponse of Response type +func MakeGetTrieNodesResponse(rid uint64, nodes [][]byte) *Response { + return &Response{ + ReqId: rid, + Response: &Response_GetTrieNodesResponse{ + GetTrieNodesResponse: &GetTrieNodesResponse{ + Nodes: nodes, + }, + }, + } +} + // MakeMessageFromRequest makes a message from the request func MakeMessageFromRequest(req *Request) *Message { return &Message{ diff --git a/p2p/stream/protocols/sync/message/msg.pb.go b/p2p/stream/protocols/sync/message/msg.pb.go index be7a95995f..db37f7c7da 100644 --- a/p2p/stream/protocols/sync/message/msg.pb.go +++ b/p2p/stream/protocols/sync/message/msg.pb.go @@ -115,6 +115,10 @@ type Request struct { // *Request_GetBlocksByHashesRequest // *Request_GetNodeDataRequest // *Request_GetReceiptsRequest + // *Request_GetAccountRangeRequest + // *Request_GetStorageRangesRequest + // *Request_GetByteCodesRequest + // *Request_GetTrieNodesRequest Request isRequest_Request `protobuf_oneof:"request"` } @@ -206,6 +210,34 @@ func (x *Request) GetGetReceiptsRequest() *GetReceiptsRequest { return nil } +func (x *Request) GetGetAccountRangeRequest() *GetAccountRangeRequest { + if x, ok := x.GetRequest().(*Request_GetAccountRangeRequest); ok { + return x.GetAccountRangeRequest + } + return nil +} + +func (x *Request) GetGetStorageRangesRequest() *GetStorageRangesRequest { + if x, ok := x.GetRequest().(*Request_GetStorageRangesRequest); ok { + return x.GetStorageRangesRequest + } + return nil +} + +func (x *Request) GetGetByteCodesRequest() *GetByteCodesRequest { + if x, ok := x.GetRequest().(*Request_GetByteCodesRequest); ok { + return x.GetByteCodesRequest + } + return nil +} + +func (x *Request) GetGetTrieNodesRequest() *GetTrieNodesRequest { + if x, ok := x.GetRequest().(*Request_GetTrieNodesRequest); ok { + return x.GetTrieNodesRequest + } + return nil +} + type isRequest_Request interface { isRequest_Request() } @@ -234,6 +266,22 @@ type Request_GetReceiptsRequest struct { GetReceiptsRequest *GetReceiptsRequest 
`protobuf:"bytes,7,opt,name=get_receipts_request,json=getReceiptsRequest,proto3,oneof"` } +type Request_GetAccountRangeRequest struct { + GetAccountRangeRequest *GetAccountRangeRequest `protobuf:"bytes,8,opt,name=get_account_range_request,json=getAccountRangeRequest,proto3,oneof"` +} + +type Request_GetStorageRangesRequest struct { + GetStorageRangesRequest *GetStorageRangesRequest `protobuf:"bytes,9,opt,name=get_storage_ranges_request,json=getStorageRangesRequest,proto3,oneof"` +} + +type Request_GetByteCodesRequest struct { + GetByteCodesRequest *GetByteCodesRequest `protobuf:"bytes,10,opt,name=get_byte_codes_request,json=getByteCodesRequest,proto3,oneof"` +} + +type Request_GetTrieNodesRequest struct { + GetTrieNodesRequest *GetTrieNodesRequest `protobuf:"bytes,11,opt,name=get_trie_nodes_request,json=getTrieNodesRequest,proto3,oneof"` +} + func (*Request_GetBlockNumberRequest) isRequest_Request() {} func (*Request_GetBlockHashesRequest) isRequest_Request() {} @@ -246,6 +294,14 @@ func (*Request_GetNodeDataRequest) isRequest_Request() {} func (*Request_GetReceiptsRequest) isRequest_Request() {} +func (*Request_GetAccountRangeRequest) isRequest_Request() {} + +func (*Request_GetStorageRangesRequest) isRequest_Request() {} + +func (*Request_GetByteCodesRequest) isRequest_Request() {} + +func (*Request_GetTrieNodesRequest) isRequest_Request() {} + type GetBlockNumberRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -519,26 +575,19 @@ func (x *GetReceiptsRequest) GetBlockHashes() [][]byte { return nil } -type Response struct { +type GetAccountRangeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReqId uint64 `protobuf:"varint,1,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"` - // Types that are assignable to Response: - // - // *Response_ErrorResponse - // *Response_GetBlockNumberResponse - // *Response_GetBlockHashesResponse - // *Response_GetBlocksByNumResponse - // *Response_GetBlocksByHashesResponse - // *Response_GetNodeDataResponse - // *Response_GetReceiptsResponse - Response isResponse_Response `protobuf_oneof:"response"` + Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"` + Origin []byte `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin,omitempty"` + Limit []byte `protobuf:"bytes,3,opt,name=limit,proto3" json:"limit,omitempty"` + Bytes uint64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"` } -func (x *Response) Reset() { - *x = Response{} +func (x *GetAccountRangeRequest) Reset() { + *x = GetAccountRangeRequest{} if protoimpl.UnsafeEnabled { mi := &file_msg_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -546,13 +595,13 @@ func (x *Response) Reset() { } } -func (x *Response) String() string { +func (x *GetAccountRangeRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Response) ProtoMessage() {} +func (*GetAccountRangeRequest) ProtoMessage() {} -func (x *Response) ProtoReflect() protoreflect.Message { +func (x *GetAccountRangeRequest) ProtoReflect() protoreflect.Message { mi := &file_msg_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -564,130 +613,53 @@ func (x *Response) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
-func (*Response) Descriptor() ([]byte, []int) { +// Deprecated: Use GetAccountRangeRequest.ProtoReflect.Descriptor instead. +func (*GetAccountRangeRequest) Descriptor() ([]byte, []int) { return file_msg_proto_rawDescGZIP(), []int{8} } -func (x *Response) GetReqId() uint64 { +func (x *GetAccountRangeRequest) GetRoot() []byte { if x != nil { - return x.ReqId - } - return 0 -} - -func (m *Response) GetResponse() isResponse_Response { - if m != nil { - return m.Response - } - return nil -} - -func (x *Response) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetResponse().(*Response_ErrorResponse); ok { - return x.ErrorResponse - } - return nil -} - -func (x *Response) GetGetBlockNumberResponse() *GetBlockNumberResponse { - if x, ok := x.GetResponse().(*Response_GetBlockNumberResponse); ok { - return x.GetBlockNumberResponse - } - return nil -} - -func (x *Response) GetGetBlockHashesResponse() *GetBlockHashesResponse { - if x, ok := x.GetResponse().(*Response_GetBlockHashesResponse); ok { - return x.GetBlockHashesResponse - } - return nil -} - -func (x *Response) GetGetBlocksByNumResponse() *GetBlocksByNumResponse { - if x, ok := x.GetResponse().(*Response_GetBlocksByNumResponse); ok { - return x.GetBlocksByNumResponse + return x.Root } return nil } -func (x *Response) GetGetBlocksByHashesResponse() *GetBlocksByHashesResponse { - if x, ok := x.GetResponse().(*Response_GetBlocksByHashesResponse); ok { - return x.GetBlocksByHashesResponse +func (x *GetAccountRangeRequest) GetOrigin() []byte { + if x != nil { + return x.Origin } return nil } -func (x *Response) GetGetNodeDataResponse() *GetNodeDataResponse { - if x, ok := x.GetResponse().(*Response_GetNodeDataResponse); ok { - return x.GetNodeDataResponse +func (x *GetAccountRangeRequest) GetLimit() []byte { + if x != nil { + return x.Limit } return nil } -func (x *Response) GetGetReceiptsResponse() *GetReceiptsResponse { - if x, ok := x.GetResponse().(*Response_GetReceiptsResponse); ok { - return x.GetReceiptsResponse +func (x *GetAccountRangeRequest) GetBytes() uint64 { + if x != nil { + return x.Bytes } - return nil -} - -type isResponse_Response interface { - isResponse_Response() -} - -type Response_ErrorResponse struct { - ErrorResponse *ErrorResponse `protobuf:"bytes,2,opt,name=error_response,json=errorResponse,proto3,oneof"` -} - -type Response_GetBlockNumberResponse struct { - GetBlockNumberResponse *GetBlockNumberResponse `protobuf:"bytes,3,opt,name=get_block_number_response,json=getBlockNumberResponse,proto3,oneof"` -} - -type Response_GetBlockHashesResponse struct { - GetBlockHashesResponse *GetBlockHashesResponse `protobuf:"bytes,4,opt,name=get_block_hashes_response,json=getBlockHashesResponse,proto3,oneof"` -} - -type Response_GetBlocksByNumResponse struct { - GetBlocksByNumResponse *GetBlocksByNumResponse `protobuf:"bytes,5,opt,name=get_blocks_by_num_response,json=getBlocksByNumResponse,proto3,oneof"` -} - -type Response_GetBlocksByHashesResponse struct { - GetBlocksByHashesResponse *GetBlocksByHashesResponse `protobuf:"bytes,6,opt,name=get_blocks_by_hashes_response,json=getBlocksByHashesResponse,proto3,oneof"` -} - -type Response_GetNodeDataResponse struct { - GetNodeDataResponse *GetNodeDataResponse `protobuf:"bytes,7,opt,name=get_node_data_response,json=getNodeDataResponse,proto3,oneof"` -} - -type Response_GetReceiptsResponse struct { - GetReceiptsResponse *GetReceiptsResponse `protobuf:"bytes,8,opt,name=get_receipts_response,json=getReceiptsResponse,proto3,oneof"` + return 0 } -func (*Response_ErrorResponse) 
isResponse_Response() {} - -func (*Response_GetBlockNumberResponse) isResponse_Response() {} - -func (*Response_GetBlockHashesResponse) isResponse_Response() {} - -func (*Response_GetBlocksByNumResponse) isResponse_Response() {} - -func (*Response_GetBlocksByHashesResponse) isResponse_Response() {} - -func (*Response_GetNodeDataResponse) isResponse_Response() {} - -func (*Response_GetReceiptsResponse) isResponse_Response() {} - -type ErrorResponse struct { +type GetStorageRangesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"` + Accounts [][]byte `protobuf:"bytes,2,rep,name=accounts,proto3" json:"accounts,omitempty"` + Origin []byte `protobuf:"bytes,3,opt,name=origin,proto3" json:"origin,omitempty"` + Limit []byte `protobuf:"bytes,4,opt,name=limit,proto3" json:"limit,omitempty"` + Bytes uint64 `protobuf:"varint,5,opt,name=bytes,proto3" json:"bytes,omitempty"` } -func (x *ErrorResponse) Reset() { - *x = ErrorResponse{} +func (x *GetStorageRangesRequest) Reset() { + *x = GetStorageRangesRequest{} if protoimpl.UnsafeEnabled { mi := &file_msg_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -695,13 +667,13 @@ func (x *ErrorResponse) Reset() { } } -func (x *ErrorResponse) String() string { +func (x *GetStorageRangesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ErrorResponse) ProtoMessage() {} +func (*GetStorageRangesRequest) ProtoMessage() {} -func (x *ErrorResponse) ProtoReflect() protoreflect.Message { +func (x *GetStorageRangesRequest) ProtoReflect() protoreflect.Message { mi := &file_msg_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -713,28 +685,57 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. -func (*ErrorResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetStorageRangesRequest.ProtoReflect.Descriptor instead. 
+func (*GetStorageRangesRequest) Descriptor() ([]byte, []int) { return file_msg_proto_rawDescGZIP(), []int{9} } -func (x *ErrorResponse) GetError() string { +func (x *GetStorageRangesRequest) GetRoot() []byte { if x != nil { - return x.Error + return x.Root } - return "" + return nil } -type GetBlockNumberResponse struct { +func (x *GetStorageRangesRequest) GetAccounts() [][]byte { + if x != nil { + return x.Accounts + } + return nil +} + +func (x *GetStorageRangesRequest) GetOrigin() []byte { + if x != nil { + return x.Origin + } + return nil +} + +func (x *GetStorageRangesRequest) GetLimit() []byte { + if x != nil { + return x.Limit + } + return nil +} + +func (x *GetStorageRangesRequest) GetBytes() uint64 { + if x != nil { + return x.Bytes + } + return 0 +} + +type GetByteCodesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"` + Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` + Bytes uint64 `protobuf:"varint,2,opt,name=bytes,proto3" json:"bytes,omitempty"` } -func (x *GetBlockNumberResponse) Reset() { - *x = GetBlockNumberResponse{} +func (x *GetByteCodesRequest) Reset() { + *x = GetByteCodesRequest{} if protoimpl.UnsafeEnabled { mi := &file_msg_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -742,13 +743,13 @@ func (x *GetBlockNumberResponse) Reset() { } } -func (x *GetBlockNumberResponse) String() string { +func (x *GetByteCodesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBlockNumberResponse) ProtoMessage() {} +func (*GetByteCodesRequest) ProtoMessage() {} -func (x *GetBlockNumberResponse) ProtoReflect() protoreflect.Message { +func (x *GetByteCodesRequest) ProtoReflect() protoreflect.Message { mi := &file_msg_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -760,28 +761,35 @@ func (x *GetBlockNumberResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBlockNumberResponse.ProtoReflect.Descriptor instead. -func (*GetBlockNumberResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetByteCodesRequest.ProtoReflect.Descriptor instead. 
+func (*GetByteCodesRequest) Descriptor() ([]byte, []int) { return file_msg_proto_rawDescGZIP(), []int{10} } -func (x *GetBlockNumberResponse) GetNumber() uint64 { +func (x *GetByteCodesRequest) GetHashes() [][]byte { if x != nil { - return x.Number + return x.Hashes + } + return nil +} + +func (x *GetByteCodesRequest) GetBytes() uint64 { + if x != nil { + return x.Bytes } return 0 } -type GetBlockHashesResponse struct { +type TrieNodePathSet struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` + Pathset [][]byte `protobuf:"bytes,1,rep,name=pathset,proto3" json:"pathset,omitempty"` } -func (x *GetBlockHashesResponse) Reset() { - *x = GetBlockHashesResponse{} +func (x *TrieNodePathSet) Reset() { + *x = TrieNodePathSet{} if protoimpl.UnsafeEnabled { mi := &file_msg_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -789,13 +797,13 @@ func (x *GetBlockHashesResponse) Reset() { } } -func (x *GetBlockHashesResponse) String() string { +func (x *TrieNodePathSet) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBlockHashesResponse) ProtoMessage() {} +func (*TrieNodePathSet) ProtoMessage() {} -func (x *GetBlockHashesResponse) ProtoReflect() protoreflect.Message { +func (x *TrieNodePathSet) ProtoReflect() protoreflect.Message { mi := &file_msg_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -807,29 +815,30 @@ func (x *GetBlockHashesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBlockHashesResponse.ProtoReflect.Descriptor instead. -func (*GetBlockHashesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use TrieNodePathSet.ProtoReflect.Descriptor instead. 
+func (*TrieNodePathSet) Descriptor() ([]byte, []int) { return file_msg_proto_rawDescGZIP(), []int{11} } -func (x *GetBlockHashesResponse) GetHashes() [][]byte { +func (x *TrieNodePathSet) GetPathset() [][]byte { if x != nil { - return x.Hashes + return x.Pathset } return nil } -type GetBlocksByNumResponse struct { +type GetTrieNodesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"` - CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"` + Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"` + Paths []*TrieNodePathSet `protobuf:"bytes,2,rep,name=paths,proto3" json:"paths,omitempty"` + Bytes uint64 `protobuf:"varint,3,opt,name=bytes,proto3" json:"bytes,omitempty"` } -func (x *GetBlocksByNumResponse) Reset() { - *x = GetBlocksByNumResponse{} +func (x *GetTrieNodesRequest) Reset() { + *x = GetTrieNodesRequest{} if protoimpl.UnsafeEnabled { mi := &file_msg_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -837,13 +846,13 @@ func (x *GetBlocksByNumResponse) Reset() { } } -func (x *GetBlocksByNumResponse) String() string { +func (x *GetTrieNodesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBlocksByNumResponse) ProtoMessage() {} +func (*GetTrieNodesRequest) ProtoMessage() {} -func (x *GetBlocksByNumResponse) ProtoReflect() protoreflect.Message { +func (x *GetTrieNodesRequest) ProtoReflect() protoreflect.Message { mi := &file_msg_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -855,36 +864,56 @@ func (x *GetBlocksByNumResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBlocksByNumResponse.ProtoReflect.Descriptor instead. -func (*GetBlocksByNumResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetTrieNodesRequest.ProtoReflect.Descriptor instead. 
+func (*GetTrieNodesRequest) Descriptor() ([]byte, []int) { return file_msg_proto_rawDescGZIP(), []int{12} } -func (x *GetBlocksByNumResponse) GetBlocksBytes() [][]byte { +func (x *GetTrieNodesRequest) GetRoot() []byte { if x != nil { - return x.BlocksBytes + return x.Root } return nil } -func (x *GetBlocksByNumResponse) GetCommitSig() [][]byte { +func (x *GetTrieNodesRequest) GetPaths() []*TrieNodePathSet { if x != nil { - return x.CommitSig + return x.Paths } return nil } -type GetBlocksByHashesResponse struct { +func (x *GetTrieNodesRequest) GetBytes() uint64 { + if x != nil { + return x.Bytes + } + return 0 +} + +type Response struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"` - CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"` + ReqId uint64 `protobuf:"varint,1,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"` + // Types that are assignable to Response: + // + // *Response_ErrorResponse + // *Response_GetBlockNumberResponse + // *Response_GetBlockHashesResponse + // *Response_GetBlocksByNumResponse + // *Response_GetBlocksByHashesResponse + // *Response_GetNodeDataResponse + // *Response_GetReceiptsResponse + // *Response_GetAccountRangeResponse + // *Response_GetStorageRangesResponse + // *Response_GetByteCodesResponse + // *Response_GetTrieNodesResponse + Response isResponse_Response `protobuf_oneof:"response"` } -func (x *GetBlocksByHashesResponse) Reset() { - *x = GetBlocksByHashesResponse{} +func (x *Response) Reset() { + *x = Response{} if protoimpl.UnsafeEnabled { mi := &file_msg_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -892,13 +921,13 @@ func (x *GetBlocksByHashesResponse) Reset() { } } -func (x *GetBlocksByHashesResponse) String() string { +func (x *Response) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBlocksByHashesResponse) ProtoMessage() {} +func (*Response) ProtoMessage() {} -func (x *GetBlocksByHashesResponse) ProtoReflect() protoreflect.Message { +func (x *Response) ProtoReflect() protoreflect.Message { mi := &file_msg_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -910,50 +939,754 @@ func (x *GetBlocksByHashesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBlocksByHashesResponse.ProtoReflect.Descriptor instead. -func (*GetBlocksByHashesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
+func (*Response) Descriptor() ([]byte, []int) { return file_msg_proto_rawDescGZIP(), []int{13} } -func (x *GetBlocksByHashesResponse) GetBlocksBytes() [][]byte { +func (x *Response) GetReqId() uint64 { + if x != nil { + return x.ReqId + } + return 0 +} + +func (m *Response) GetResponse() isResponse_Response { + if m != nil { + return m.Response + } + return nil +} + +func (x *Response) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetResponse().(*Response_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +func (x *Response) GetGetBlockNumberResponse() *GetBlockNumberResponse { + if x, ok := x.GetResponse().(*Response_GetBlockNumberResponse); ok { + return x.GetBlockNumberResponse + } + return nil +} + +func (x *Response) GetGetBlockHashesResponse() *GetBlockHashesResponse { + if x, ok := x.GetResponse().(*Response_GetBlockHashesResponse); ok { + return x.GetBlockHashesResponse + } + return nil +} + +func (x *Response) GetGetBlocksByNumResponse() *GetBlocksByNumResponse { + if x, ok := x.GetResponse().(*Response_GetBlocksByNumResponse); ok { + return x.GetBlocksByNumResponse + } + return nil +} + +func (x *Response) GetGetBlocksByHashesResponse() *GetBlocksByHashesResponse { + if x, ok := x.GetResponse().(*Response_GetBlocksByHashesResponse); ok { + return x.GetBlocksByHashesResponse + } + return nil +} + +func (x *Response) GetGetNodeDataResponse() *GetNodeDataResponse { + if x, ok := x.GetResponse().(*Response_GetNodeDataResponse); ok { + return x.GetNodeDataResponse + } + return nil +} + +func (x *Response) GetGetReceiptsResponse() *GetReceiptsResponse { + if x, ok := x.GetResponse().(*Response_GetReceiptsResponse); ok { + return x.GetReceiptsResponse + } + return nil +} + +func (x *Response) GetGetAccountRangeResponse() *GetAccountRangeResponse { + if x, ok := x.GetResponse().(*Response_GetAccountRangeResponse); ok { + return x.GetAccountRangeResponse + } + return nil +} + +func (x *Response) GetGetStorageRangesResponse() *GetStorageRangesResponse { + if x, ok := x.GetResponse().(*Response_GetStorageRangesResponse); ok { + return x.GetStorageRangesResponse + } + return nil +} + +func (x *Response) GetGetByteCodesResponse() *GetByteCodesResponse { + if x, ok := x.GetResponse().(*Response_GetByteCodesResponse); ok { + return x.GetByteCodesResponse + } + return nil +} + +func (x *Response) GetGetTrieNodesResponse() *GetTrieNodesResponse { + if x, ok := x.GetResponse().(*Response_GetTrieNodesResponse); ok { + return x.GetTrieNodesResponse + } + return nil +} + +type isResponse_Response interface { + isResponse_Response() +} + +type Response_ErrorResponse struct { + ErrorResponse *ErrorResponse `protobuf:"bytes,2,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +type Response_GetBlockNumberResponse struct { + GetBlockNumberResponse *GetBlockNumberResponse `protobuf:"bytes,3,opt,name=get_block_number_response,json=getBlockNumberResponse,proto3,oneof"` +} + +type Response_GetBlockHashesResponse struct { + GetBlockHashesResponse *GetBlockHashesResponse `protobuf:"bytes,4,opt,name=get_block_hashes_response,json=getBlockHashesResponse,proto3,oneof"` +} + +type Response_GetBlocksByNumResponse struct { + GetBlocksByNumResponse *GetBlocksByNumResponse `protobuf:"bytes,5,opt,name=get_blocks_by_num_response,json=getBlocksByNumResponse,proto3,oneof"` +} + +type Response_GetBlocksByHashesResponse struct { + GetBlocksByHashesResponse *GetBlocksByHashesResponse `protobuf:"bytes,6,opt,name=get_blocks_by_hashes_response,json=getBlocksByHashesResponse,proto3,oneof"` +} 
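+
+// A minimal usage sketch, not emitted by protoc-gen-go: every Response_*
+// wrapper type in this file carries exactly one branch of the Response
+// oneof, so receivers dispatch on the concrete wrapper with a type switch.
+// Names here are illustrative assumptions: the package is imported as
+// "message" (per the go_package option "./;message") and resp is a decoded
+// *message.Response obtained elsewhere.
+//
+//	switch payload := resp.GetResponse().(type) {
+//	case *message.Response_GetAccountRangeResponse:
+//		// Account-range reply: verify Accounts against Proof before use.
+//		_ = payload.GetAccountRangeResponse.GetAccounts()
+//	case *message.Response_ErrorResponse:
+//		// The peer reported a failure for this req_id.
+//		_ = payload.ErrorResponse.GetError()
+//	}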
+ +type Response_GetNodeDataResponse struct { + GetNodeDataResponse *GetNodeDataResponse `protobuf:"bytes,7,opt,name=get_node_data_response,json=getNodeDataResponse,proto3,oneof"` +} + +type Response_GetReceiptsResponse struct { + GetReceiptsResponse *GetReceiptsResponse `protobuf:"bytes,8,opt,name=get_receipts_response,json=getReceiptsResponse,proto3,oneof"` +} + +type Response_GetAccountRangeResponse struct { + GetAccountRangeResponse *GetAccountRangeResponse `protobuf:"bytes,9,opt,name=get_account_range_response,json=getAccountRangeResponse,proto3,oneof"` +} + +type Response_GetStorageRangesResponse struct { + GetStorageRangesResponse *GetStorageRangesResponse `protobuf:"bytes,10,opt,name=get_storage_ranges_response,json=getStorageRangesResponse,proto3,oneof"` +} + +type Response_GetByteCodesResponse struct { + GetByteCodesResponse *GetByteCodesResponse `protobuf:"bytes,11,opt,name=get_byte_codes_response,json=getByteCodesResponse,proto3,oneof"` +} + +type Response_GetTrieNodesResponse struct { + GetTrieNodesResponse *GetTrieNodesResponse `protobuf:"bytes,12,opt,name=get_trie_nodes_response,json=getTrieNodesResponse,proto3,oneof"` +} + +func (*Response_ErrorResponse) isResponse_Response() {} + +func (*Response_GetBlockNumberResponse) isResponse_Response() {} + +func (*Response_GetBlockHashesResponse) isResponse_Response() {} + +func (*Response_GetBlocksByNumResponse) isResponse_Response() {} + +func (*Response_GetBlocksByHashesResponse) isResponse_Response() {} + +func (*Response_GetNodeDataResponse) isResponse_Response() {} + +func (*Response_GetReceiptsResponse) isResponse_Response() {} + +func (*Response_GetAccountRangeResponse) isResponse_Response() {} + +func (*Response_GetStorageRangesResponse) isResponse_Response() {} + +func (*Response_GetByteCodesResponse) isResponse_Response() {} + +func (*Response_GetTrieNodesResponse) isResponse_Response() {} + +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{14} +} + +func (x *ErrorResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type GetBlockNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"` +} + +func (x *GetBlockNumberResponse) Reset() { + *x = GetBlockNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockNumberResponse) ProtoMessage() {} + +func (x *GetBlockNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockNumberResponse.ProtoReflect.Descriptor instead. +func (*GetBlockNumberResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{15} +} + +func (x *GetBlockNumberResponse) GetNumber() uint64 { + if x != nil { + return x.Number + } + return 0 +} + +type GetBlockHashesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` +} + +func (x *GetBlockHashesResponse) Reset() { + *x = GetBlockHashesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockHashesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockHashesResponse) ProtoMessage() {} + +func (x *GetBlockHashesResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockHashesResponse.ProtoReflect.Descriptor instead. 
+func (*GetBlockHashesResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{16} +} + +func (x *GetBlockHashesResponse) GetHashes() [][]byte { + if x != nil { + return x.Hashes + } + return nil +} + +type GetBlocksByNumResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"` + CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"` +} + +func (x *GetBlocksByNumResponse) Reset() { + *x = GetBlocksByNumResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlocksByNumResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlocksByNumResponse) ProtoMessage() {} + +func (x *GetBlocksByNumResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlocksByNumResponse.ProtoReflect.Descriptor instead. +func (*GetBlocksByNumResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{17} +} + +func (x *GetBlocksByNumResponse) GetBlocksBytes() [][]byte { + if x != nil { + return x.BlocksBytes + } + return nil +} + +func (x *GetBlocksByNumResponse) GetCommitSig() [][]byte { + if x != nil { + return x.CommitSig + } + return nil +} + +type GetBlocksByHashesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"` + CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"` +} + +func (x *GetBlocksByHashesResponse) Reset() { + *x = GetBlocksByHashesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlocksByHashesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlocksByHashesResponse) ProtoMessage() {} + +func (x *GetBlocksByHashesResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlocksByHashesResponse.ProtoReflect.Descriptor instead. 
+func (*GetBlocksByHashesResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{18} +} + +func (x *GetBlocksByHashesResponse) GetBlocksBytes() [][]byte { if x != nil { return x.BlocksBytes } return nil } -func (x *GetBlocksByHashesResponse) GetCommitSig() [][]byte { +func (x *GetBlocksByHashesResponse) GetCommitSig() [][]byte { + if x != nil { + return x.CommitSig + } + return nil +} + +type GetNodeDataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataBytes [][]byte `protobuf:"bytes,1,rep,name=data_bytes,json=dataBytes,proto3" json:"data_bytes,omitempty"` +} + +func (x *GetNodeDataResponse) Reset() { + *x = GetNodeDataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNodeDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNodeDataResponse) ProtoMessage() {} + +func (x *GetNodeDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNodeDataResponse.ProtoReflect.Descriptor instead. +func (*GetNodeDataResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{19} +} + +func (x *GetNodeDataResponse) GetDataBytes() [][]byte { + if x != nil { + return x.DataBytes + } + return nil +} + +type Receipts struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReceiptBytes [][]byte `protobuf:"bytes,1,rep,name=receipt_bytes,json=receiptBytes,proto3" json:"receipt_bytes,omitempty"` +} + +func (x *Receipts) Reset() { + *x = Receipts{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Receipts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Receipts) ProtoMessage() {} + +func (x *Receipts) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Receipts.ProtoReflect.Descriptor instead. 
+func (*Receipts) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{20} +} + +func (x *Receipts) GetReceiptBytes() [][]byte { + if x != nil { + return x.ReceiptBytes + } + return nil +} + +type GetReceiptsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Receipts map[uint64]*Receipts `protobuf:"bytes,1,rep,name=receipts,proto3" json:"receipts,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetReceiptsResponse) Reset() { + *x = GetReceiptsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetReceiptsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetReceiptsResponse) ProtoMessage() {} + +func (x *GetReceiptsResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetReceiptsResponse.ProtoReflect.Descriptor instead. +func (*GetReceiptsResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{21} +} + +func (x *GetReceiptsResponse) GetReceipts() map[uint64]*Receipts { + if x != nil { + return x.Receipts + } + return nil +} + +type AccountData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *AccountData) Reset() { + *x = AccountData{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AccountData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AccountData) ProtoMessage() {} + +func (x *AccountData) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AccountData.ProtoReflect.Descriptor instead. 
+func (*AccountData) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{22} +} + +func (x *AccountData) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *AccountData) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +type GetAccountRangeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Accounts []*AccountData `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts,omitempty"` + Proof [][]byte `protobuf:"bytes,2,rep,name=proof,proto3" json:"proof,omitempty"` +} + +func (x *GetAccountRangeResponse) Reset() { + *x = GetAccountRangeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAccountRangeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAccountRangeResponse) ProtoMessage() {} + +func (x *GetAccountRangeResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAccountRangeResponse.ProtoReflect.Descriptor instead. +func (*GetAccountRangeResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{23} +} + +func (x *GetAccountRangeResponse) GetAccounts() []*AccountData { + if x != nil { + return x.Accounts + } + return nil +} + +func (x *GetAccountRangeResponse) GetProof() [][]byte { + if x != nil { + return x.Proof + } + return nil +} + +type StorageData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *StorageData) Reset() { + *x = StorageData{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageData) ProtoMessage() {} + +func (x *StorageData) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageData.ProtoReflect.Descriptor instead. 
+func (*StorageData) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{24} +} + +func (x *StorageData) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *StorageData) GetBody() []byte { if x != nil { - return x.CommitSig + return x.Body } return nil } -type GetNodeDataResponse struct { +type StoragesData struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DataBytes [][]byte `protobuf:"bytes,1,rep,name=data_bytes,json=dataBytes,proto3" json:"data_bytes,omitempty"` + Data []*StorageData `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty"` } -func (x *GetNodeDataResponse) Reset() { - *x = GetNodeDataResponse{} +func (x *StoragesData) Reset() { + *x = StoragesData{} if protoimpl.UnsafeEnabled { - mi := &file_msg_proto_msgTypes[14] + mi := &file_msg_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetNodeDataResponse) String() string { +func (x *StoragesData) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetNodeDataResponse) ProtoMessage() {} +func (*StoragesData) ProtoMessage() {} -func (x *GetNodeDataResponse) ProtoReflect() protoreflect.Message { - mi := &file_msg_proto_msgTypes[14] +func (x *StoragesData) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -964,43 +1697,44 @@ func (x *GetNodeDataResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetNodeDataResponse.ProtoReflect.Descriptor instead. -func (*GetNodeDataResponse) Descriptor() ([]byte, []int) { - return file_msg_proto_rawDescGZIP(), []int{14} +// Deprecated: Use StoragesData.ProtoReflect.Descriptor instead. 
+func (*StoragesData) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{25} } -func (x *GetNodeDataResponse) GetDataBytes() [][]byte { +func (x *StoragesData) GetData() []*StorageData { if x != nil { - return x.DataBytes + return x.Data } return nil } -type Receipts struct { +type GetStorageRangesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReceiptBytes [][]byte `protobuf:"bytes,1,rep,name=receipt_bytes,json=receiptBytes,proto3" json:"receipt_bytes,omitempty"` + Slots []*StoragesData `protobuf:"bytes,1,rep,name=slots,proto3" json:"slots,omitempty"` + Proof [][]byte `protobuf:"bytes,2,rep,name=proof,proto3" json:"proof,omitempty"` } -func (x *Receipts) Reset() { - *x = Receipts{} +func (x *GetStorageRangesResponse) Reset() { + *x = GetStorageRangesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_msg_proto_msgTypes[15] + mi := &file_msg_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Receipts) String() string { +func (x *GetStorageRangesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Receipts) ProtoMessage() {} +func (*GetStorageRangesResponse) ProtoMessage() {} -func (x *Receipts) ProtoReflect() protoreflect.Message { - mi := &file_msg_proto_msgTypes[15] +func (x *GetStorageRangesResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1011,43 +1745,50 @@ func (x *Receipts) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Receipts.ProtoReflect.Descriptor instead. -func (*Receipts) Descriptor() ([]byte, []int) { - return file_msg_proto_rawDescGZIP(), []int{15} +// Deprecated: Use GetStorageRangesResponse.ProtoReflect.Descriptor instead. 
+func (*GetStorageRangesResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{26} } -func (x *Receipts) GetReceiptBytes() [][]byte { +func (x *GetStorageRangesResponse) GetSlots() []*StoragesData { if x != nil { - return x.ReceiptBytes + return x.Slots } return nil } -type GetReceiptsResponse struct { +func (x *GetStorageRangesResponse) GetProof() [][]byte { + if x != nil { + return x.Proof + } + return nil +} + +type GetByteCodesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Receipts map[uint64]*Receipts `protobuf:"bytes,1,rep,name=receipts,proto3" json:"receipts,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Codes [][]byte `protobuf:"bytes,1,rep,name=codes,proto3" json:"codes,omitempty"` } -func (x *GetReceiptsResponse) Reset() { - *x = GetReceiptsResponse{} +func (x *GetByteCodesResponse) Reset() { + *x = GetByteCodesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_msg_proto_msgTypes[16] + mi := &file_msg_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetReceiptsResponse) String() string { +func (x *GetByteCodesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetReceiptsResponse) ProtoMessage() {} +func (*GetByteCodesResponse) ProtoMessage() {} -func (x *GetReceiptsResponse) ProtoReflect() protoreflect.Message { - mi := &file_msg_proto_msgTypes[16] +func (x *GetByteCodesResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1058,14 +1799,61 @@ func (x *GetReceiptsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetReceiptsResponse.ProtoReflect.Descriptor instead. -func (*GetReceiptsResponse) Descriptor() ([]byte, []int) { - return file_msg_proto_rawDescGZIP(), []int{16} +// Deprecated: Use GetByteCodesResponse.ProtoReflect.Descriptor instead. +func (*GetByteCodesResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{27} } -func (x *GetReceiptsResponse) GetReceipts() map[uint64]*Receipts { +func (x *GetByteCodesResponse) GetCodes() [][]byte { if x != nil { - return x.Receipts + return x.Codes + } + return nil +} + +type GetTrieNodesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nodes [][]byte `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` +} + +func (x *GetTrieNodesResponse) Reset() { + *x = GetTrieNodesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTrieNodesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTrieNodesResponse) ProtoMessage() {} + +func (x *GetTrieNodesResponse) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTrieNodesResponse.ProtoReflect.Descriptor instead. 
+func (*GetTrieNodesResponse) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{28} +} + +func (x *GetTrieNodesResponse) GetNodes() [][]byte { + if x != nil { + return x.Nodes } return nil } @@ -1084,7 +1872,7 @@ var file_msg_proto_rawDesc = []byte{ 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x04, 0x72, 0x65, 0x73, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x72, - 0x65, 0x71, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x22, 0xbd, 0x05, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x22, 0xf6, 0x08, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x18, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, @@ -1127,119 +1915,239 @@ var file_msg_proto_rawDesc = []byte{ 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x12, 0x67, 0x65, 0x74, - 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, - 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, - 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04, - 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, - 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x2f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, - 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, - 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x3d, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, - 0x73, 0x68, 0x65, 0x73, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x6f, - 0x64, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, - 0x73, 0x68, 0x65, 0x73, 0x22, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 
- 0x04, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 0x53, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, - 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, - 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x71, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, - 0x62, 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, + 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x73, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, - 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, - 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x7a, 0x0a, 0x1d, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x68, 0x61, - 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, - 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x67, + 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, + 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x42, + 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, + 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x15, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x2f, 0x0a, + 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x3d, + 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x35, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 
0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x48, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, + 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x70, 0x0a, + 0x16, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, + 0x8d, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, + 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, + 0x43, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x68, 0x73, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x74, 0x68, 0x73, 0x65, + 0x74, 0x22, 0x83, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x42, 0x0a, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x68, + 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x72, 0x69, 0x65, 0x4e, + 0x6f, 0x64, 0x65, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x74, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xeb, 0x09, 0x0a, 0x08, 0x52, 
0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 0x53, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, + 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, + 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x1d, 0x67, 0x65, 0x74, 0x5f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x19, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 
0x73, 0x65, 0x48, 0x00, 0x52, 0x19, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, - 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, + 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, + 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, + 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, + 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x13, 0x67, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, + 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, + 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x1b, 0x67, 0x65, 0x74, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x6a, 0x0a, 0x17, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 
0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, + 0x17, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x16, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x30, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x22, 0x5d, 0x0a, 0x19, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x22, 0x34, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x67, 0x65, 0x74, - 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 
0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, + 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0x2f, 0x0a, 0x08, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x08, 0x72, 0x65, + 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68, + 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, + 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, + 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, + 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x1a, 0x62, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, - 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x0a, - 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, - 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 
0x6d, 0x6d, 0x69, - 0x74, 0x53, 0x69, 0x67, 0x22, 0x5d, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x53, 0x69, 0x67, 0x22, 0x34, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, - 0x64, 0x61, 0x74, 0x61, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x08, 0x52, 0x65, 0x63, - 0x65, 0x69, 0x70, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x70, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x13, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x08, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x1a, 0x62, - 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, - 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x3b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x35, 0x0a, 0x0b, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x22, 0x75, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x08, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, + 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x35, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x62, + 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, + 0x4c, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x3c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, + 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x71, 0x0a, + 0x18, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x6c, 0x6f, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, + 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x73, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, + 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, + 0x22, 0x2c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x64, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x2c, + 0x0a, 0x14, 0x47, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x0c, 0x5a, 0x0a, + 0x2e, 0x2f, 0x3b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1254,7 +2162,7 @@ func file_msg_proto_rawDescGZIP() []byte { return file_msg_proto_rawDescData } -var file_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 30) var file_msg_proto_goTypes = []interface{}{ (*Message)(nil), // 0: harmony.stream.sync.message.Message (*Request)(nil), // 1: harmony.stream.sync.message.Request @@ -1264,40 +2172,64 @@ var file_msg_proto_goTypes = []interface{}{ (*GetBlocksByHashesRequest)(nil), // 5: harmony.stream.sync.message.GetBlocksByHashesRequest (*GetNodeDataRequest)(nil), // 6: harmony.stream.sync.message.GetNodeDataRequest (*GetReceiptsRequest)(nil), // 7: harmony.stream.sync.message.GetReceiptsRequest - (*Response)(nil), // 8: harmony.stream.sync.message.Response - (*ErrorResponse)(nil), // 9: harmony.stream.sync.message.ErrorResponse - (*GetBlockNumberResponse)(nil), // 10: 
harmony.stream.sync.message.GetBlockNumberResponse - (*GetBlockHashesResponse)(nil), // 11: harmony.stream.sync.message.GetBlockHashesResponse - (*GetBlocksByNumResponse)(nil), // 12: harmony.stream.sync.message.GetBlocksByNumResponse - (*GetBlocksByHashesResponse)(nil), // 13: harmony.stream.sync.message.GetBlocksByHashesResponse - (*GetNodeDataResponse)(nil), // 14: harmony.stream.sync.message.GetNodeDataResponse - (*Receipts)(nil), // 15: harmony.stream.sync.message.Receipts - (*GetReceiptsResponse)(nil), // 16: harmony.stream.sync.message.GetReceiptsResponse - nil, // 17: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry + (*GetAccountRangeRequest)(nil), // 8: harmony.stream.sync.message.GetAccountRangeRequest + (*GetStorageRangesRequest)(nil), // 9: harmony.stream.sync.message.GetStorageRangesRequest + (*GetByteCodesRequest)(nil), // 10: harmony.stream.sync.message.GetByteCodesRequest + (*TrieNodePathSet)(nil), // 11: harmony.stream.sync.message.TrieNodePathSet + (*GetTrieNodesRequest)(nil), // 12: harmony.stream.sync.message.GetTrieNodesRequest + (*Response)(nil), // 13: harmony.stream.sync.message.Response + (*ErrorResponse)(nil), // 14: harmony.stream.sync.message.ErrorResponse + (*GetBlockNumberResponse)(nil), // 15: harmony.stream.sync.message.GetBlockNumberResponse + (*GetBlockHashesResponse)(nil), // 16: harmony.stream.sync.message.GetBlockHashesResponse + (*GetBlocksByNumResponse)(nil), // 17: harmony.stream.sync.message.GetBlocksByNumResponse + (*GetBlocksByHashesResponse)(nil), // 18: harmony.stream.sync.message.GetBlocksByHashesResponse + (*GetNodeDataResponse)(nil), // 19: harmony.stream.sync.message.GetNodeDataResponse + (*Receipts)(nil), // 20: harmony.stream.sync.message.Receipts + (*GetReceiptsResponse)(nil), // 21: harmony.stream.sync.message.GetReceiptsResponse + (*AccountData)(nil), // 22: harmony.stream.sync.message.AccountData + (*GetAccountRangeResponse)(nil), // 23: harmony.stream.sync.message.GetAccountRangeResponse + (*StorageData)(nil), // 24: harmony.stream.sync.message.StorageData + (*StoragesData)(nil), // 25: harmony.stream.sync.message.StoragesData + (*GetStorageRangesResponse)(nil), // 26: harmony.stream.sync.message.GetStorageRangesResponse + (*GetByteCodesResponse)(nil), // 27: harmony.stream.sync.message.GetByteCodesResponse + (*GetTrieNodesResponse)(nil), // 28: harmony.stream.sync.message.GetTrieNodesResponse + nil, // 29: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry } var file_msg_proto_depIdxs = []int32{ 1, // 0: harmony.stream.sync.message.Message.req:type_name -> harmony.stream.sync.message.Request - 8, // 1: harmony.stream.sync.message.Message.resp:type_name -> harmony.stream.sync.message.Response + 13, // 1: harmony.stream.sync.message.Message.resp:type_name -> harmony.stream.sync.message.Response 2, // 2: harmony.stream.sync.message.Request.get_block_number_request:type_name -> harmony.stream.sync.message.GetBlockNumberRequest 3, // 3: harmony.stream.sync.message.Request.get_block_hashes_request:type_name -> harmony.stream.sync.message.GetBlockHashesRequest 4, // 4: harmony.stream.sync.message.Request.get_blocks_by_num_request:type_name -> harmony.stream.sync.message.GetBlocksByNumRequest 5, // 5: harmony.stream.sync.message.Request.get_blocks_by_hashes_request:type_name -> harmony.stream.sync.message.GetBlocksByHashesRequest 6, // 6: harmony.stream.sync.message.Request.get_node_data_request:type_name -> harmony.stream.sync.message.GetNodeDataRequest 7, // 7: 
harmony.stream.sync.message.Request.get_receipts_request:type_name -> harmony.stream.sync.message.GetReceiptsRequest - 9, // 8: harmony.stream.sync.message.Response.error_response:type_name -> harmony.stream.sync.message.ErrorResponse - 10, // 9: harmony.stream.sync.message.Response.get_block_number_response:type_name -> harmony.stream.sync.message.GetBlockNumberResponse - 11, // 10: harmony.stream.sync.message.Response.get_block_hashes_response:type_name -> harmony.stream.sync.message.GetBlockHashesResponse - 12, // 11: harmony.stream.sync.message.Response.get_blocks_by_num_response:type_name -> harmony.stream.sync.message.GetBlocksByNumResponse - 13, // 12: harmony.stream.sync.message.Response.get_blocks_by_hashes_response:type_name -> harmony.stream.sync.message.GetBlocksByHashesResponse - 14, // 13: harmony.stream.sync.message.Response.get_node_data_response:type_name -> harmony.stream.sync.message.GetNodeDataResponse - 16, // 14: harmony.stream.sync.message.Response.get_receipts_response:type_name -> harmony.stream.sync.message.GetReceiptsResponse - 17, // 15: harmony.stream.sync.message.GetReceiptsResponse.receipts:type_name -> harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry - 15, // 16: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry.value:type_name -> harmony.stream.sync.message.Receipts - 17, // [17:17] is the sub-list for method output_type - 17, // [17:17] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 8, // 8: harmony.stream.sync.message.Request.get_account_range_request:type_name -> harmony.stream.sync.message.GetAccountRangeRequest + 9, // 9: harmony.stream.sync.message.Request.get_storage_ranges_request:type_name -> harmony.stream.sync.message.GetStorageRangesRequest + 10, // 10: harmony.stream.sync.message.Request.get_byte_codes_request:type_name -> harmony.stream.sync.message.GetByteCodesRequest + 12, // 11: harmony.stream.sync.message.Request.get_trie_nodes_request:type_name -> harmony.stream.sync.message.GetTrieNodesRequest + 11, // 12: harmony.stream.sync.message.GetTrieNodesRequest.paths:type_name -> harmony.stream.sync.message.TrieNodePathSet + 14, // 13: harmony.stream.sync.message.Response.error_response:type_name -> harmony.stream.sync.message.ErrorResponse + 15, // 14: harmony.stream.sync.message.Response.get_block_number_response:type_name -> harmony.stream.sync.message.GetBlockNumberResponse + 16, // 15: harmony.stream.sync.message.Response.get_block_hashes_response:type_name -> harmony.stream.sync.message.GetBlockHashesResponse + 17, // 16: harmony.stream.sync.message.Response.get_blocks_by_num_response:type_name -> harmony.stream.sync.message.GetBlocksByNumResponse + 18, // 17: harmony.stream.sync.message.Response.get_blocks_by_hashes_response:type_name -> harmony.stream.sync.message.GetBlocksByHashesResponse + 19, // 18: harmony.stream.sync.message.Response.get_node_data_response:type_name -> harmony.stream.sync.message.GetNodeDataResponse + 21, // 19: harmony.stream.sync.message.Response.get_receipts_response:type_name -> harmony.stream.sync.message.GetReceiptsResponse + 23, // 20: harmony.stream.sync.message.Response.get_account_range_response:type_name -> harmony.stream.sync.message.GetAccountRangeResponse + 26, // 21: harmony.stream.sync.message.Response.get_storage_ranges_response:type_name -> harmony.stream.sync.message.GetStorageRangesResponse + 27, // 22: 
harmony.stream.sync.message.Response.get_byte_codes_response:type_name -> harmony.stream.sync.message.GetByteCodesResponse + 28, // 23: harmony.stream.sync.message.Response.get_trie_nodes_response:type_name -> harmony.stream.sync.message.GetTrieNodesResponse + 29, // 24: harmony.stream.sync.message.GetReceiptsResponse.receipts:type_name -> harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry + 22, // 25: harmony.stream.sync.message.GetAccountRangeResponse.accounts:type_name -> harmony.stream.sync.message.AccountData + 24, // 26: harmony.stream.sync.message.StoragesData.data:type_name -> harmony.stream.sync.message.StorageData + 25, // 27: harmony.stream.sync.message.GetStorageRangesResponse.slots:type_name -> harmony.stream.sync.message.StoragesData + 20, // 28: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry.value:type_name -> harmony.stream.sync.message.Receipts + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_msg_proto_init() } @@ -1403,7 +2335,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { + switch v := v.(*GetAccountRangeRequest); i { case 0: return &v.state case 1: @@ -1415,7 +2347,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorResponse); i { + switch v := v.(*GetStorageRangesRequest); i { case 0: return &v.state case 1: @@ -1427,7 +2359,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlockNumberResponse); i { + switch v := v.(*GetByteCodesRequest); i { case 0: return &v.state case 1: @@ -1439,7 +2371,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlockHashesResponse); i { + switch v := v.(*TrieNodePathSet); i { case 0: return &v.state case 1: @@ -1451,7 +2383,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlocksByNumResponse); i { + switch v := v.(*GetTrieNodesRequest); i { case 0: return &v.state case 1: @@ -1463,7 +2395,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlocksByHashesResponse); i { + switch v := v.(*Response); i { case 0: return &v.state case 1: @@ -1475,7 +2407,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNodeDataResponse); i { + switch v := v.(*ErrorResponse); i { case 0: return &v.state case 1: @@ -1487,7 +2419,7 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Receipts); i { + switch v := v.(*GetBlockNumberResponse); i { case 0: return &v.state case 1: @@ -1499,6 +2431,66 @@ func file_msg_proto_init() { } } file_msg_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockHashesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_msg_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlocksByNumResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlocksByHashesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNodeDataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Receipts); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetReceiptsResponse); i { case 0: return &v.state @@ -1510,6 +2502,90 @@ func file_msg_proto_init() { return nil } } + file_msg_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccountData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAccountRangeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoragesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetStorageRangesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetByteCodesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTrieNodesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_msg_proto_msgTypes[0].OneofWrappers = []interface{}{ (*Message_Req)(nil), @@ -1522,8 +2598,12 @@ func file_msg_proto_init() { (*Request_GetBlocksByHashesRequest)(nil), (*Request_GetNodeDataRequest)(nil), (*Request_GetReceiptsRequest)(nil), + (*Request_GetAccountRangeRequest)(nil), + (*Request_GetStorageRangesRequest)(nil), + (*Request_GetByteCodesRequest)(nil), + (*Request_GetTrieNodesRequest)(nil), } - file_msg_proto_msgTypes[8].OneofWrappers = []interface{}{ + file_msg_proto_msgTypes[13].OneofWrappers = []interface{}{ 
(*Response_ErrorResponse)(nil),
 		(*Response_GetBlockNumberResponse)(nil),
 		(*Response_GetBlockHashesResponse)(nil),
@@ -1531,6 +2611,10 @@
 		(*Response_GetBlocksByHashesResponse)(nil),
 		(*Response_GetNodeDataResponse)(nil),
 		(*Response_GetReceiptsResponse)(nil),
+		(*Response_GetAccountRangeResponse)(nil),
+		(*Response_GetStorageRangesResponse)(nil),
+		(*Response_GetByteCodesResponse)(nil),
+		(*Response_GetTrieNodesResponse)(nil),
 	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
@@ -1538,7 +2622,7 @@
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_msg_proto_rawDesc,
 			NumEnums:      0,
-			NumMessages:   18,
+			NumMessages:   30,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/p2p/stream/protocols/sync/message/msg.proto b/p2p/stream/protocols/sync/message/msg.proto
index f48341868f..85fa67460d 100644
--- a/p2p/stream/protocols/sync/message/msg.proto
+++ b/p2p/stream/protocols/sync/message/msg.proto
@@ -19,6 +19,10 @@ message Request {
     GetBlocksByHashesRequest get_blocks_by_hashes_request = 5;
     GetNodeDataRequest get_node_data_request = 6;
     GetReceiptsRequest get_receipts_request = 7;
+    GetAccountRangeRequest get_account_range_request = 8;
+    GetStorageRangesRequest get_storage_ranges_request = 9;
+    GetByteCodesRequest get_byte_codes_request = 10;
+    GetTrieNodesRequest get_trie_nodes_request = 11;
   }
 }
 
@@ -44,6 +48,36 @@ message GetReceiptsRequest {
   repeated bytes block_hashes = 1;
 }
 
+message GetAccountRangeRequest {
+  bytes root = 1;
+  bytes origin = 2;
+  bytes limit = 3;
+  uint64 bytes = 4;
+}
+
+message GetStorageRangesRequest {
+  bytes root = 1;
+  repeated bytes accounts = 2;
+  bytes origin = 3;
+  bytes limit = 4;
+  uint64 bytes = 5;
+}
+
+message GetByteCodesRequest {
+  repeated bytes hashes = 1;
+  uint64 bytes = 2;
+}
+
+message TrieNodePathSet {
+  repeated bytes pathset = 1;
+}
+
+message GetTrieNodesRequest {
+  bytes root = 1;
+  repeated TrieNodePathSet paths = 2;
+  uint64 bytes = 3;
+}
+
 message Response {
   uint64 req_id = 1;
   oneof response {
@@ -54,6 +88,10 @@ message Response {
     GetBlocksByHashesResponse get_blocks_by_hashes_response = 6;
     GetNodeDataResponse get_node_data_response = 7;
     GetReceiptsResponse get_receipts_response = 8;
+    GetAccountRangeResponse get_account_range_response = 9;
+    GetStorageRangesResponse get_storage_ranges_response = 10;
+    GetByteCodesResponse get_byte_codes_response = 11;
+    GetTrieNodesResponse get_trie_nodes_response = 12;
   }
 }
 
@@ -90,3 +128,35 @@ message Receipts {
 message GetReceiptsResponse {
   map<uint64, Receipts> receipts = 1;
 }
+
+message AccountData {
+  bytes hash = 1;
+  bytes body = 2;
+}
+
+message GetAccountRangeResponse {
+  repeated AccountData accounts = 1;
+  repeated bytes proof = 2;
+}
+
+message StorageData {
+  bytes hash = 1;
+  bytes body = 2;
+}
+
+message StoragesData {
+  repeated StorageData data = 1;
+}
+
+message GetStorageRangesResponse {
+  repeated StoragesData slots = 1;
+  repeated bytes proof = 2;
+}
+
+message GetByteCodesResponse {
+  repeated bytes codes = 1;
+}
+
+message GetTrieNodesResponse {
+  repeated bytes nodes = 1;
+}
\ No newline at end of file
diff --git a/p2p/stream/protocols/sync/message/parse.go b/p2p/stream/protocols/sync/message/parse.go
index b0bf360a8a..4c9849c06c 100644
--- a/p2p/stream/protocols/sync/message/parse.go
+++ b/p2p/stream/protocols/sync/message/parse.go
@@ -111,3 +111,67 @@ func (msg *Message) GetNodeDataResponse() (*GetNodeDataResponse, error) {
 	}
 	return gnResp, nil
 }
+
+// GetAccountRangesResponse parse the message to GetAccountRangesResponse
+func (msg
*Message) GetAccountRangesResponse() (*GetAccountRangeResponse, error) { + resp := msg.GetResp() + if resp == nil { + return nil, errors.New("not response message") + } + if errResp := resp.GetErrorResponse(); errResp != nil { + return nil, &ResponseError{errResp.Error} + } + gnResp := resp.GetGetAccountRangeResponse() + if gnResp == nil { + return nil, errors.New("not GetGetAccountRangeResponse") + } + return gnResp, nil +} + +// GetStorageRangesResponse parse the message to GetStorageRangesResponse +func (msg *Message) GetStorageRangesResponse() (*GetStorageRangesResponse, error) { + resp := msg.GetResp() + if resp == nil { + return nil, errors.New("not response message") + } + if errResp := resp.GetErrorResponse(); errResp != nil { + return nil, &ResponseError{errResp.Error} + } + gnResp := resp.GetGetStorageRangesResponse() + if gnResp == nil { + return nil, errors.New("not GetGetStorageRangesResponse") + } + return gnResp, nil +} + +// GetByteCodesResponse parse the message to GetByteCodesResponse +func (msg *Message) GetByteCodesResponse() (*GetByteCodesResponse, error) { + resp := msg.GetResp() + if resp == nil { + return nil, errors.New("not response message") + } + if errResp := resp.GetErrorResponse(); errResp != nil { + return nil, &ResponseError{errResp.Error} + } + gnResp := resp.GetGetByteCodesResponse() + if gnResp == nil { + return nil, errors.New("not GetByteCodesResponse") + } + return gnResp, nil +} + +// GetTrieNodesResponse parse the message to GetTrieNodesResponse +func (msg *Message) GetTrieNodesResponse() (*GetTrieNodesResponse, error) { + resp := msg.GetResp() + if resp == nil { + return nil, errors.New("not response message") + } + if errResp := resp.GetErrorResponse(); errResp != nil { + return nil, &ResponseError{errResp.Error} + } + gnResp := resp.GetGetTrieNodesResponse() + if gnResp == nil { + return nil, errors.New("not GetTrieNodesResponse") + } + return gnResp, nil +} diff --git a/p2p/stream/protocols/sync/stream.go b/p2p/stream/protocols/sync/stream.go index 56419767f0..3077a8a135 100644 --- a/p2p/stream/protocols/sync/stream.go +++ b/p2p/stream/protocols/sync/stream.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" protobuf "github.com/golang/protobuf/proto" + "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message" syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message" sttypes "github.com/harmony-one/harmony/p2p/stream/types" libp2p_network "github.com/libp2p/go-libp2p/core/network" @@ -188,6 +189,18 @@ func (st *syncStream) handleReq(req *syncpb.Request) error { if rReq := req.GetGetReceiptsRequest(); rReq != nil { return st.handleGetReceiptsRequest(req.ReqId, rReq) } + if ndReq := req.GetGetAccountRangeRequest(); ndReq != nil { + return st.handleGetAccountRangeRequest(req.ReqId, ndReq) + } + if ndReq := req.GetGetStorageRangesRequest(); ndReq != nil { + return st.handleGetStorageRangesRequest(req.ReqId, ndReq) + } + if ndReq := req.GetGetByteCodesRequest(); ndReq != nil { + return st.handleGetByteCodesRequest(req.ReqId, ndReq) + } + if ndReq := req.GetGetTrieNodesRequest(); ndReq != nil { + return st.handleGetTrieNodesRequest(req.ReqId, ndReq) + } // unsupported request type return st.handleUnknownRequest(req.ReqId) } @@ -308,6 +321,95 @@ func (st *syncStream) handleGetReceiptsRequest(rid uint64, req *syncpb.GetReceip return errors.Wrap(err, "[GetReceipts]") } +func (st *syncStream) handleGetAccountRangeRequest(rid uint64, req *syncpb.GetAccountRangeRequest) error 
{ + serverRequestCounterVec.With(prometheus.Labels{ + "topic": string(st.ProtoID()), + "request_type": "getAccountRangeRequest", + }).Inc() + + root := common.BytesToHash(req.Root) + origin := common.BytesToHash(req.Origin) + limit := common.BytesToHash(req.Limit) + resp, err := st.computeGetAccountRangeRequest(rid, root, origin, limit, req.Bytes) + if resp == nil && err != nil { + resp = syncpb.MakeErrorResponseMessage(rid, err) + } + if writeErr := st.writeMsg(resp); writeErr != nil { + if err == nil { + err = writeErr + } else { + err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr) + } + } + return errors.Wrap(err, "[GetAccountRange]") +} + +func (st *syncStream) handleGetStorageRangesRequest(rid uint64, req *syncpb.GetStorageRangesRequest) error { + serverRequestCounterVec.With(prometheus.Labels{ + "topic": string(st.ProtoID()), + "request_type": "getStorageRangesRequest", + }).Inc() + + root := common.BytesToHash(req.Root) + accounts := bytesToHashes(req.Accounts) + origin := common.BytesToHash(req.Origin) + limit := common.BytesToHash(req.Limit) + resp, err := st.computeGetStorageRangesRequest(rid, root, accounts, origin, limit, req.Bytes) + if resp == nil && err != nil { + resp = syncpb.MakeErrorResponseMessage(rid, err) + } + if writeErr := st.writeMsg(resp); writeErr != nil { + if err == nil { + err = writeErr + } else { + err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr) + } + } + return errors.Wrap(err, "[GetStorageRanges]") +} + +func (st *syncStream) handleGetByteCodesRequest(rid uint64, req *syncpb.GetByteCodesRequest) error { + serverRequestCounterVec.With(prometheus.Labels{ + "topic": string(st.ProtoID()), + "request_type": "getByteCodesRequest", + }).Inc() + + hashes := bytesToHashes(req.Hashes) + resp, err := st.computeGetByteCodesRequest(rid, hashes, req.Bytes) + if resp == nil && err != nil { + resp = syncpb.MakeErrorResponseMessage(rid, err) + } + if writeErr := st.writeMsg(resp); writeErr != nil { + if err == nil { + err = writeErr + } else { + err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr) + } + } + return errors.Wrap(err, "[GetByteCodes]") +} + +func (st *syncStream) handleGetTrieNodesRequest(rid uint64, req *syncpb.GetTrieNodesRequest) error { + serverRequestCounterVec.With(prometheus.Labels{ + "topic": string(st.ProtoID()), + "request_type": "getTrieNodesRequest", + }).Inc() + + root := common.BytesToHash(req.Root) + resp, err := st.computeGetTrieNodesRequest(rid, root, req.Paths, req.Bytes) + if resp == nil && err != nil { + resp = syncpb.MakeErrorResponseMessage(rid, err) + } + if writeErr := st.writeMsg(resp); writeErr != nil { + if err == nil { + err = writeErr + } else { + err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr) + } + } + return errors.Wrap(err, "[GetTrieNodes]") +} + func (st *syncStream) handleUnknownRequest(rid uint64) error { serverRequestCounterVec.With(prometheus.Labels{ "topic": string(st.ProtoID()), @@ -453,6 +555,74 @@ func (st *syncStream) computeGetReceipts(rid uint64, hs []common.Hash) (*syncpb. 
return syncpb.MakeGetReceiptsResponseMessage(rid, normalizedReceipts), nil } +func (st *syncStream) computeGetAccountRangeRequest(rid uint64, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) (*syncpb.Message, error) { + if bytes == 0 { + return nil, fmt.Errorf("zero account ranges bytes requested") + } + if bytes > softResponseLimit { + return nil, fmt.Errorf("requested bytes exceed limit") + } + accounts, proof, err := st.chain.getAccountRange(root, origin, limit, bytes) + if err != nil { + return nil, err + } + return syncpb.MakeGetAccountRangeResponseMessage(rid, accounts, proof), nil +} + +func (st *syncStream) computeGetStorageRangesRequest(rid uint64, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) (*syncpb.Message, error) { + if bytes == 0 { + return nil, fmt.Errorf("zero storage ranges bytes requested") + } + if bytes > softResponseLimit { + return nil, fmt.Errorf("requested bytes exceed limit") + } + if len(accounts) > GetStorageRangesRequestCap { + err := fmt.Errorf("GetStorageRangesRequest amount exceed cap: %v > %v", len(accounts), GetStorageRangesRequestCap) + return nil, err + } + slots, proofs, err := st.chain.getStorageRanges(root, accounts, origin, limit, bytes) + if err != nil { + return nil, err + } + return syncpb.MakeGetStorageRangesResponseMessage(rid, slots, proofs), nil +} + +func (st *syncStream) computeGetByteCodesRequest(rid uint64, hs []common.Hash, bytes uint64) (*syncpb.Message, error) { + if bytes == 0 { + return nil, fmt.Errorf("zero byte code bytes requested") + } + if bytes > softResponseLimit { + return nil, fmt.Errorf("requested bytes exceed limit") + } + if len(hs) > GetByteCodesRequestCap { + err := fmt.Errorf("GetByteCodesRequest amount exceed cap: %v > %v", len(hs), GetByteCodesRequestCap) + return nil, err + } + codes, err := st.chain.getByteCodes(hs, bytes) + if err != nil { + return nil, err + } + return syncpb.MakeGetByteCodesResponseMessage(rid, codes), nil +} + +func (st *syncStream) computeGetTrieNodesRequest(rid uint64, root common.Hash, paths []*message.TrieNodePathSet, bytes uint64) (*syncpb.Message, error) { + if bytes == 0 { + return nil, fmt.Errorf("zero trie node bytes requested") + } + if bytes > softResponseLimit { + return nil, fmt.Errorf("requested bytes exceed limit") + } + if len(paths) > GetTrieNodesRequestCap { + err := fmt.Errorf("GetTrieNodesRequest amount exceed cap: %v > %v", len(paths), GetTrieNodesRequestCap) + return nil, err + } + nodes, err := st.chain.getTrieNodes(root, paths, bytes, time.Now()) + if err != nil { + return nil, err + } + return syncpb.MakeGetTrieNodesResponseMessage(rid, nodes), nil +} + func bytesToHashes(bs [][]byte) []common.Hash { hs := make([]common.Hash, 0, len(bs)) for _, b := range bs { diff --git a/p2p/stream/protocols/sync/stream_test.go b/p2p/stream/protocols/sync/stream_test.go index f33bc3eb9f..3b538c14b8 100644 --- a/p2p/stream/protocols/sync/stream_test.go +++ b/p2p/stream/protocols/sync/stream_test.go @@ -60,6 +60,30 @@ var ( } testGetNodeDataRequest = syncpb.MakeGetNodeDataRequest(testGetNodeData) testGetNodeDataRequestMsg = syncpb.MakeMessageFromRequest(testGetNodeDataRequest) + + maxBytes = uint64(500) + root = numberToHash(1) + origin = numberToHash(2) + limit = numberToHash(3) + + testHashes = []common.Hash{ + numberToHash(1), + numberToHash(2), + } + + testAccounts = []common.Hash{account1, account2} + + testGetAccountRangesRequest = syncpb.MakeGetAccountRangeRequest(root, origin, limit, maxBytes) + 
testGetAccountRangesRequestMsg = syncpb.MakeMessageFromRequest(testGetAccountRangesRequest) + + testGetStorageRangesRequest = syncpb.MakeGetStorageRangesRequest(root, testAccounts, origin, limit, maxBytes) + testGetStorageRangesRequestMsg = syncpb.MakeMessageFromRequest(testGetStorageRangesRequest) + + testGetByteCodesRequest = syncpb.MakeGetByteCodesRequest(testHashes, maxBytes) + testGetByteCodesRequestMsg = syncpb.MakeMessageFromRequest(testGetByteCodesRequest) + + testGetTrieNodesRequest = syncpb.MakeGetTrieNodesRequest(root, testPaths, maxBytes) + testGetTrieNodesRequestMsg = syncpb.MakeMessageFromRequest(testGetTrieNodesRequest) ) func TestSyncStream_HandleGetBlocksByRequest(t *testing.T) { @@ -188,6 +212,90 @@ func TestSyncStream_HandleGetNodeData(t *testing.T) { } } +func TestSyncStream_HandleGetAccountRanges(t *testing.T) { + st, remoteSt := makeTestSyncStream() + + go st.run() + defer close(st.closeC) + + req := testGetAccountRangesRequestMsg + b, _ := protobuf.Marshal(req) + err := remoteSt.WriteBytes(b) + if err != nil { + t.Fatal(err) + } + + time.Sleep(200 * time.Millisecond) + receivedBytes, _ := remoteSt.ReadBytes() + + if err := checkAccountRangeResult(maxBytes, receivedBytes); err != nil { + t.Fatal(err) + } +} + +func TestSyncStream_HandleGetStorageRanges(t *testing.T) { + st, remoteSt := makeTestSyncStream() + + go st.run() + defer close(st.closeC) + + req := testGetStorageRangesRequestMsg + b, _ := protobuf.Marshal(req) + err := remoteSt.WriteBytes(b) + if err != nil { + t.Fatal(err) + } + + time.Sleep(200 * time.Millisecond) + receivedBytes, _ := remoteSt.ReadBytes() + + if err := checkStorageRangesResult(testAccounts, maxBytes, receivedBytes); err != nil { + t.Fatal(err) + } +} + +func TestSyncStream_HandleGetByteCodesResult(t *testing.T) { + st, remoteSt := makeTestSyncStream() + + go st.run() + defer close(st.closeC) + + req := testGetByteCodesRequestMsg + b, _ := protobuf.Marshal(req) + err := remoteSt.WriteBytes(b) + if err != nil { + t.Fatal(err) + } + + time.Sleep(200 * time.Millisecond) + receivedBytes, _ := remoteSt.ReadBytes() + + if err := checkByteCodesResult(testHashes, maxBytes, receivedBytes); err != nil { + t.Fatal(err) + } +} + +func TestSyncStream_HandleGetTrieNodes(t *testing.T) { + st, remoteSt := makeTestSyncStream() + + go st.run() + defer close(st.closeC) + + req := testGetTrieNodesRequestMsg + b, _ := protobuf.Marshal(req) + err := remoteSt.WriteBytes(b) + if err != nil { + t.Fatal(err) + } + + time.Sleep(200 * time.Millisecond) + receivedBytes, _ := remoteSt.ReadBytes() + + if err := checkTrieNodesResult(testHashes, maxBytes, receivedBytes); err != nil { + t.Fatal(err) + } +} + func makeTestSyncStream() (*syncStream, *testRemoteBaseStream) { localRaw, remoteRaw := makePairP2PStreams() remote := newTestRemoteBaseStream(remoteRaw) diff --git a/test/chain/chain/chain_makers.go b/test/chain/chain/chain_makers.go index 2b82beb575..122540038b 100644 --- a/test/chain/chain/chain_makers.go +++ b/test/chain/chain/chain_makers.go @@ -22,12 +22,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/block" blockfactory "github.com/harmony-one/harmony/block/factory" consensus_engine "github.com/harmony-one/harmony/consensus/engine" "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/core/state/snapshot" "github.com/harmony-one/harmony/core/types" 
"github.com/harmony-one/harmony/core/vm" "github.com/harmony-one/harmony/internal/params" @@ -252,6 +254,7 @@ func (cr *fakeChainReader) GetReceiptsByHash(hash common.Hash) types.Receipts func (cr *fakeChainReader) ContractCode(hash common.Hash) ([]byte, error) { return []byte{}, nil } func (cr *fakeChainReader) ValidatorCode(hash common.Hash) ([]byte, error) { return []byte{}, nil } func (cr *fakeChainReader) ReadShardState(epoch *big.Int) (*shard.State, error) { return nil, nil } +func (cr *fakeChainReader) TrieDB() *trie.Database { return nil } func (cr *fakeChainReader) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, nil } func (cr *fakeChainReader) ReadValidatorList() ([]common.Address, error) { return nil, nil } func (cr *fakeChainReader) ValidatorCandidates() []common.Address { return nil } @@ -273,6 +276,9 @@ func (cr *fakeChainReader) ReadValidatorInformationAtState( func (cr *fakeChainReader) StateAt(root common.Hash) (*state.DB, error) { return nil, nil } +func (cr *fakeChainReader) Snapshots() *snapshot.Tree { + return nil +} func (cr *fakeChainReader) ReadValidatorSnapshot( addr common.Address, ) (*staking.ValidatorSnapshot, error) { From 57dd5f26783deab49cf4ec37e6bd9c772106dfda Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Thu, 26 Oct 2023 07:35:32 -0800 Subject: [PATCH 005/128] Fixed. (#4543) --- shard/committee/assignment.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/shard/committee/assignment.go b/shard/committee/assignment.go index 85162e6712..4978b61559 100644 --- a/shard/committee/assignment.go +++ b/shard/committee/assignment.go @@ -25,19 +25,6 @@ import ( "github.com/pkg/errors" ) -// ValidatorListProvider .. -type ValidatorListProvider interface { - Compute( - epoch *big.Int, reader DataProvider, - ) (*shard.State, error) - ReadFromDB(epoch *big.Int, reader DataProvider) (*shard.State, error) -} - -// Reader is committee.Reader and it is the API that committee membership assignment needs -type Reader interface { - ValidatorListProvider -} - // StakingCandidatesReader .. type StakingCandidatesReader interface { CurrentBlock() *types.Block @@ -272,7 +259,7 @@ type partialStakingEnabled struct{} var ( // WithStakingEnabled .. - WithStakingEnabled Reader = partialStakingEnabled{} + WithStakingEnabled = partialStakingEnabled{} // ErrComputeForEpochInPast .. ErrComputeForEpochInPast = errors.New("cannot compute for epoch in past") ) From b1389dacc452688584199958b58544c688e62e17 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Thu, 26 Oct 2023 07:36:13 -0800 Subject: [PATCH 006/128] Fixed data race. (#4544) * Fixed data race. * Additional error checking for InsertChain. 
--- api/service/legacysync/epoch_syncing.go | 14 ++- api/service/stagedstreamsync/beacon_helper.go | 5 +- api/service/stagedstreamsync/sig_verify.go | 9 +- api/service/stagedstreamsync/stage_epoch.go | 20 +++-- .../stagedstreamsync/staged_stream_sync.go | 8 +- api/service/stagedsync/stage_state.go | 2 +- api/service/stagedsync/stagedsync.go | 7 +- consensus/consensus_v2.go | 3 +- consensus/downloader.go | 3 +- node/node_explorer.go | 2 +- p2p/host.go | 2 +- p2p/security/security.go | 88 ++++++++----------- p2p/security/security_test.go | 24 +++-- 13 files changed, 105 insertions(+), 82 deletions(-) diff --git a/api/service/legacysync/epoch_syncing.go b/api/service/legacysync/epoch_syncing.go index e4453cb69c..5d9b4dab10 100644 --- a/api/service/legacysync/epoch_syncing.go +++ b/api/service/legacysync/epoch_syncing.go @@ -199,8 +199,18 @@ func processWithPayload(payload [][]byte, bc core.BlockChain) error { decoded = append(decoded, block) } - _, err := bc.InsertChain(decoded, true) - return err + for _, block := range decoded { + _, err := bc.InsertChain([]*types.Block{block}, true) + switch { + case errors.Is(err, core.ErrKnownBlock): + continue + case err != nil: + return err + default: + } + } + + return nil } // CreateSyncConfig creates SyncConfig for StateSync object. diff --git a/api/service/stagedstreamsync/beacon_helper.go b/api/service/stagedstreamsync/beacon_helper.go index a996f368bf..4fe70dc23d 100644 --- a/api/service/stagedstreamsync/beacon_helper.go +++ b/api/service/stagedstreamsync/beacon_helper.go @@ -1,8 +1,10 @@ package stagedstreamsync import ( + "errors" "time" + "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" "github.com/rs/zerolog" @@ -126,7 +128,8 @@ func (bh *beaconHelper) insertLastMileBlocks() (inserted int, bn uint64, err err } // TODO: Instruct the beacon helper to verify signatures. 
This may require some forks // in pub-sub message (add commit sigs in node.block.sync messages) - if _, err = bh.bc.InsertChain(types.Blocks{b}, true); err != nil { + _, err = bh.bc.InsertChain(types.Blocks{b}, true) + if err != nil && !errors.Is(err, core.ErrKnownBlock) { bn-- return } diff --git a/api/service/stagedstreamsync/sig_verify.go b/api/service/stagedstreamsync/sig_verify.go index 649c6eaec1..8de71effca 100644 --- a/api/service/stagedstreamsync/sig_verify.go +++ b/api/service/stagedstreamsync/sig_verify.go @@ -3,6 +3,7 @@ package stagedstreamsync import ( "fmt" + "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/crypto/bls" "github.com/harmony-one/harmony/internal/chain" @@ -53,8 +54,14 @@ func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*type if err := bc.Engine().VerifyHeader(bc, block.Header(), true); err != nil { return errors.Wrap(err, "[VerifyHeader]") } - if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil { + _, err = bc.InsertChain(types.Blocks{block}, false) + switch { + case errors.Is(err, core.ErrKnownBlock): + return nil + case err != nil: return errors.Wrap(err, "[InsertChain]") + default: + } return nil } diff --git a/api/service/stagedstreamsync/stage_epoch.go b/api/service/stagedstreamsync/stage_epoch.go index e84b74f340..8129ce0db4 100644 --- a/api/service/stagedstreamsync/stage_epoch.go +++ b/api/service/stagedstreamsync/stage_epoch.go @@ -4,6 +4,7 @@ import ( "context" "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/harmony-one/harmony/shard" @@ -129,13 +130,20 @@ func (sr *StageEpoch) doShortRangeSyncForEpochSync(ctx context.Context, s *Stage return 0, nil } - n, err := s.state.bc.InsertChain(blocks, true) - numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n)) - if err != nil { - utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("Insert block failed") - sh.streamsFailed([]sttypes.StreamID{streamID}, "corrupted data") - return n, err + n := 0 + for _, block := range blocks { + _, err := s.state.bc.InsertChain([]*types.Block{block}, true) + switch { + case errors.Is(err, core.ErrKnownBlock): + case err != nil: + utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("Insert block failed") + sh.streamsFailed([]sttypes.StreamID{streamID}, "corrupted data") + return n, err + default: + } + n++ } + numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n)) return n, nil } diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 1592186b52..fc59290474 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -636,7 +636,8 @@ func (ss *StagedStreamSync) addConsensusLastMile(bc core.BlockChain, cs *consens if block == nil { break } - if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil { + _, err := bc.InsertChain(types.Blocks{block}, true) + if err != nil && !errors.Is(err, core.ErrKnownBlock) { return errors.Wrap(err, "failed to InsertChain") } hashes = append(hashes, block.Header().Hash()) @@ -704,13 +705,16 @@ func (ss *StagedStreamSync) UpdateBlockAndStatus(block *types.Block, bc core.Blo } _, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */) - if err != nil { + 
switch { + case errors.Is(err, core.ErrKnownBlock): + case err != nil: utils.Logger().Error(). Err(err). Uint64("block number", block.NumberU64()). Uint32("shard", block.ShardID()). Msgf("[STAGED_STREAM_SYNC] UpdateBlockAndStatus: Error adding new block to blockchain") return err + default: } utils.Logger().Info(). Uint64("blockHeight", block.NumberU64()). diff --git a/api/service/stagedsync/stage_state.go b/api/service/stagedsync/stage_state.go index 7086acec18..1e2b38bd97 100644 --- a/api/service/stagedsync/stage_state.go +++ b/api/service/stagedsync/stage_state.go @@ -178,7 +178,7 @@ func (stg *StageStates) Exec(firstCycle bool, invalidBlockRevert bool, s *StageS headBeforeNewBlocks := stg.configs.bc.CurrentBlock().NumberU64() headHashBeforeNewBlocks := stg.configs.bc.CurrentBlock().Hash() _, err = stg.configs.bc.InsertChain(newBlocks, false) //TODO: verifyHeaders can be done here - if err != nil { + if err != nil && !errors.Is(err, core.ErrKnownBlock) { // TODO: handle chain rollback because of bad block utils.Logger().Error(). Err(err). diff --git a/api/service/stagedsync/stagedsync.go b/api/service/stagedsync/stagedsync.go index f1de66f9fc..4cc2a98ea7 100644 --- a/api/service/stagedsync/stagedsync.go +++ b/api/service/stagedsync/stagedsync.go @@ -1091,13 +1091,16 @@ func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChai } _, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */) - if err != nil { + switch { + case errors.Is(err, core.ErrKnownBlock): + case err != nil: utils.Logger().Error(). Err(err). Uint64("block number", block.NumberU64()). Uint32("shard", block.ShardID()). Msgf("[STAGED_SYNC] UpdateBlockAndStatus: Error adding new block to blockchain") return err + default: } utils.Logger().Info(). Uint64("blockHeight", block.NumberU64()). 
@@ -1218,7 +1221,7 @@ func (ss *StagedSync) addConsensusLastMile(bc core.BlockChain, cs *consensus.Con if block == nil { break } - if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil { + if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil && !errors.Is(err, core.ErrKnownBlock) { return errors.Wrap(err, "failed to InsertChain") } } diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 5beb54ed72..04c590b2eb 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -661,7 +661,8 @@ func (consensus *Consensus) tryCatchup() error { func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMessage) error { if consensus.Blockchain().CurrentBlock().NumberU64() < blk.NumberU64() { - if _, err := consensus.Blockchain().InsertChain([]*types.Block{blk}, !consensus.fBFTLog.IsBlockVerified(blk.Hash())); err != nil { + _, err := consensus.Blockchain().InsertChain([]*types.Block{blk}, !consensus.fBFTLog.IsBlockVerified(blk.Hash())) + if err != nil && !errors.Is(err, core.ErrKnownBlock) { consensus.getLogger().Error().Err(err).Msg("[commitBlock] Failed to add block to chain") return err } diff --git a/consensus/downloader.go b/consensus/downloader.go index 1fdc131e7a..84414aa804 100644 --- a/consensus/downloader.go +++ b/consensus/downloader.go @@ -2,6 +2,7 @@ package consensus import ( "github.com/ethereum/go-ethereum/event" + "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" "github.com/pkg/errors" ) @@ -93,7 +94,7 @@ func (consensus *Consensus) AddConsensusLastMile() error { if block == nil { break } - if _, err := consensus.Blockchain().InsertChain(types.Blocks{block}, true); err != nil { + if _, err := consensus.Blockchain().InsertChain(types.Blocks{block}, true); err != nil && !errors.Is(err, core.ErrKnownBlock) { return errors.Wrap(err, "failed to InsertChain") } } diff --git a/node/node_explorer.go b/node/node_explorer.go index fbb5b88985..ce1b0a2445 100644 --- a/node/node_explorer.go +++ b/node/node_explorer.go @@ -154,7 +154,7 @@ func (node *Node) AddNewBlockForExplorer(block *types.Block) { utils.Logger().Info().Uint64("blockHeight", block.NumberU64()).Msg("[Explorer] Adding new block for explorer node") - if _, err := node.Blockchain().InsertChain([]*types.Block{block}, false); err == nil { + if _, err := node.Blockchain().InsertChain([]*types.Block{block}, false); err == nil || errors.Is(err, core.ErrKnownBlock) { if block.IsLastBlockInEpoch() { node.Consensus.UpdateConsensusInformation() } diff --git a/p2p/host.go b/p2p/host.go index 31c9c2c44a..62015fc8cf 100644 --- a/p2p/host.go +++ b/p2p/host.go @@ -254,7 +254,7 @@ func NewHost(cfg HostConfig) (Host, error) { self.PeerID = p2pHost.ID() subLogger := utils.Logger().With().Str("hostID", p2pHost.ID().Pretty()).Logger() - security := security.NewManager(cfg.MaxConnPerIP, cfg.MaxPeers) + security := security.NewManager(cfg.MaxConnPerIP, int(cfg.MaxPeers)) // has to save the private key for host h := &HostV2{ h: p2pHost, diff --git a/p2p/security/security.go b/p2p/security/security.go index 932f8b6e9a..7c8825ffb8 100644 --- a/p2p/security/security.go +++ b/p2p/security/security.go @@ -3,7 +3,6 @@ package security import ( "fmt" "sync" - "sync/atomic" "github.com/harmony-one/harmony/internal/utils" libp2p_network "github.com/libp2p/go-libp2p/core/network" @@ -18,56 +17,53 @@ type Security interface { type Manager struct { maxConnPerIP int - maxPeers int64 + maxPeers int mutex sync.Mutex - peers peerMap // All the connected nodes, key 
is the Peer's IP, value is the peer's ID array + peers *peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array } type peerMap struct { - count int64 - peers sync.Map + peers map[string][]string } -func (peerMap *peerMap) Len() int64 { - return atomic.LoadInt64(&peerMap.count) +func newPeersMap() *peerMap { + return &peerMap{ + peers: make(map[string][]string), + } } -func (peerMap *peerMap) Store(key, value interface{}) { - // only increment if you didn't have this key - hasKey := peerMap.HasKey(key) - peerMap.peers.Store(key, value) - if !hasKey { - atomic.AddInt64(&peerMap.count, 1) - } +func (peerMap *peerMap) Len() int { + return len(peerMap.peers) } -func (peerMap *peerMap) HasKey(key interface{}) bool { - hasKey := false - peerMap.peers.Range(func(k, v interface{}) bool { - if k == key { - hasKey = true - return false - } - return true - }) - return hasKey +func (peerMap *peerMap) Store(key string, value []string) { + peerMap.peers[key] = value +} + +func (peerMap *peerMap) HasKey(key string) bool { + _, ok := peerMap.peers[key] + return ok } -func (peerMap *peerMap) Delete(key interface{}) { - peerMap.peers.Delete(key) - atomic.AddInt64(&peerMap.count, -1) +func (peerMap *peerMap) Delete(key string) { + delete(peerMap.peers, key) } -func (peerMap *peerMap) Load(key interface{}) (value interface{}, ok bool) { - return peerMap.peers.Load(key) +func (peerMap *peerMap) Load(key string) (value []string, ok bool) { + value, ok = peerMap.peers[key] + return value, ok } -func (peerMap *peerMap) Range(f func(key, value any) bool) { - peerMap.peers.Range(f) +func (peerMap *peerMap) Range(f func(key string, value []string) bool) { + for key, value := range peerMap.peers { + if !f(key, value) { + break + } + } } -func NewManager(maxConnPerIP int, maxPeers int64) *Manager { +func NewManager(maxConnPerIP int, maxPeers int) *Manager { if maxConnPerIP < 0 { panic("maximum connections per IP must not be negative") } @@ -77,9 +73,16 @@ func NewManager(maxConnPerIP int, maxPeers int64) *Manager { return &Manager{ maxConnPerIP: maxConnPerIP, maxPeers: maxPeers, + peers: newPeersMap(), } } +func (m *Manager) RangePeers(f func(key string, value []string) bool) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.peers.Range(f) +} + func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network.Conn) error { m.mutex.Lock() defer m.mutex.Unlock() @@ -89,19 +92,11 @@ func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network return errors.Wrap(err, "failed on get remote ip") } - value, ok := m.peers.Load(remoteIp) - if !ok { - value = []string{} - } - - peers, ok := value.([]string) - if !ok { - return errors.New("peers info type err") - } + peers, _ := m.peers.Load(remoteIp) // avoid add repeatedly peerID := conn.RemotePeer().String() - _, ok = find(peers, peerID) + _, ok := find(peers, peerID) if !ok { peers = append(peers, peerID) } @@ -118,7 +113,7 @@ func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network // only limit addition if it's a new peer and not an existing peer with new connection if m.maxPeers > 0 && currentPeerCount >= m.maxPeers && !m.peers.HasKey(remoteIp) { utils.Logger().Warn(). - Int64("connected peers", currentPeerCount). + Int("connected peers", currentPeerCount). Str("new peer", remoteIp). 
Msg("too many peers, closing") return net.ClosePeer(conn.RemotePeer()) @@ -136,16 +131,11 @@ func (m *Manager) OnDisconnectCheck(conn libp2p_network.Conn) error { return errors.Wrap(err, "failed on get ip") } - value, ok := m.peers.Load(ip) + peers, ok := m.peers.Load(ip) if !ok { return nil } - peers, ok := value.([]string) - if !ok { - return errors.New("peers info type err") - } - peerID := conn.RemotePeer().String() index, ok := find(peers, peerID) if ok { diff --git a/p2p/security/security_test.go b/p2p/security/security_test.go index 73ce4741e7..cdaa99f933 100644 --- a/p2p/security/security_test.go +++ b/p2p/security/security_test.go @@ -13,6 +13,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type ConnectCallback func(net libp2p_network.Network, conn libp2p_network.Conn) error @@ -53,7 +54,7 @@ func (mh *fakeHost) SetDisconnectCallback(callback DisconnectCallback) { func TestManager_OnConnectCheck(t *testing.T) { h1, err := newPeer(50550) - assert.Nil(t, err) + require.NoError(t, err) defer h1.Close() fakeHost := &fakeHost{} @@ -65,10 +66,9 @@ func TestManager_OnConnectCheck(t *testing.T) { assert.Nil(t, err) defer h2.Close() err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) - assert.Nil(t, err) + require.NoError(t, err) - security.peers.Range(func(k, v interface{}) bool { - peers := v.([]string) + security.RangePeers(func(k string, peers []string) bool { assert.Equal(t, 1, len(peers)) return true }) @@ -78,9 +78,8 @@ func TestManager_OnConnectCheck(t *testing.T) { defer h3.Close() err = h3.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) assert.Nil(t, err) - security.peers.Range(func(k, v interface{}) bool { - peers := v.([]string) - assert.Equal(t, 2, len(peers)) + security.RangePeers(func(k string, peers []string) bool { + require.Equal(t, 2, len(peers)) return true }) @@ -89,9 +88,8 @@ func TestManager_OnConnectCheck(t *testing.T) { defer h4.Close() err = h4.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) assert.Nil(t, err) - security.peers.Range(func(k, v interface{}) bool { - peers := v.([]string) - assert.Equal(t, 2, len(peers)) + security.RangePeers(func(k string, peers []string) bool { + require.Equal(t, 2, len(peers)) return true }) } @@ -112,8 +110,7 @@ func TestManager_OnDisconnectCheck(t *testing.T) { err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) assert.Nil(t, err) - security.peers.Range(func(k, v interface{}) bool { - peers := v.([]string) + security.RangePeers(func(k string, peers []string) bool { assert.Equal(t, 1, len(peers)) return true }) @@ -121,8 +118,7 @@ func TestManager_OnDisconnectCheck(t *testing.T) { err = h2.Network().ClosePeer(h1.ID()) assert.Nil(t, err) time.Sleep(200 * time.Millisecond) - security.peers.Range(func(k, v interface{}) bool { - peers := v.([]string) + security.RangePeers(func(k string, peers []string) bool { assert.Equal(t, 0, len(peers)) return true }) From 2f23d81879455e55cdb30f3079a7870cf30e48fe Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Tue, 13 Jun 2023 17:13:40 -0400 Subject: [PATCH 007/128] Last mile blocks insert. 
--- api/service/stagedstreamsync/downloader.go | 3 +- api/service/stagedstreamsync/downloaders.go | 4 +-- api/service/stagedstreamsync/service.go | 4 +-- .../stagedstreamsync/stage_short_range.go | 34 ++++++++++++++++--- api/service/stagedstreamsync/syncing.go | 6 ++-- cmd/harmony/main.go | 5 +-- core/blockchain.go | 8 ----- core/blockchain_impl.go | 5 +-- core/blockchain_stub.go | 4 --- node/node.go | 4 +++ node/node_handler.go | 19 ++++++----- 11 files changed, 60 insertions(+), 36 deletions(-) diff --git a/api/service/stagedstreamsync/downloader.go b/api/service/stagedstreamsync/downloader.go index 3711048955..156722a143 100644 --- a/api/service/stagedstreamsync/downloader.go +++ b/api/service/stagedstreamsync/downloader.go @@ -6,6 +6,7 @@ import ( "time" "github.com/ethereum/go-ethereum/event" + "github.com/harmony-one/harmony/consensus" "github.com/rs/zerolog" "github.com/harmony-one/harmony/consensus" @@ -38,7 +39,7 @@ type ( ) // NewDownloader creates a new downloader -func NewDownloader(host p2p.Host, bc core.BlockChain, consensus *consensus.Consensus, dbDir string, isBeaconNode bool, config Config) *Downloader { +func NewDownloader(host p2p.Host, bc core.BlockChain, consensus *consensus.Consensus, dbDir string, isBeaconNode bool, config Config, c *consensus.Consensus) *Downloader { config.fixValues() sp := sync.NewProtocol(sync.Config{ diff --git a/api/service/stagedstreamsync/downloaders.go b/api/service/stagedstreamsync/downloaders.go index 583f3e1523..08a8e40de4 100644 --- a/api/service/stagedstreamsync/downloaders.go +++ b/api/service/stagedstreamsync/downloaders.go @@ -16,7 +16,7 @@ type Downloaders struct { } // NewDownloaders creates Downloaders for sync of multiple blockchains -func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, dbDir string, config Config) *Downloaders { +func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, dbDir string, config Config, c *consensus.Consensus) *Downloaders { ds := make(map[uint32]*Downloader) isBeaconNode := len(bcs) == 1 for _, bc := range bcs { @@ -26,7 +26,7 @@ func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus *consensus.C if _, ok := ds[bc.ShardID()]; ok { continue } - ds[bc.ShardID()] = NewDownloader(host, bc, consensus, dbDir, isBeaconNode, config) + ds[bc.ShardID()] = NewDownloader(host, bc, consensus, dbDir, isBeaconNode, config, c) } return &Downloaders{ ds: ds, diff --git a/api/service/stagedstreamsync/service.go b/api/service/stagedstreamsync/service.go index f7ffd7f2d9..90db7eadac 100644 --- a/api/service/stagedstreamsync/service.go +++ b/api/service/stagedstreamsync/service.go @@ -12,9 +12,9 @@ type StagedStreamSyncService struct { } // NewService creates a new downloader service -func NewService(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, config Config, dbDir string) *StagedStreamSyncService { +func NewService(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, config Config, dbDir string, c *consensus.Consensus) *StagedStreamSyncService { return &StagedStreamSyncService{ - Downloaders: NewDownloaders(host, bcs, consensus, dbDir, config), + Downloaders: NewDownloaders(host, bcs, consensus, dbDir, config, c), } } diff --git a/api/service/stagedstreamsync/stage_short_range.go b/api/service/stagedstreamsync/stage_short_range.go index ce6cdf36bc..f3037869ae 100644 --- a/api/service/stagedstreamsync/stage_short_range.go +++ b/api/service/stagedstreamsync/stage_short_range.go @@ -3,7 +3,9 @@ 
package stagedstreamsync import ( "context" + "github.com/harmony-one/harmony/consensus" "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/harmony-one/harmony/shard" @@ -18,6 +20,7 @@ type StageShortRange struct { type StageShortRangeCfg struct { bc core.BlockChain db kv.RwDB + c *consensus.Consensus } func NewStageShortRange(cfg StageShortRangeCfg) *StageShortRange { @@ -26,10 +29,11 @@ func NewStageShortRange(cfg StageShortRangeCfg) *StageShortRange { } } -func NewStageShortRangeCfg(bc core.BlockChain, db kv.RwDB) StageShortRangeCfg { +func NewStageShortRangeCfg(bc core.BlockChain, db kv.RwDB, c *consensus.Consensus) StageShortRangeCfg { return StageShortRangeCfg{ bc: bc, db: db, + c: c, } } @@ -104,9 +108,12 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState) return 0, errors.Wrap(err, "prerequisite") } } - curBN := sr.configs.bc.CurrentBlock().NumberU64() - blkNums := sh.prepareBlockHashNumbers(curBN) - hashChain, whitelist, err := sh.getHashChain(ctx, blkNums) + var ( + bc = sr.configs.bc + curBN = bc.CurrentHeader().NumberU64() + blkNums = sh.prepareBlockHashNumbers(curBN) + hashChain, whitelist, err = sh.getHashChain(ctx, blkNums) + ) if err != nil { if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { return 0, nil @@ -156,6 +163,25 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState) return 0, err } + numInserted := 0 + err = sr.configs.c.GetLastMileBlockIter(sr.configs.bc.CurrentHeader().NumberU64()+1, func(blockIter *consensus.LastMileBlockIter) error { + for { + block := blockIter.Next() + if block == nil { + break + } + if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil { + return errors.Wrap(err, "failed to InsertChain") + } + numInserted++ + } + return nil + }) + if err != nil { + return 0, errors.WithMessage(err, "failed to InsertChain for last mile blocks") + } + utils.Logger().Info().Int("last mile blocks inserted", numInserted).Msg("Insert last mile blocks success") + return n, nil } diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 738f2f9203..adf52ae9f6 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -46,8 +46,9 @@ func CreateStagedSync(ctx context.Context, protocol syncProtocol, config Config, logger zerolog.Logger, + logProgress bool, + c *consensus.Consensus, ) (*StagedStreamSync, error) { - logger.Info(). Uint32("shard", bc.ShardID()). Bool("beaconNode", isBeaconNode). @@ -56,7 +57,6 @@ func CreateStagedSync(ctx context.Context, Bool("serverOnly", config.ServerOnly). Int("minStreams", config.MinStreams). 
Msg(WrapStagedSyncMsg("creating staged sync")) - var mainDB kv.RwDB dbs := make([]kv.RwDB, config.Concurrency) if config.UseMemDB { @@ -82,7 +82,7 @@ func CreateStagedSync(ctx context.Context, } stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) - stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) + stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB, c) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index 9fc89d45da..b1afbe6bf8 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "math/big" "math/rand" @@ -521,7 +522,7 @@ func setupNodeAndRun(hc harmonyconfig.HarmonyConfig) { Msg("Start p2p host failed") } - if err := currentNode.BootstrapConsensus(); err != nil { + if err := node.BootstrapConsensus(context.TODO(), currentNode.Consensus, currentNode.Host()); err != nil { fmt.Fprint(os.Stderr, "could not bootstrap consensus", err.Error()) if !currentNode.NodeConfig.IsOffline { os.Exit(-1) @@ -1032,7 +1033,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har } } //Setup stream sync service - s := stagedstreamsync.NewService(host, blockchains, node.Consensus, sConfig, hc.General.DataDir) + s := stagedstreamsync.NewService(host, blockchains, node.Consensus, sConfig, hc.General.DataDir, node.Consensus) node.RegisterService(service.StagedStreamSync, s) diff --git a/core/blockchain.go b/core/blockchain.go index 0adc96925e..40d33100a2 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -109,14 +109,6 @@ type BlockChain interface { // but does not write any state. This is used to construct competing side forks // up to the point where they exceed the canonical total difficulty. WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) - // WriteBlockWithState writes the block and all associated state to the database. - WriteBlockWithState( - block *types.Block, receipts []*types.Receipt, - cxReceipts []*types.CXReceipt, - stakeMsgs []types2.StakeMsg, - paid reward.Reader, - state *state.DB, - ) (status WriteStatus, err error) // GetMaxGarbageCollectedBlockNumber .. GetMaxGarbageCollectedBlockNumber() int64 // InsertChain attempts to insert the given batch of blocks in to the canonical diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index e9eca1f4cd..9e7f1134b0 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1473,7 +1473,8 @@ func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block, td *big.Int return nil } -func (bc *BlockChainImpl) WriteBlockWithState( +// writeBlockWithState writes the block and all associated state to the database. +func (bc *BlockChainImpl) writeBlockWithState( block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, stakeMsgs []staking.StakeMsg, @@ -1880,7 +1881,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Write the block to the chain and get the status. 
substart = time.Now() - status, err := bc.WriteBlockWithState( + status, err := bc.writeBlockWithState( block, receipts, cxReceipts, stakeMsgs, payout, state, ) if err != nil { diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index e9ef10ce94..e42a12b10e 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -124,10 +124,6 @@ func (a Stub) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error return errors.Errorf("method WriteBlockWithoutState not implemented for %s", a.Name) } -func (a Stub) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, stakeMsgs []staking.StakeMsg, paid reward.Reader, state *state.DB) (status WriteStatus, err error) { - return 0, errors.Errorf("method WriteBlockWithState not implemented for %s", a.Name) -} - func (a Stub) GetMaxGarbageCollectedBlockNumber() int64 { return 0 } diff --git a/node/node.go b/node/node.go index e4d567066b..41373e1b5d 100644 --- a/node/node.go +++ b/node/node.go @@ -149,6 +149,10 @@ type Node struct { registry *registry.Registry } +func (node *Node) Host() p2p.Host { + return node.host +} + // Blockchain returns the blockchain for the node's current shard. func (node *Node) Blockchain() core.BlockChain { return node.registry.GetBlockchain() diff --git a/node/node_handler.go b/node/node_handler.go index eeaf90f2d7..89464d3c0c 100644 --- a/node/node_handler.go +++ b/node/node_handler.go @@ -404,16 +404,21 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error { } // BootstrapConsensus is a goroutine to check number of peers and start the consensus -func (node *Node) BootstrapConsensus() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) +func BootstrapConsensus(ctx context.Context, consensus *consensus.Consensus, host p2p.Host) error { + ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - min := node.Consensus.MinPeers + min := consensus.MinPeers enoughMinPeers := make(chan struct{}) const checkEvery = 3 * time.Second go func() { for { - <-time.After(checkEvery) - numPeersNow := node.host.GetPeerCount() + select { + case <-ctx.Done(): + return + case <-time.After(checkEvery): + } + + numPeersNow := host.GetPeerCount() if numPeersNow >= min { utils.Logger().Info().Msg("[bootstrap] StartConsensus") enoughMinPeers <- struct{}{} @@ -432,9 +437,7 @@ func (node *Node) BootstrapConsensus() error { case <-ctx.Done(): return ctx.Err() case <-enoughMinPeers: - go func() { - node.Consensus.StartChannel() - }() + go consensus.StartChannel() return nil } } From fad17fe5393f308a362a9c912b90e6ae72d906ef Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Tue, 13 Jun 2023 20:26:17 -0400 Subject: [PATCH 008/128] Added logs. --- api/service/stagedstreamsync/syncing.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index adf52ae9f6..416c5c1a16 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -248,8 +248,14 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo for { ctx, cancel := context.WithCancel(downloaderContext) - + started := s.bc.CurrentHeader().NumberU64() n, err := s.doSyncCycle(ctx, initSync) + finished := s.bc.CurrentHeader().NumberU64() + utils.Logger().Info(). + Uint64("from", started). + Int("returned", n). + Uint64("to", finished). 
+ Msg(WrapStagedSyncMsg("synced blocks")) if err != nil { utils.Logger().Error(). Err(err). From c63818480c0c2a808331afadbad2185520f38579 Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Tue, 13 Jun 2023 20:47:55 -0400 Subject: [PATCH 009/128] Added logs. --- api/service/stagedstreamsync/syncing.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 416c5c1a16..18a1d79d8d 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -246,7 +246,9 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo s.startSyncing() defer s.finishSyncing() + i := 0 for { + i++ ctx, cancel := context.WithCancel(downloaderContext) started := s.bc.CurrentHeader().NumberU64() n, err := s.doSyncCycle(ctx, initSync) @@ -255,6 +257,8 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo Uint64("from", started). Int("returned", n). Uint64("to", finished). + Bool("initSync", initSync). + Int("cycle", i). Msg(WrapStagedSyncMsg("synced blocks")) if err != nil { utils.Logger().Error(). From b6987ec68fadcab6a1fb1ef42b6a651178cc916e Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Tue, 13 Jun 2023 20:51:01 -0400 Subject: [PATCH 010/128] Added logs. --- api/service/stagedstreamsync/syncing.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 18a1d79d8d..bf5b595261 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -379,6 +379,9 @@ func (s *StagedStreamSync) finishSyncing() { if s.evtDownloadFinishedSubscribed { s.evtDownloadFinished.Send(struct{}{}) } + utils.Logger().Info(). + Bool("evtDownloadFinishedSubscribed", s.evtDownloadFinishedSubscribed). + Msg(WrapStagedSyncMsg("finished syncing")) } func (s *StagedStreamSync) checkPrerequisites() error { From 525b20ae20a8d503cd3932cc2b7053627e9758cf Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Tue, 13 Jun 2023 21:14:34 -0400 Subject: [PATCH 011/128] Start and finish syncing. --- api/service/stagedstreamsync/syncing.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index bf5b595261..3ebb4f21ba 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -227,6 +227,9 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo return 0, 0, err } + s.startSyncing() + defer s.finishSyncing() + var estimatedHeight uint64 if initSync { if h, err := s.estimateCurrentNumber(downloaderContext); err != nil { @@ -243,9 +246,6 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo } } - s.startSyncing() - defer s.finishSyncing() - i := 0 for { i++ From 0e4568253a5e027ddedffaf9191d143f23593892 Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Sun, 25 Jun 2023 00:24:26 -0400 Subject: [PATCH 012/128] Passed peerID to handlers. 
--- api/service/stagedstreamsync/stage_heads.go | 2 +- consensus/consensus.go | 17 ++++------- consensus/consensus_test.go | 5 ++++ consensus/consensus_v2.go | 26 +++++++--------- consensus/fbft_log.go | 8 ++++- consensus/fbft_log_test.go | 4 +-- consensus/validator.go | 13 ++++---- consensus/view_change_construct.go | 4 ++- consensus/view_change_msg.go | 2 +- internal/utils/blockedpeers/manager.go | 33 +++++++++++++++++++++ internal/utils/blockedpeers/manager_test.go | 26 ++++++++++++++++ internal/utils/lrucache/lrucache.go | 6 ++++ internal/utils/timer.go | 4 ++- node/node.go | 10 +++++-- p2p/host.go | 20 +++++++------ p2p/security/security.go | 29 +++++++++++------- p2p/security/security_test.go | 5 ++-- 17 files changed, 149 insertions(+), 65 deletions(-) create mode 100644 internal/utils/blockedpeers/manager.go create mode 100644 internal/utils/blockedpeers/manager_test.go diff --git a/api/service/stagedstreamsync/stage_heads.go b/api/service/stagedstreamsync/stage_heads.go index c917884a36..d05543c065 100644 --- a/api/service/stagedstreamsync/stage_heads.go +++ b/api/service/stagedstreamsync/stage_heads.go @@ -53,7 +53,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock maxHeight := s.state.status.targetBN maxBlocksPerSyncCycle := uint64(1024) // TODO: should be in config -> s.state.MaxBlocksPerSyncCycle - currentHeight := heads.configs.bc.CurrentBlock().NumberU64() + currentHeight := heads.configs.bc.CurrentHeader().NumberU64() s.state.currentCycle.TargetHeight = maxHeight targetHeight := uint64(0) if errV := CreateView(ctx, heads.configs.db, tx, func(etx kv.Tx) (err error) { diff --git a/consensus/consensus.go b/consensus/consensus.go index b396f6eadd..bdb4803bb4 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -94,8 +94,6 @@ type Consensus struct { // The post-consensus job func passed from Node object // Called when consensus on a new block is done PostConsensusJob func(*types.Block) error - // The verifier func passed from Node object - BlockVerifier VerifyBlockFunc // verified block to state sync broadcast VerifiedNewBlock chan *types.Block // will trigger state syncing when blockNum is low @@ -171,12 +169,12 @@ func (consensus *Consensus) Beaconchain() core.BlockChain { } // VerifyBlock is a function used to verify the block and keep trace of verified blocks. -func (consensus *Consensus) verifyBlock(block *types.Block) error { - if !consensus.fBFTLog.IsBlockVerified(block.Hash()) { - if err := consensus.BlockVerifier(block); err != nil { +func (FBFTLog *FBFTLog) verifyBlock(block *types.Block) error { + if !FBFTLog.IsBlockVerified(block.Hash()) { + if err := FBFTLog.BlockVerify(block); err != nil { return errors.Errorf("Block verification failed: %s", err) } - consensus.fBFTLog.MarkBlockVerified(block) + FBFTLog.MarkBlockVerified(block) } return nil } @@ -304,12 +302,7 @@ func New( consensus.RndChannel = make(chan [vdfAndSeedSize]byte) consensus.IgnoreViewIDCheck = abool.NewBool(false) // Make Sure Verifier is not null - consensus.vc = newViewChange() - // TODO: reference to blockchain/beaconchain should be removed. 
- verifier := VerifyNewBlock(registry.GetWebHooks(), consensus.Blockchain(), consensus.Beaconchain()) - consensus.BlockVerifier = verifier - consensus.vc.verifyBlock = consensus.verifyBlock - + consensus.vc = newViewChange(consensus.FBFTLog.BlockVerify) // init prometheus metrics initMetrics() consensus.AddPubkeyMetrics() diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 697ba49525..83d2021e32 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -22,6 +22,7 @@ func TestConsensusInitialization(t *testing.T) { assert.NoError(t, err) messageSender := &MessageSender{host: host, retryTimes: int(phaseDuration.Seconds()) / RetryIntervalInSec} + fbtLog := NewFBFTLog(consensus.FBFTLog.verifyNewBlock) state := State{mode: Normal} timeouts := createTimeout() @@ -36,6 +37,10 @@ func TestConsensusInitialization(t *testing.T) { assert.IsType(t, make(chan struct{}), consensus.BlockNumLowChan) // FBFTLog + assert.Equal(t, fbtLog.blocks, consensus.FBFTLog.blocks) + assert.Equal(t, fbtLog.messages, consensus.FBFTLog.messages) + assert.Equal(t, len(fbtLog.verifiedBlocks), 0) + assert.Equal(t, fbtLog.verifiedBlocks, consensus.FBFTLog.verifiedBlocks) assert.NotNil(t, consensus.FBFTLog()) assert.Equal(t, FBFTAnnounce, consensus.phase) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 04c590b2eb..27c9b15bf3 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/hex" + libp2p_peer "github.com/libp2p/go-libp2p/core/peer" "math/big" "sync/atomic" "time" @@ -55,7 +56,7 @@ func (consensus *Consensus) isViewChangingMode() bool { } // HandleMessageUpdate will update the consensus state according to received message -func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, msg *msg_pb.Message, senderKey *bls.SerializedPublicKey) error { +func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, peer libp2p_peer.ID, msg *msg_pb.Message, senderKey *bls.SerializedPublicKey) error { consensus.mutex.Lock() defer consensus.mutex.Unlock() // when node is in ViewChanging mode, it still accepts normal messages into FBFTLog @@ -393,11 +394,12 @@ func (consensus *Consensus) tick() { // the bootstrap timer will be stopped once consensus is reached or view change // is succeeded if k != timeoutBootstrap { - consensus.getLogger().Debug(). - Str("k", k.String()). - Str("Mode", consensus.current.Mode().String()). - Msg("[ConsensusMainLoop] consensusTimeout stopped!!!") - v.Stop() + if v.Stop() { // prevent useless logs + consensus.getLogger().Debug(). + Str("k", k.String()). + Str("Mode", consensus.current.Mode().String()). 
+ Msg("[ConsensusMainLoop] consensusTimeout stopped!!!") + } continue } } @@ -453,7 +455,6 @@ func (consensus *Consensus) BlockChannel(newBlock *types.Block) { type LastMileBlockIter struct { blockCandidates []*types.Block fbftLog *FBFTLog - verify func(*types.Block) error curIndex int logger *zerolog.Logger } @@ -468,9 +469,6 @@ func (consensus *Consensus) GetLastMileBlockIter(bnStart uint64, cb func(iter *L // GetLastMileBlockIter get the iterator of the last mile blocks starting from number bnStart func (consensus *Consensus) getLastMileBlockIter(bnStart uint64, cb func(iter *LastMileBlockIter) error) error { - if consensus.BlockVerifier == nil { - return errors.New("consensus haven't initialized yet") - } blocks, _, err := consensus.getLastMileBlocksAndMsg(bnStart) if err != nil { return err @@ -478,7 +476,6 @@ func (consensus *Consensus) getLastMileBlockIter(bnStart uint64, cb func(iter *L return cb(&LastMileBlockIter{ blockCandidates: blocks, fbftLog: consensus.fBFTLog, - verify: consensus.BlockVerifier, curIndex: 0, logger: consensus.getLogger(), }) @@ -493,7 +490,7 @@ func (iter *LastMileBlockIter) Next() *types.Block { iter.curIndex++ if !iter.fbftLog.IsBlockVerified(block.Hash()) { - if err := iter.verify(block); err != nil { + if err := iter.fbftLog.BlockVerify(block); err != nil { iter.logger.Debug().Err(err).Msg("block verification failed in consensus last mile block") return nil } @@ -620,9 +617,6 @@ func (consensus *Consensus) verifyLastCommitSig(lastCommitSig []byte, blk *types // tryCatchup add the last mile block in PBFT log memory cache to blockchain. func (consensus *Consensus) tryCatchup() error { // TODO: change this to a more systematic symbol - if consensus.BlockVerifier == nil { - return errors.New("consensus haven't finished initialization") - } initBN := consensus.getBlockNum() defer consensus.postCatchup(initBN) @@ -637,7 +631,7 @@ func (consensus *Consensus) tryCatchup() error { } blk.SetCurrentCommitSig(msg.Payload) - if err := consensus.verifyBlock(blk); err != nil { + if err := consensus.FBFTLog.verifyBlock(blk); err != nil { consensus.getLogger().Err(err).Msg("[TryCatchup] failed block verifier") return err } diff --git a/consensus/fbft_log.go b/consensus/fbft_log.go index 982aecab75..7ffa2ff9ec 100644 --- a/consensus/fbft_log.go +++ b/consensus/fbft_log.go @@ -113,14 +113,16 @@ type FBFTLog struct { blocks map[common.Hash]*types.Block // store blocks received in FBFT verifiedBlocks map[common.Hash]struct{} // store block hashes for blocks that has already been verified messages map[fbftMsgID]*FBFTMessage // store messages received in FBFT + verifyNewBlock func(*types.Block) error // block verification function } // NewFBFTLog returns new instance of FBFTLog -func NewFBFTLog() *FBFTLog { +func NewFBFTLog(verifyNewBlock func(*types.Block) error) *FBFTLog { pbftLog := FBFTLog{ blocks: make(map[common.Hash]*types.Block), messages: make(map[fbftMsgID]*FBFTMessage), verifiedBlocks: make(map[common.Hash]struct{}), + verifyNewBlock: verifyNewBlock, } return &pbftLog } @@ -130,6 +132,10 @@ func (log *FBFTLog) AddBlock(block *types.Block) { log.blocks[block.Hash()] = block } +func (log *FBFTLog) BlockVerify(block *types.Block) error { + return log.verifyNewBlock(block) +} + // MarkBlockVerified marks the block as verified func (log *FBFTLog) MarkBlockVerified(block *types.Block) { log.verifiedBlocks[block.Hash()] = struct{}{} diff --git a/consensus/fbft_log_test.go b/consensus/fbft_log_test.go index 420effff4e..c22c70b3e7 100644 --- a/consensus/fbft_log_test.go +++ 
b/consensus/fbft_log_test.go @@ -65,7 +65,7 @@ func TestGetMessagesByTypeSeqViewHash(t *testing.T) { ViewID: 3, BlockHash: [32]byte{01, 02}, } - log := NewFBFTLog() + log := NewFBFTLog(nil) log.AddVerifiedMessage(&pbftMsg) found := log.GetMessagesByTypeSeqViewHash( @@ -90,7 +90,7 @@ func TestHasMatchingAnnounce(t *testing.T) { ViewID: 3, BlockHash: [32]byte{01, 02}, } - log := NewFBFTLog() + log := NewFBFTLog(nil) log.AddVerifiedMessage(&pbftMsg) found := log.HasMatchingViewAnnounce(2, 3, [32]byte{01, 02}) if !found { diff --git a/consensus/validator.go b/consensus/validator.go index 0506f4359d..02f92cd509 100644 --- a/consensus/validator.go +++ b/consensus/validator.go @@ -63,6 +63,11 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) { go func() { // Best effort check, no need to error out. _, err := consensus.ValidateNewBlock(recvMsg) + if err != nil { + // maybe ban sender + consensus.getLogger().Error(). + Err(err).Msgf("[Announce] Failed to validate block") + } if err == nil { consensus.GetLogger().Info(). Msg("[Announce] Block verified") @@ -76,6 +81,7 @@ func (consensus *Consensus) ValidateNewBlock(recvMsg *FBFTMessage) (*types.Block defer consensus.mutex.Unlock() return consensus.validateNewBlock(recvMsg) } + func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block, error) { if consensus.fBFTLog.IsBlockVerified(recvMsg.BlockHash) { var blockObj *types.Block @@ -125,12 +131,7 @@ func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block Hex("blockHash", recvMsg.BlockHash[:]). Msg("[validateNewBlock] Prepared message and block added") - if consensus.BlockVerifier == nil { - consensus.getLogger().Debug().Msg("[validateNewBlock] consensus received message before init. Ignoring") - return nil, errors.New("nil block verifier") - } - - if err := consensus.verifyBlock(&blockObj); err != nil { + if err := consensus.FBFTLog.verifyBlock(&blockObj); err != nil { consensus.getLogger().Error().Err(err).Msg("[validateNewBlock] Block verification failed") return nil, errors.Errorf("Block verification failed: %s", err.Error()) } diff --git a/consensus/view_change_construct.go b/consensus/view_change_construct.go index 061d2a795c..0c3aa1e60e 100644 --- a/consensus/view_change_construct.go +++ b/consensus/view_change_construct.go @@ -51,9 +51,11 @@ type viewChange struct { } // newViewChange returns a new viewChange object -func newViewChange() *viewChange { +func newViewChange(verifyBlock VerifyBlockFunc) *viewChange { vc := viewChange{} vc.Reset() + vc.verifyBlock = verifyBlock + return &vc } diff --git a/consensus/view_change_msg.go b/consensus/view_change_msg.go index 6c4b080055..21ec801aaa 100644 --- a/consensus/view_change_msg.go +++ b/consensus/view_change_msg.go @@ -45,7 +45,7 @@ func (consensus *Consensus) constructViewChangeMessage(priKey *bls.PrivateKeyWra Interface("preparedMsg", preparedMsg). 
Msg("[constructViewChangeMessage] found prepared msg") if block != nil { - if err := consensus.verifyBlock(block); err == nil { + if err := consensus.FBFTLog.verifyBlock(block); err == nil { tmpEncoded, err := rlp.EncodeToBytes(block) if err != nil { consensus.getLogger().Err(err).Msg("[constructViewChangeMessage] Failed encoding block") diff --git a/internal/utils/blockedpeers/manager.go b/internal/utils/blockedpeers/manager.go new file mode 100644 index 0000000000..04f50225d0 --- /dev/null +++ b/internal/utils/blockedpeers/manager.go @@ -0,0 +1,33 @@ +package blockedpeers + +import ( + "github.com/harmony-one/harmony/internal/utils/lrucache" + libp2p_peer "github.com/libp2p/go-libp2p/core/peer" + "time" +) + +type Manager struct { + internal *lrucache.Cache[libp2p_peer.ID, time.Time] +} + +func NewManager(size int) *Manager { + return &Manager{ + internal: lrucache.NewCache[libp2p_peer.ID, time.Time](size), + } +} + +func (m *Manager) IsBanned(key libp2p_peer.ID, now time.Time) bool { + future, ok := m.internal.Get(key) + if ok { + return future.After(now) // future > now + } + return ok +} + +func (m *Manager) Ban(key libp2p_peer.ID, future time.Time) { + m.internal.Set(key, future) +} + +func (m *Manager) Contains(key libp2p_peer.ID) bool { + return m.internal.Contains(key) +} diff --git a/internal/utils/blockedpeers/manager_test.go b/internal/utils/blockedpeers/manager_test.go new file mode 100644 index 0000000000..31d0b680df --- /dev/null +++ b/internal/utils/blockedpeers/manager_test.go @@ -0,0 +1,26 @@ +package blockedpeers + +import ( + libp2p_peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +func TestNewManager(t *testing.T) { + var ( + peer1 libp2p_peer.ID = "peer1" + now = time.Now() + m = NewManager(4) + ) + + t.Run("check_empty", func(t *testing.T) { + require.False(t, m.IsBanned(peer1, now), "peer1 should not be banned") + }) + t.Run("ban_peer1", func(t *testing.T) { + m.Ban(peer1, now.Add(2*time.Second)) + require.True(t, m.IsBanned(peer1, now), "peer1 should be banned") + require.False(t, m.IsBanned(peer1, now.Add(3*time.Second)), "peer1 should not be banned after 3 seconds") + }) + +} diff --git a/internal/utils/lrucache/lrucache.go b/internal/utils/lrucache/lrucache.go index 4859811b51..95d9b88bbe 100644 --- a/internal/utils/lrucache/lrucache.go +++ b/internal/utils/lrucache/lrucache.go @@ -25,3 +25,9 @@ func (c *Cache[K, V]) Get(key K) (V, bool) { func (c *Cache[K, V]) Set(key K, value V) { c.cache.Add(key, value) } + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. 
+func (c *Cache[K, V]) Contains(key K) bool { + return c.cache.Contains(key) +} diff --git a/internal/utils/timer.go b/internal/utils/timer.go index d355d5c719..176732fca3 100644 --- a/internal/utils/timer.go +++ b/internal/utils/timer.go @@ -34,9 +34,11 @@ func (timeout *Timeout) Start() { } // Stop stops the timeout clock -func (timeout *Timeout) Stop() { +func (timeout *Timeout) Stop() (stopped bool) { + stopped = timeout.state != Inactive timeout.state = Inactive timeout.start = time.Now() + return stopped } // Expired checks whether the timeout is reached/expired diff --git a/node/node.go b/node/node.go index 41373e1b5d..8d9665854f 100644 --- a/node/node.go +++ b/node/node.go @@ -559,7 +559,7 @@ func (node *Node) validateNodeMessage(ctx context.Context, payload []byte) ( // validate shardID // validate public key size // verify message signature -func validateShardBoundMessage(consensus *consensus.Consensus, nodeConfig *nodeconfig.ConfigType, payload []byte, +func validateShardBoundMessage(consensus *consensus.Consensus, peer libp2p_peer.ID, nodeConfig *nodeconfig.ConfigType, payload []byte, ) (*msg_pb.Message, *bls.SerializedPublicKey, bool, error) { var ( m msg_pb.Message @@ -740,6 +740,7 @@ func (node *Node) StartPubSub() error { // p2p consensus message handler function type p2pHandlerConsensus func( ctx context.Context, + peer libp2p_peer.ID, msg *msg_pb.Message, key *bls.SerializedPublicKey, ) error @@ -753,6 +754,7 @@ func (node *Node) StartPubSub() error { // interface pass to p2p message validator type validated struct { + peerID libp2p_peer.ID consensusBound bool handleC p2pHandlerConsensus handleCArg *msg_pb.Message @@ -810,7 +812,7 @@ func (node *Node) StartPubSub() error { // validate consensus message validMsg, senderPubKey, ignore, err := validateShardBoundMessage( - node.Consensus, node.NodeConfig, openBox[proto.MessageCategoryBytes:], + node.Consensus, peer, node.NodeConfig, openBox[proto.MessageCategoryBytes:], ) if err != nil { @@ -824,6 +826,7 @@ func (node *Node) StartPubSub() error { } msg.ValidatorData = validated{ + peerID: peer, consensusBound: true, handleC: node.Consensus.HandleMessageUpdate, handleCArg: validMsg, @@ -854,6 +857,7 @@ func (node *Node) StartPubSub() error { } } msg.ValidatorData = validated{ + peerID: peer, consensusBound: false, handleE: node.HandleNodeMessage, handleEArg: validMsg, @@ -905,7 +909,7 @@ func (node *Node) StartPubSub() error { errChan <- withError{err, nil} } } else { - if err := msg.handleC(ctx, msg.handleCArg, msg.senderPubKey); err != nil { + if err := msg.handleC(ctx, msg.peerID, msg.handleCArg, msg.senderPubKey); err != nil { errChan <- withError{err, msg.senderPubKey} } } diff --git a/p2p/host.go b/p2p/host.go index 62015fc8cf..745e9cef33 100644 --- a/p2p/host.go +++ b/p2p/host.go @@ -11,6 +11,13 @@ import ( "sync" "time" + "github.com/harmony-one/bls/ffi/go/bls" + nodeconfig "github.com/harmony-one/harmony/internal/configs/node" + "github.com/harmony-one/harmony/internal/utils" + "github.com/harmony-one/harmony/internal/utils/blockedpeers" + "github.com/harmony-one/harmony/p2p/discovery" + "github.com/harmony-one/harmony/p2p/security" + sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/libp2p/go-libp2p" dht "github.com/libp2p/go-libp2p-kad-dht" libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -24,19 +31,11 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" "github.com/libp2p/go-libp2p/p2p/net/connmgr" - 
"github.com/libp2p/go-libp2p/p2p/security/noise" libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" "github.com/rs/zerolog" - - "github.com/harmony-one/bls/ffi/go/bls" - nodeconfig "github.com/harmony-one/harmony/internal/configs/node" - "github.com/harmony-one/harmony/internal/utils" - "github.com/harmony-one/harmony/p2p/discovery" - "github.com/harmony-one/harmony/p2p/security" - sttypes "github.com/harmony-one/harmony/p2p/stream/types" ) type ConnectCallback func(net libp2p_network.Network, conn libp2p_network.Conn) error @@ -254,7 +253,8 @@ func NewHost(cfg HostConfig) (Host, error) { self.PeerID = p2pHost.ID() subLogger := utils.Logger().With().Str("hostID", p2pHost.ID().Pretty()).Logger() - security := security.NewManager(cfg.MaxConnPerIP, int(cfg.MaxPeers)) + banned := blockedpeers.NewManager(1024) + security := security.NewManager(cfg.MaxConnPerIP, int(cfg.MaxPeers, banned)) // has to save the private key for host h := &HostV2{ h: p2pHost, @@ -269,6 +269,7 @@ func NewHost(cfg HostConfig) (Host, error) { logger: &subLogger, ctx: ctx, cancel: cancel, + banned: banned, } utils.Logger().Info(). @@ -323,6 +324,7 @@ type HostV2 struct { onDisconnects DisconnectCallbacks ctx context.Context cancel func() + banned *blockedpeers.Manager } // PubSub .. diff --git a/p2p/security/security.go b/p2p/security/security.go index 7c8825ffb8..e9523cd028 100644 --- a/p2p/security/security.go +++ b/p2p/security/security.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/harmony-one/harmony/internal/utils" + "github.com/harmony-one/harmony/internal/utils/blockedpeers" libp2p_network "github.com/libp2p/go-libp2p/core/network" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" @@ -15,14 +15,6 @@ type Security interface { OnDisconnectCheck(conn libp2p_network.Conn) error } -type Manager struct { - maxConnPerIP int - maxPeers int - - mutex sync.Mutex - peers *peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array -} - type peerMap struct { peers map[string][]string } @@ -63,7 +55,16 @@ func (peerMap *peerMap) Range(f func(key string, value []string) bool) { } } -func NewManager(maxConnPerIP int, maxPeers int) *Manager { +type Manager struct { + maxConnPerIP int + maxPeers int64 + + mutex sync.Mutex + peers peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array + banned *blockedpeers.Manager +} + +func NewManager(maxConnPerIP int, maxPeers int64, banned *blockedpeers.Manager) *Manager { if maxConnPerIP < 0 { panic("maximum connections per IP must not be negative") } @@ -74,6 +75,7 @@ func NewManager(maxConnPerIP int, maxPeers int) *Manager { maxConnPerIP: maxConnPerIP, maxPeers: maxPeers, peers: newPeersMap(), + banned: banned, } } @@ -118,6 +120,13 @@ func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network Msg("too many peers, closing") return net.ClosePeer(conn.RemotePeer()) } + if m.banned.IsBanned(conn.RemotePeer(), time.Now()) { + utils.Logger().Warn(). + Str("new peer", remoteIp). 
+ Msg("peer is banned, closing") + return net.ClosePeer(conn.RemotePeer()) + } + m.peers.Store(remoteIp, peers) return nil } diff --git a/p2p/security/security_test.go b/p2p/security/security_test.go index cdaa99f933..79a2dac28d 100644 --- a/p2p/security/security_test.go +++ b/p2p/security/security_test.go @@ -3,6 +3,7 @@ package security import ( "context" "fmt" + "github.com/harmony-one/harmony/internal/utils/blockedpeers" "testing" "time" @@ -58,7 +59,7 @@ func TestManager_OnConnectCheck(t *testing.T) { defer h1.Close() fakeHost := &fakeHost{} - security := NewManager(2, 1) + security := NewManager(2, 1, blockedpeers.NewManager(4)) h1.Network().Notify(fakeHost) fakeHost.SetConnectCallback(security.OnConnectCheck) fakeHost.SetDisconnectCallback(security.OnDisconnectCheck) @@ -100,7 +101,7 @@ func TestManager_OnDisconnectCheck(t *testing.T) { defer h1.Close() fakeHost := &fakeHost{} - security := NewManager(2, 0) + security := NewManager(2, 0, blockedpeers.NewManager(4)) h1.Network().Notify(fakeHost) fakeHost.SetConnectCallback(security.OnConnectCheck) fakeHost.SetDisconnectCallback(security.OnDisconnectCheck) From a6ddc19fedbc54716041ebaedf684a023ed48d59 Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Sun, 25 Jun 2023 00:42:04 -0400 Subject: [PATCH 013/128] Implemented method for interface. --- internal/utils/blockedpeers/manager.go | 9 ++++++++ internal/utils/lrucache/lrucache.go | 12 +++++++++++ internal/utils/lrucache/lrucache_test.go | 27 ++++++++++++++++++++++++ p2p/host.go | 4 +--- 4 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 internal/utils/lrucache/lrucache_test.go diff --git a/internal/utils/blockedpeers/manager.go b/internal/utils/blockedpeers/manager.go index 04f50225d0..021199f7b5 100644 --- a/internal/utils/blockedpeers/manager.go +++ b/internal/utils/blockedpeers/manager.go @@ -18,6 +18,7 @@ func NewManager(size int) *Manager { func (m *Manager) IsBanned(key libp2p_peer.ID, now time.Time) bool { future, ok := m.internal.Get(key) + if ok { return future.After(now) // future > now } @@ -31,3 +32,11 @@ func (m *Manager) Ban(key libp2p_peer.ID, future time.Time) { func (m *Manager) Contains(key libp2p_peer.ID) bool { return m.internal.Contains(key) } + +func (m *Manager) Len() int { + return m.internal.Len() +} + +func (m *Manager) Keys() []libp2p_peer.ID { + return m.internal.Keys() +} diff --git a/internal/utils/lrucache/lrucache.go b/internal/utils/lrucache/lrucache.go index 95d9b88bbe..e20424a108 100644 --- a/internal/utils/lrucache/lrucache.go +++ b/internal/utils/lrucache/lrucache.go @@ -31,3 +31,15 @@ func (c *Cache[K, V]) Set(key K, value V) { func (c *Cache[K, V]) Contains(key K) bool { return c.cache.Contains(key) } + +func (c *Cache[K, V]) Len() int { + return c.cache.Len() +} + +func (c *Cache[K, V]) Keys() []K { + out := make([]K, 0, c.cache.Len()) + for _, v := range c.cache.Keys() { + out = append(out, v.(K)) + } + return out +} diff --git a/internal/utils/lrucache/lrucache_test.go b/internal/utils/lrucache/lrucache_test.go new file mode 100644 index 0000000000..67f16ff39a --- /dev/null +++ b/internal/utils/lrucache/lrucache_test.go @@ -0,0 +1,27 @@ +package lrucache + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestKeys(t *testing.T) { + c := NewCache[int, int](10) + + for i := 0; i < 3; i++ { + c.Set(i, i) + } + m := map[int]int{ + 0: 0, + 1: 1, + 2: 2, + } + keys := c.Keys() + + m2 := map[int]int{} + for _, k := range keys { + m2[k] = k + } + + require.Equal(t, m, m2) +} 
diff --git a/p2p/host.go b/p2p/host.go index 745e9cef33..aeafea7dcd 100644 --- a/p2p/host.go +++ b/p2p/host.go @@ -494,9 +494,7 @@ func (host *HostV2) ListPeer(topic string) []libp2p_peer.ID { // ListBlockedPeer returns list of blocked peer func (host *HostV2) ListBlockedPeer() []libp2p_peer.ID { - // TODO: this is a place holder for now - peers := make([]libp2p_peer.ID, 0) - return peers + return host.banned.Keys() } // GetPeerCount ... From 82b3f0cedaf76290a123f419bf6fef6aed742592 Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Sun, 23 Jul 2023 12:41:04 -0400 Subject: [PATCH 014/128] Updated. --- consensus/consensus.go | 4 ++-- consensus/consensus_test.go | 8 ++++---- consensus/consensus_v2.go | 5 +++-- consensus/validator.go | 2 +- consensus/view_change_msg.go | 2 +- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index bdb4803bb4..1e35773ef4 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -268,7 +268,7 @@ func New( consensus := Consensus{ mutex: &sync.RWMutex{}, ShardID: shard, - fBFTLog: NewFBFTLog(), + fBFTLog: NewFBFTLog(VerifyNewBlock(registry.GetWebHooks(), registry.GetBlockchain(), registry.GetBeaconchain())), phase: FBFTAnnounce, current: State{mode: Normal}, Decider: Decider, @@ -302,7 +302,7 @@ func New( consensus.RndChannel = make(chan [vdfAndSeedSize]byte) consensus.IgnoreViewIDCheck = abool.NewBool(false) // Make Sure Verifier is not null - consensus.vc = newViewChange(consensus.FBFTLog.BlockVerify) + consensus.vc = newViewChange(consensus.fBFTLog.BlockVerify) // init prometheus metrics initMetrics() consensus.AddPubkeyMetrics() diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 83d2021e32..725e70c3f2 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -22,7 +22,7 @@ func TestConsensusInitialization(t *testing.T) { assert.NoError(t, err) messageSender := &MessageSender{host: host, retryTimes: int(phaseDuration.Seconds()) / RetryIntervalInSec} - fbtLog := NewFBFTLog(consensus.FBFTLog.verifyNewBlock) + fbtLog := NewFBFTLog(nil) state := State{mode: Normal} timeouts := createTimeout() @@ -37,10 +37,10 @@ func TestConsensusInitialization(t *testing.T) { assert.IsType(t, make(chan struct{}), consensus.BlockNumLowChan) // FBFTLog - assert.Equal(t, fbtLog.blocks, consensus.FBFTLog.blocks) - assert.Equal(t, fbtLog.messages, consensus.FBFTLog.messages) + assert.Equal(t, fbtLog.blocks, consensus.fBFTLog.blocks) + assert.Equal(t, fbtLog.messages, consensus.fBFTLog.messages) assert.Equal(t, len(fbtLog.verifiedBlocks), 0) - assert.Equal(t, fbtLog.verifiedBlocks, consensus.FBFTLog.verifiedBlocks) + assert.Equal(t, fbtLog.verifiedBlocks, consensus.fBFTLog.verifiedBlocks) assert.NotNil(t, consensus.FBFTLog()) assert.Equal(t, FBFTAnnounce, consensus.phase) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 27c9b15bf3..258d82a8fa 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -4,11 +4,12 @@ import ( "bytes" "context" "encoding/hex" - libp2p_peer "github.com/libp2p/go-libp2p/core/peer" "math/big" "sync/atomic" "time" + libp2p_peer "github.com/libp2p/go-libp2p/core/peer" + "github.com/ethereum/go-ethereum/common" bls2 "github.com/harmony-one/bls/ffi/go/bls" "github.com/harmony-one/harmony/consensus/signature" @@ -631,7 +632,7 @@ func (consensus *Consensus) tryCatchup() error { } blk.SetCurrentCommitSig(msg.Payload) - if err := consensus.FBFTLog.verifyBlock(blk); err != nil { 
+ if err := consensus.fBFTLog.verifyBlock(blk); err != nil { consensus.getLogger().Err(err).Msg("[TryCatchup] failed block verifier") return err } diff --git a/consensus/validator.go b/consensus/validator.go index 02f92cd509..6a1f21e1a9 100644 --- a/consensus/validator.go +++ b/consensus/validator.go @@ -131,7 +131,7 @@ func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block Hex("blockHash", recvMsg.BlockHash[:]). Msg("[validateNewBlock] Prepared message and block added") - if err := consensus.FBFTLog.verifyBlock(&blockObj); err != nil { + if err := consensus.fBFTLog.verifyBlock(&blockObj); err != nil { consensus.getLogger().Error().Err(err).Msg("[validateNewBlock] Block verification failed") return nil, errors.Errorf("Block verification failed: %s", err.Error()) } diff --git a/consensus/view_change_msg.go b/consensus/view_change_msg.go index 21ec801aaa..c0a9863dd6 100644 --- a/consensus/view_change_msg.go +++ b/consensus/view_change_msg.go @@ -45,7 +45,7 @@ func (consensus *Consensus) constructViewChangeMessage(priKey *bls.PrivateKeyWra Interface("preparedMsg", preparedMsg). Msg("[constructViewChangeMessage] found prepared msg") if block != nil { - if err := consensus.FBFTLog.verifyBlock(block); err == nil { + if err := consensus.fBFTLog.verifyBlock(block); err == nil { tmpEncoded, err := rlp.EncodeToBytes(block) if err != nil { consensus.getLogger().Err(err).Msg("[constructViewChangeMessage] Failed encoding block") From 41900b0400824d268f346af854236f91615341f6 Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Mon, 16 Oct 2023 22:15:13 -0400 Subject: [PATCH 015/128] Rebased onto dev. --- api/service/stagedstreamsync/downloader.go | 4 +--- api/service/stagedstreamsync/syncing.go | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/api/service/stagedstreamsync/downloader.go b/api/service/stagedstreamsync/downloader.go index 156722a143..2b112bbac9 100644 --- a/api/service/stagedstreamsync/downloader.go +++ b/api/service/stagedstreamsync/downloader.go @@ -6,9 +6,6 @@ import ( "time" "github.com/ethereum/go-ethereum/event" - "github.com/harmony-one/harmony/consensus" - "github.com/rs/zerolog" - "github.com/harmony-one/harmony/consensus" "github.com/harmony-one/harmony/core" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" @@ -17,6 +14,7 @@ import ( "github.com/harmony-one/harmony/p2p/stream/common/streammanager" "github.com/harmony-one/harmony/p2p/stream/protocols/sync" "github.com/harmony-one/harmony/shard" + "github.com/rs/zerolog" ) type ( diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 3ebb4f21ba..0be19902a5 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -46,8 +46,6 @@ func CreateStagedSync(ctx context.Context, protocol syncProtocol, config Config, logger zerolog.Logger, - logProgress bool, - c *consensus.Consensus, ) (*StagedStreamSync, error) { logger.Info(). Uint32("shard", bc.ShardID()). 
@@ -82,7 +80,7 @@ func CreateStagedSync(ctx context.Context, } stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) - stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB, c) + stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB, consensus) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) From f0b70d9f7162c8d70c878f0f076651a8be89449c Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Fri, 20 Oct 2023 15:40:10 -0400 Subject: [PATCH 016/128] Fixed imports. --- internal/utils/blockedpeers/manager.go | 17 +++++++++-------- internal/utils/blockedpeers/manager_test.go | 11 ++++++----- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/internal/utils/blockedpeers/manager.go b/internal/utils/blockedpeers/manager.go index 021199f7b5..a56fe29e6f 100644 --- a/internal/utils/blockedpeers/manager.go +++ b/internal/utils/blockedpeers/manager.go @@ -1,22 +1,23 @@ package blockedpeers import ( - "github.com/harmony-one/harmony/internal/utils/lrucache" - libp2p_peer "github.com/libp2p/go-libp2p/core/peer" "time" + + "github.com/harmony-one/harmony/internal/utils/lrucache" + "github.com/libp2p/go-libp2p/core/peer" ) type Manager struct { - internal *lrucache.Cache[libp2p_peer.ID, time.Time] + internal *lrucache.Cache[peer.ID, time.Time] } func NewManager(size int) *Manager { return &Manager{ - internal: lrucache.NewCache[libp2p_peer.ID, time.Time](size), + internal: lrucache.NewCache[peer.ID, time.Time](size), } } -func (m *Manager) IsBanned(key libp2p_peer.ID, now time.Time) bool { +func (m *Manager) IsBanned(key peer.ID, now time.Time) bool { future, ok := m.internal.Get(key) if ok { @@ -25,11 +26,11 @@ func (m *Manager) IsBanned(key libp2p_peer.ID, now time.Time) bool { return ok } -func (m *Manager) Ban(key libp2p_peer.ID, future time.Time) { +func (m *Manager) Ban(key peer.ID, future time.Time) { m.internal.Set(key, future) } -func (m *Manager) Contains(key libp2p_peer.ID) bool { +func (m *Manager) Contains(key peer.ID) bool { return m.internal.Contains(key) } @@ -37,6 +38,6 @@ func (m *Manager) Len() int { return m.internal.Len() } -func (m *Manager) Keys() []libp2p_peer.ID { +func (m *Manager) Keys() []peer.ID { return m.internal.Keys() } diff --git a/internal/utils/blockedpeers/manager_test.go b/internal/utils/blockedpeers/manager_test.go index 31d0b680df..b1bccb54ab 100644 --- a/internal/utils/blockedpeers/manager_test.go +++ b/internal/utils/blockedpeers/manager_test.go @@ -1,17 +1,18 @@ package blockedpeers import ( - libp2p_peer "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" "testing" "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) func TestNewManager(t *testing.T) { var ( - peer1 libp2p_peer.ID = "peer1" - now = time.Now() - m = NewManager(4) + peer1 peer.ID = "peer1" + now = time.Now() + m = NewManager(4) ) t.Run("check_empty", func(t *testing.T) { From 61794505074dfbdf8c65715c0887749dd7a4057a Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Mon, 23 Oct 2023 01:16:54 -0400 Subject: [PATCH 017/128] Fixed imports. 
--- internal/utils/lrucache/lrucache_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/utils/lrucache/lrucache_test.go b/internal/utils/lrucache/lrucache_test.go index 67f16ff39a..005603b9db 100644 --- a/internal/utils/lrucache/lrucache_test.go +++ b/internal/utils/lrucache/lrucache_test.go @@ -1,8 +1,9 @@ package lrucache import ( - "github.com/stretchr/testify/require" "testing" + + "github.com/stretchr/testify/require" ) func TestKeys(t *testing.T) { From 96e03868aeed121aa03dd09116b5be430f142969 Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Mon, 23 Oct 2023 02:06:09 -0400 Subject: [PATCH 018/128] Fixed imports. --- p2p/host.go | 7 ------- p2p/security/security.go | 14 ++++++++------ p2p/security/security_test.go | 2 +- 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/p2p/host.go b/p2p/host.go index aeafea7dcd..18dce6a467 100644 --- a/p2p/host.go +++ b/p2p/host.go @@ -11,13 +11,6 @@ import ( "sync" "time" - "github.com/harmony-one/bls/ffi/go/bls" - nodeconfig "github.com/harmony-one/harmony/internal/configs/node" - "github.com/harmony-one/harmony/internal/utils" - "github.com/harmony-one/harmony/internal/utils/blockedpeers" - "github.com/harmony-one/harmony/p2p/discovery" - "github.com/harmony-one/harmony/p2p/security" - sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/libp2p/go-libp2p" dht "github.com/libp2p/go-libp2p-kad-dht" libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub" diff --git a/p2p/security/security.go b/p2p/security/security.go index e9523cd028..f8d0322531 100644 --- a/p2p/security/security.go +++ b/p2p/security/security.go @@ -3,16 +3,18 @@ package security import ( "fmt" "sync" + "time" + "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils/blockedpeers" - libp2p_network "github.com/libp2p/go-libp2p/core/network" + libp2pnetwork "github.com/libp2p/go-libp2p/core/network" ma "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" ) type Security interface { - OnConnectCheck(net libp2p_network.Network, conn libp2p_network.Conn) error - OnDisconnectCheck(conn libp2p_network.Conn) error + OnConnectCheck(net libp2pnetwork.Network, conn libp2pnetwork.Conn) error + OnDisconnectCheck(conn libp2pnetwork.Conn) error } type peerMap struct { @@ -85,7 +87,7 @@ func (m *Manager) RangePeers(f func(key string, value []string) bool) { m.peers.Range(f) } -func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network.Conn) error { +func (m *Manager) OnConnectCheck(net libp2pnetwork.Network, conn libp2pnetwork.Conn) error { m.mutex.Lock() defer m.mutex.Unlock() @@ -131,7 +133,7 @@ func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network return nil } -func (m *Manager) OnDisconnectCheck(conn libp2p_network.Conn) error { +func (m *Manager) OnDisconnectCheck(conn libp2pnetwork.Conn) error { m.mutex.Lock() defer m.mutex.Unlock() @@ -169,7 +171,7 @@ func find(slice []string, val string) (int, bool) { return -1, false } -func getRemoteIP(conn libp2p_network.Conn) (string, error) { +func getRemoteIP(conn libp2pnetwork.Conn) (string, error) { for _, protocol := range conn.RemoteMultiaddr().Protocols() { switch protocol.Code { case ma.P_IP4: diff --git a/p2p/security/security_test.go b/p2p/security/security_test.go index 79a2dac28d..7d610707ec 100644 --- a/p2p/security/security_test.go +++ b/p2p/security/security_test.go @@ -3,10 +3,10 @@ package security import ( "context" "fmt" - 
"github.com/harmony-one/harmony/internal/utils/blockedpeers" "testing" "time" + "github.com/harmony-one/harmony/internal/utils/blockedpeers" "github.com/libp2p/go-libp2p" ic "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/host" From 64d7392c3b57c7db9e3d4bc5c0fbece1c546ae7b Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Tue, 24 Oct 2023 20:42:27 -0400 Subject: [PATCH 019/128] Clean up. --- Makefile | 5 ++- api/service/stagedstreamsync/downloader.go | 5 +-- api/service/stagedstreamsync/downloaders.go | 4 +-- api/service/stagedstreamsync/service.go | 4 +-- api/service/stagedstreamsync/stage_heads.go | 2 +- .../stagedstreamsync/stage_short_range.go | 34 +++---------------- api/service/stagedstreamsync/syncing.go | 25 ++++---------- cmd/harmony/main.go | 5 ++- consensus/consensus.go | 19 +++++++---- consensus/consensus_test.go | 5 --- consensus/consensus_v2.go | 26 ++++++++------ consensus/fbft_log.go | 8 +---- consensus/fbft_log_test.go | 4 +-- consensus/validator.go | 13 ++++--- consensus/view_change_construct.go | 4 +-- consensus/view_change_msg.go | 2 +- core/blockchain.go | 8 +++++ core/blockchain_impl.go | 7 ++-- core/blockchain_stub.go | 4 +++ internal/utils/timer.go | 4 +-- node/node.go | 4 --- node/node_handler.go | 19 +++++------ p2p/security/security.go | 12 +++---- 23 files changed, 96 insertions(+), 127 deletions(-) diff --git a/Makefile b/Makefile index deb990bf50..6fc8f26071 100644 --- a/Makefile +++ b/Makefile @@ -179,4 +179,7 @@ debug_external: clean bash test/debug-external.sh build_localnet_validator: - bash test/build-localnet-validator.sh \ No newline at end of file + bash test/build-localnet-validator.sh + +tt: + go test -v -test.run OnDisconnectCheck ./p2p/security \ No newline at end of file diff --git a/api/service/stagedstreamsync/downloader.go b/api/service/stagedstreamsync/downloader.go index 2b112bbac9..3711048955 100644 --- a/api/service/stagedstreamsync/downloader.go +++ b/api/service/stagedstreamsync/downloader.go @@ -6,6 +6,8 @@ import ( "time" "github.com/ethereum/go-ethereum/event" + "github.com/rs/zerolog" + "github.com/harmony-one/harmony/consensus" "github.com/harmony-one/harmony/core" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" @@ -14,7 +16,6 @@ import ( "github.com/harmony-one/harmony/p2p/stream/common/streammanager" "github.com/harmony-one/harmony/p2p/stream/protocols/sync" "github.com/harmony-one/harmony/shard" - "github.com/rs/zerolog" ) type ( @@ -37,7 +38,7 @@ type ( ) // NewDownloader creates a new downloader -func NewDownloader(host p2p.Host, bc core.BlockChain, consensus *consensus.Consensus, dbDir string, isBeaconNode bool, config Config, c *consensus.Consensus) *Downloader { +func NewDownloader(host p2p.Host, bc core.BlockChain, consensus *consensus.Consensus, dbDir string, isBeaconNode bool, config Config) *Downloader { config.fixValues() sp := sync.NewProtocol(sync.Config{ diff --git a/api/service/stagedstreamsync/downloaders.go b/api/service/stagedstreamsync/downloaders.go index 08a8e40de4..583f3e1523 100644 --- a/api/service/stagedstreamsync/downloaders.go +++ b/api/service/stagedstreamsync/downloaders.go @@ -16,7 +16,7 @@ type Downloaders struct { } // NewDownloaders creates Downloaders for sync of multiple blockchains -func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, dbDir string, config Config, c *consensus.Consensus) *Downloaders { +func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus 
*consensus.Consensus, dbDir string, config Config) *Downloaders { ds := make(map[uint32]*Downloader) isBeaconNode := len(bcs) == 1 for _, bc := range bcs { @@ -26,7 +26,7 @@ func NewDownloaders(host p2p.Host, bcs []core.BlockChain, consensus *consensus.C if _, ok := ds[bc.ShardID()]; ok { continue } - ds[bc.ShardID()] = NewDownloader(host, bc, consensus, dbDir, isBeaconNode, config, c) + ds[bc.ShardID()] = NewDownloader(host, bc, consensus, dbDir, isBeaconNode, config) } return &Downloaders{ ds: ds, diff --git a/api/service/stagedstreamsync/service.go b/api/service/stagedstreamsync/service.go index 90db7eadac..f7ffd7f2d9 100644 --- a/api/service/stagedstreamsync/service.go +++ b/api/service/stagedstreamsync/service.go @@ -12,9 +12,9 @@ type StagedStreamSyncService struct { } // NewService creates a new downloader service -func NewService(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, config Config, dbDir string, c *consensus.Consensus) *StagedStreamSyncService { +func NewService(host p2p.Host, bcs []core.BlockChain, consensus *consensus.Consensus, config Config, dbDir string) *StagedStreamSyncService { return &StagedStreamSyncService{ - Downloaders: NewDownloaders(host, bcs, consensus, dbDir, config, c), + Downloaders: NewDownloaders(host, bcs, consensus, dbDir, config), } } diff --git a/api/service/stagedstreamsync/stage_heads.go b/api/service/stagedstreamsync/stage_heads.go index d05543c065..c917884a36 100644 --- a/api/service/stagedstreamsync/stage_heads.go +++ b/api/service/stagedstreamsync/stage_heads.go @@ -53,7 +53,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock maxHeight := s.state.status.targetBN maxBlocksPerSyncCycle := uint64(1024) // TODO: should be in config -> s.state.MaxBlocksPerSyncCycle - currentHeight := heads.configs.bc.CurrentHeader().NumberU64() + currentHeight := heads.configs.bc.CurrentBlock().NumberU64() s.state.currentCycle.TargetHeight = maxHeight targetHeight := uint64(0) if errV := CreateView(ctx, heads.configs.db, tx, func(etx kv.Tx) (err error) { diff --git a/api/service/stagedstreamsync/stage_short_range.go b/api/service/stagedstreamsync/stage_short_range.go index f3037869ae..ce6cdf36bc 100644 --- a/api/service/stagedstreamsync/stage_short_range.go +++ b/api/service/stagedstreamsync/stage_short_range.go @@ -3,9 +3,7 @@ package stagedstreamsync import ( "context" - "github.com/harmony-one/harmony/consensus" "github.com/harmony-one/harmony/core" - "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/harmony-one/harmony/shard" @@ -20,7 +18,6 @@ type StageShortRange struct { type StageShortRangeCfg struct { bc core.BlockChain db kv.RwDB - c *consensus.Consensus } func NewStageShortRange(cfg StageShortRangeCfg) *StageShortRange { @@ -29,11 +26,10 @@ func NewStageShortRange(cfg StageShortRangeCfg) *StageShortRange { } } -func NewStageShortRangeCfg(bc core.BlockChain, db kv.RwDB, c *consensus.Consensus) StageShortRangeCfg { +func NewStageShortRangeCfg(bc core.BlockChain, db kv.RwDB) StageShortRangeCfg { return StageShortRangeCfg{ bc: bc, db: db, - c: c, } } @@ -108,12 +104,9 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState) return 0, errors.Wrap(err, "prerequisite") } } - var ( - bc = sr.configs.bc - curBN = bc.CurrentHeader().NumberU64() - blkNums = sh.prepareBlockHashNumbers(curBN) - hashChain, whitelist, err = sh.getHashChain(ctx, blkNums) - ) + curBN := 
sr.configs.bc.CurrentBlock().NumberU64() + blkNums := sh.prepareBlockHashNumbers(curBN) + hashChain, whitelist, err := sh.getHashChain(ctx, blkNums) if err != nil { if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { return 0, nil @@ -163,25 +156,6 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState) return 0, err } - numInserted := 0 - err = sr.configs.c.GetLastMileBlockIter(sr.configs.bc.CurrentHeader().NumberU64()+1, func(blockIter *consensus.LastMileBlockIter) error { - for { - block := blockIter.Next() - if block == nil { - break - } - if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil { - return errors.Wrap(err, "failed to InsertChain") - } - numInserted++ - } - return nil - }) - if err != nil { - return 0, errors.WithMessage(err, "failed to InsertChain for last mile blocks") - } - utils.Logger().Info().Int("last mile blocks inserted", numInserted).Msg("Insert last mile blocks success") - return n, nil } diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 0be19902a5..738f2f9203 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -47,6 +47,7 @@ func CreateStagedSync(ctx context.Context, config Config, logger zerolog.Logger, ) (*StagedStreamSync, error) { + logger.Info(). Uint32("shard", bc.ShardID()). Bool("beaconNode", isBeaconNode). @@ -55,6 +56,7 @@ func CreateStagedSync(ctx context.Context, Bool("serverOnly", config.ServerOnly). Int("minStreams", config.MinStreams). Msg(WrapStagedSyncMsg("creating staged sync")) + var mainDB kv.RwDB dbs := make([]kv.RwDB, config.Concurrency) if config.UseMemDB { @@ -80,7 +82,7 @@ func CreateStagedSync(ctx context.Context, } stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) - stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB, consensus) + stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) @@ -225,9 +227,6 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo return 0, 0, err } - s.startSyncing() - defer s.finishSyncing() - var estimatedHeight uint64 if initSync { if h, err := s.estimateCurrentNumber(downloaderContext); err != nil { @@ -244,20 +243,13 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo } } - i := 0 + s.startSyncing() + defer s.finishSyncing() + for { - i++ ctx, cancel := context.WithCancel(downloaderContext) - started := s.bc.CurrentHeader().NumberU64() + n, err := s.doSyncCycle(ctx, initSync) - finished := s.bc.CurrentHeader().NumberU64() - utils.Logger().Info(). - Uint64("from", started). - Int("returned", n). - Uint64("to", finished). - Bool("initSync", initSync). - Int("cycle", i). - Msg(WrapStagedSyncMsg("synced blocks")) if err != nil { utils.Logger().Error(). Err(err). @@ -377,9 +369,6 @@ func (s *StagedStreamSync) finishSyncing() { if s.evtDownloadFinishedSubscribed { s.evtDownloadFinished.Send(struct{}{}) } - utils.Logger().Info(). - Bool("evtDownloadFinishedSubscribed", s.evtDownloadFinishedSubscribed). 
- Msg(WrapStagedSyncMsg("finished syncing")) } func (s *StagedStreamSync) checkPrerequisites() error { diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index b1afbe6bf8..9fc89d45da 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -1,7 +1,6 @@ package main import ( - "context" "fmt" "math/big" "math/rand" @@ -522,7 +521,7 @@ func setupNodeAndRun(hc harmonyconfig.HarmonyConfig) { Msg("Start p2p host failed") } - if err := node.BootstrapConsensus(context.TODO(), currentNode.Consensus, currentNode.Host()); err != nil { + if err := currentNode.BootstrapConsensus(); err != nil { fmt.Fprint(os.Stderr, "could not bootstrap consensus", err.Error()) if !currentNode.NodeConfig.IsOffline { os.Exit(-1) @@ -1033,7 +1032,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har } } //Setup stream sync service - s := stagedstreamsync.NewService(host, blockchains, node.Consensus, sConfig, hc.General.DataDir, node.Consensus) + s := stagedstreamsync.NewService(host, blockchains, node.Consensus, sConfig, hc.General.DataDir) node.RegisterService(service.StagedStreamSync, s) diff --git a/consensus/consensus.go b/consensus/consensus.go index 1e35773ef4..b396f6eadd 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -94,6 +94,8 @@ type Consensus struct { // The post-consensus job func passed from Node object // Called when consensus on a new block is done PostConsensusJob func(*types.Block) error + // The verifier func passed from Node object + BlockVerifier VerifyBlockFunc // verified block to state sync broadcast VerifiedNewBlock chan *types.Block // will trigger state syncing when blockNum is low @@ -169,12 +171,12 @@ func (consensus *Consensus) Beaconchain() core.BlockChain { } // VerifyBlock is a function used to verify the block and keep trace of verified blocks. -func (FBFTLog *FBFTLog) verifyBlock(block *types.Block) error { - if !FBFTLog.IsBlockVerified(block.Hash()) { - if err := FBFTLog.BlockVerify(block); err != nil { +func (consensus *Consensus) verifyBlock(block *types.Block) error { + if !consensus.fBFTLog.IsBlockVerified(block.Hash()) { + if err := consensus.BlockVerifier(block); err != nil { return errors.Errorf("Block verification failed: %s", err) } - FBFTLog.MarkBlockVerified(block) + consensus.fBFTLog.MarkBlockVerified(block) } return nil } @@ -268,7 +270,7 @@ func New( consensus := Consensus{ mutex: &sync.RWMutex{}, ShardID: shard, - fBFTLog: NewFBFTLog(VerifyNewBlock(registry.GetWebHooks(), registry.GetBlockchain(), registry.GetBeaconchain())), + fBFTLog: NewFBFTLog(), phase: FBFTAnnounce, current: State{mode: Normal}, Decider: Decider, @@ -302,7 +304,12 @@ func New( consensus.RndChannel = make(chan [vdfAndSeedSize]byte) consensus.IgnoreViewIDCheck = abool.NewBool(false) // Make Sure Verifier is not null - consensus.vc = newViewChange(consensus.fBFTLog.BlockVerify) + consensus.vc = newViewChange() + // TODO: reference to blockchain/beaconchain should be removed. 
+ verifier := VerifyNewBlock(registry.GetWebHooks(), consensus.Blockchain(), consensus.Beaconchain()) + consensus.BlockVerifier = verifier + consensus.vc.verifyBlock = consensus.verifyBlock + // init prometheus metrics initMetrics() consensus.AddPubkeyMetrics() diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 725e70c3f2..697ba49525 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -22,7 +22,6 @@ func TestConsensusInitialization(t *testing.T) { assert.NoError(t, err) messageSender := &MessageSender{host: host, retryTimes: int(phaseDuration.Seconds()) / RetryIntervalInSec} - fbtLog := NewFBFTLog(nil) state := State{mode: Normal} timeouts := createTimeout() @@ -37,10 +36,6 @@ func TestConsensusInitialization(t *testing.T) { assert.IsType(t, make(chan struct{}), consensus.BlockNumLowChan) // FBFTLog - assert.Equal(t, fbtLog.blocks, consensus.fBFTLog.blocks) - assert.Equal(t, fbtLog.messages, consensus.fBFTLog.messages) - assert.Equal(t, len(fbtLog.verifiedBlocks), 0) - assert.Equal(t, fbtLog.verifiedBlocks, consensus.fBFTLog.verifiedBlocks) assert.NotNil(t, consensus.FBFTLog()) assert.Equal(t, FBFTAnnounce, consensus.phase) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 258d82a8fa..6d3ef5b470 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -8,14 +8,13 @@ import ( "sync/atomic" "time" - libp2p_peer "github.com/libp2p/go-libp2p/core/peer" - "github.com/ethereum/go-ethereum/common" bls2 "github.com/harmony-one/bls/ffi/go/bls" "github.com/harmony-one/harmony/consensus/signature" "github.com/harmony-one/harmony/core" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" "github.com/harmony-one/harmony/internal/utils" + libp2p_peer "github.com/libp2p/go-libp2p/core/peer" "github.com/rs/zerolog" msg_pb "github.com/harmony-one/harmony/api/proto/message" @@ -395,12 +394,11 @@ func (consensus *Consensus) tick() { // the bootstrap timer will be stopped once consensus is reached or view change // is succeeded if k != timeoutBootstrap { - if v.Stop() { // prevent useless logs - consensus.getLogger().Debug(). - Str("k", k.String()). - Str("Mode", consensus.current.Mode().String()). - Msg("[ConsensusMainLoop] consensusTimeout stopped!!!") - } + consensus.getLogger().Debug(). + Str("k", k.String()). + Str("Mode", consensus.current.Mode().String()). 
+ Msg("[ConsensusMainLoop] consensusTimeout stopped!!!") + v.Stop() continue } } @@ -456,6 +454,7 @@ func (consensus *Consensus) BlockChannel(newBlock *types.Block) { type LastMileBlockIter struct { blockCandidates []*types.Block fbftLog *FBFTLog + verify func(*types.Block) error curIndex int logger *zerolog.Logger } @@ -470,6 +469,9 @@ func (consensus *Consensus) GetLastMileBlockIter(bnStart uint64, cb func(iter *L // GetLastMileBlockIter get the iterator of the last mile blocks starting from number bnStart func (consensus *Consensus) getLastMileBlockIter(bnStart uint64, cb func(iter *LastMileBlockIter) error) error { + if consensus.BlockVerifier == nil { + return errors.New("consensus haven't initialized yet") + } blocks, _, err := consensus.getLastMileBlocksAndMsg(bnStart) if err != nil { return err @@ -477,6 +479,7 @@ func (consensus *Consensus) getLastMileBlockIter(bnStart uint64, cb func(iter *L return cb(&LastMileBlockIter{ blockCandidates: blocks, fbftLog: consensus.fBFTLog, + verify: consensus.BlockVerifier, curIndex: 0, logger: consensus.getLogger(), }) @@ -491,7 +494,7 @@ func (iter *LastMileBlockIter) Next() *types.Block { iter.curIndex++ if !iter.fbftLog.IsBlockVerified(block.Hash()) { - if err := iter.fbftLog.BlockVerify(block); err != nil { + if err := iter.verify(block); err != nil { iter.logger.Debug().Err(err).Msg("block verification failed in consensus last mile block") return nil } @@ -618,6 +621,9 @@ func (consensus *Consensus) verifyLastCommitSig(lastCommitSig []byte, blk *types // tryCatchup add the last mile block in PBFT log memory cache to blockchain. func (consensus *Consensus) tryCatchup() error { // TODO: change this to a more systematic symbol + if consensus.BlockVerifier == nil { + return errors.New("consensus haven't finished initialization") + } initBN := consensus.getBlockNum() defer consensus.postCatchup(initBN) @@ -632,7 +638,7 @@ func (consensus *Consensus) tryCatchup() error { } blk.SetCurrentCommitSig(msg.Payload) - if err := consensus.fBFTLog.verifyBlock(blk); err != nil { + if err := consensus.verifyBlock(blk); err != nil { consensus.getLogger().Err(err).Msg("[TryCatchup] failed block verifier") return err } diff --git a/consensus/fbft_log.go b/consensus/fbft_log.go index 7ffa2ff9ec..982aecab75 100644 --- a/consensus/fbft_log.go +++ b/consensus/fbft_log.go @@ -113,16 +113,14 @@ type FBFTLog struct { blocks map[common.Hash]*types.Block // store blocks received in FBFT verifiedBlocks map[common.Hash]struct{} // store block hashes for blocks that has already been verified messages map[fbftMsgID]*FBFTMessage // store messages received in FBFT - verifyNewBlock func(*types.Block) error // block verification function } // NewFBFTLog returns new instance of FBFTLog -func NewFBFTLog(verifyNewBlock func(*types.Block) error) *FBFTLog { +func NewFBFTLog() *FBFTLog { pbftLog := FBFTLog{ blocks: make(map[common.Hash]*types.Block), messages: make(map[fbftMsgID]*FBFTMessage), verifiedBlocks: make(map[common.Hash]struct{}), - verifyNewBlock: verifyNewBlock, } return &pbftLog } @@ -132,10 +130,6 @@ func (log *FBFTLog) AddBlock(block *types.Block) { log.blocks[block.Hash()] = block } -func (log *FBFTLog) BlockVerify(block *types.Block) error { - return log.verifyNewBlock(block) -} - // MarkBlockVerified marks the block as verified func (log *FBFTLog) MarkBlockVerified(block *types.Block) { log.verifiedBlocks[block.Hash()] = struct{}{} diff --git a/consensus/fbft_log_test.go b/consensus/fbft_log_test.go index c22c70b3e7..420effff4e 100644 --- 
a/consensus/fbft_log_test.go +++ b/consensus/fbft_log_test.go @@ -65,7 +65,7 @@ func TestGetMessagesByTypeSeqViewHash(t *testing.T) { ViewID: 3, BlockHash: [32]byte{01, 02}, } - log := NewFBFTLog(nil) + log := NewFBFTLog() log.AddVerifiedMessage(&pbftMsg) found := log.GetMessagesByTypeSeqViewHash( @@ -90,7 +90,7 @@ func TestHasMatchingAnnounce(t *testing.T) { ViewID: 3, BlockHash: [32]byte{01, 02}, } - log := NewFBFTLog(nil) + log := NewFBFTLog() log.AddVerifiedMessage(&pbftMsg) found := log.HasMatchingViewAnnounce(2, 3, [32]byte{01, 02}) if !found { diff --git a/consensus/validator.go b/consensus/validator.go index 6a1f21e1a9..0506f4359d 100644 --- a/consensus/validator.go +++ b/consensus/validator.go @@ -63,11 +63,6 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) { go func() { // Best effort check, no need to error out. _, err := consensus.ValidateNewBlock(recvMsg) - if err != nil { - // maybe ban sender - consensus.getLogger().Error(). - Err(err).Msgf("[Announce] Failed to validate block") - } if err == nil { consensus.GetLogger().Info(). Msg("[Announce] Block verified") @@ -81,7 +76,6 @@ func (consensus *Consensus) ValidateNewBlock(recvMsg *FBFTMessage) (*types.Block defer consensus.mutex.Unlock() return consensus.validateNewBlock(recvMsg) } - func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block, error) { if consensus.fBFTLog.IsBlockVerified(recvMsg.BlockHash) { var blockObj *types.Block @@ -131,7 +125,12 @@ func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block Hex("blockHash", recvMsg.BlockHash[:]). Msg("[validateNewBlock] Prepared message and block added") - if err := consensus.fBFTLog.verifyBlock(&blockObj); err != nil { + if consensus.BlockVerifier == nil { + consensus.getLogger().Debug().Msg("[validateNewBlock] consensus received message before init. Ignoring") + return nil, errors.New("nil block verifier") + } + + if err := consensus.verifyBlock(&blockObj); err != nil { consensus.getLogger().Error().Err(err).Msg("[validateNewBlock] Block verification failed") return nil, errors.Errorf("Block verification failed: %s", err.Error()) } diff --git a/consensus/view_change_construct.go b/consensus/view_change_construct.go index 0c3aa1e60e..061d2a795c 100644 --- a/consensus/view_change_construct.go +++ b/consensus/view_change_construct.go @@ -51,11 +51,9 @@ type viewChange struct { } // newViewChange returns a new viewChange object -func newViewChange(verifyBlock VerifyBlockFunc) *viewChange { +func newViewChange() *viewChange { vc := viewChange{} vc.Reset() - vc.verifyBlock = verifyBlock - return &vc } diff --git a/consensus/view_change_msg.go b/consensus/view_change_msg.go index c0a9863dd6..6c4b080055 100644 --- a/consensus/view_change_msg.go +++ b/consensus/view_change_msg.go @@ -45,7 +45,7 @@ func (consensus *Consensus) constructViewChangeMessage(priKey *bls.PrivateKeyWra Interface("preparedMsg", preparedMsg). Msg("[constructViewChangeMessage] found prepared msg") if block != nil { - if err := consensus.fBFTLog.verifyBlock(block); err == nil { + if err := consensus.verifyBlock(block); err == nil { tmpEncoded, err := rlp.EncodeToBytes(block) if err != nil { consensus.getLogger().Err(err).Msg("[constructViewChangeMessage] Failed encoding block") diff --git a/core/blockchain.go b/core/blockchain.go index 40d33100a2..0adc96925e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -109,6 +109,14 @@ type BlockChain interface { // but does not write any state. 
This is used to construct competing side forks // up to the point where they exceed the canonical total difficulty. WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) + // WriteBlockWithState writes the block and all associated state to the database. + WriteBlockWithState( + block *types.Block, receipts []*types.Receipt, + cxReceipts []*types.CXReceipt, + stakeMsgs []types2.StakeMsg, + paid reward.Reader, + state *state.DB, + ) (status WriteStatus, err error) // GetMaxGarbageCollectedBlockNumber .. GetMaxGarbageCollectedBlockNumber() int64 // InsertChain attempts to insert the given batch of blocks in to the canonical diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 9e7f1134b0..b12de56373 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1473,8 +1473,7 @@ func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block, td *big.Int return nil } -// writeBlockWithState writes the block and all associated state to the database. -func (bc *BlockChainImpl) writeBlockWithState( +func (bc *BlockChainImpl) WriteBlockWithState( block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, stakeMsgs []staking.StakeMsg, @@ -1683,6 +1682,8 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i if len(chain) == 0 { return 0, nil, nil, ErrEmptyChain } + first := chain[0] + fmt.Println("insertChain", utils.GetPort(), first.ShardID(), first.Epoch().Uint64(), first.NumberU64()) // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { @@ -1881,7 +1882,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Write the block to the chain and get the status. 
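// Aside on the hunks above: writeBlockWithState is exported and declared on
// the BlockChain interface so callers outside core can persist a block along
// with its receipts and state. The same pattern in miniature -- illustrative
// names only, not part of this patch:

	package chain

	import "github.com/pkg/errors"

	// WriteStatus stands in for core.WriteStatus.
	type WriteStatus byte

	type Chain interface {
		// WriteBlock persists a block; on the concrete type this is a
		// formerly unexported method promoted onto the interface.
		WriteBlock(num uint64) (WriteStatus, error)
	}

	// Stub keeps partial implementations compiling with a safe default,
	// as core.Stub does for WriteBlockWithState below.
	type Stub struct{ Name string }

	func (a Stub) WriteBlock(num uint64) (WriteStatus, error) {
		return 0, errors.Errorf("method WriteBlock not implemented for %s", a.Name)
	}
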
substart = time.Now() - status, err := bc.writeBlockWithState( + status, err := bc.WriteBlockWithState( block, receipts, cxReceipts, stakeMsgs, payout, state, ) if err != nil { diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index e42a12b10e..e9ef10ce94 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -124,6 +124,10 @@ func (a Stub) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error return errors.Errorf("method WriteBlockWithoutState not implemented for %s", a.Name) } +func (a Stub) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, stakeMsgs []staking.StakeMsg, paid reward.Reader, state *state.DB) (status WriteStatus, err error) { + return 0, errors.Errorf("method WriteBlockWithState not implemented for %s", a.Name) +} + func (a Stub) GetMaxGarbageCollectedBlockNumber() int64 { return 0 } diff --git a/internal/utils/timer.go b/internal/utils/timer.go index 176732fca3..d355d5c719 100644 --- a/internal/utils/timer.go +++ b/internal/utils/timer.go @@ -34,11 +34,9 @@ func (timeout *Timeout) Start() { } // Stop stops the timeout clock -func (timeout *Timeout) Stop() (stopped bool) { - stopped = timeout.state != Inactive +func (timeout *Timeout) Stop() { timeout.state = Inactive timeout.start = time.Now() - return stopped } // Expired checks whether the timeout is reached/expired diff --git a/node/node.go b/node/node.go index 8d9665854f..f035bf4910 100644 --- a/node/node.go +++ b/node/node.go @@ -149,10 +149,6 @@ type Node struct { registry *registry.Registry } -func (node *Node) Host() p2p.Host { - return node.host -} - // Blockchain returns the blockchain for the node's current shard. func (node *Node) Blockchain() core.BlockChain { return node.registry.GetBlockchain() diff --git a/node/node_handler.go b/node/node_handler.go index 89464d3c0c..eeaf90f2d7 100644 --- a/node/node_handler.go +++ b/node/node_handler.go @@ -404,21 +404,16 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error { } // BootstrapConsensus is a goroutine to check number of peers and start the consensus -func BootstrapConsensus(ctx context.Context, consensus *consensus.Consensus, host p2p.Host) error { - ctx, cancel := context.WithTimeout(ctx, time.Minute) +func (node *Node) BootstrapConsensus() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - min := consensus.MinPeers + min := node.Consensus.MinPeers enoughMinPeers := make(chan struct{}) const checkEvery = 3 * time.Second go func() { for { - select { - case <-ctx.Done(): - return - case <-time.After(checkEvery): - } - - numPeersNow := host.GetPeerCount() + <-time.After(checkEvery) + numPeersNow := node.host.GetPeerCount() if numPeersNow >= min { utils.Logger().Info().Msg("[bootstrap] StartConsensus") enoughMinPeers <- struct{}{} @@ -437,7 +432,9 @@ func BootstrapConsensus(ctx context.Context, consensus *consensus.Consensus, hos case <-ctx.Done(): return ctx.Err() case <-enoughMinPeers: - go consensus.StartChannel() + go func() { + node.Consensus.StartChannel() + }() return nil } } diff --git a/p2p/security/security.go b/p2p/security/security.go index f8d0322531..db70c76d49 100644 --- a/p2p/security/security.go +++ b/p2p/security/security.go @@ -7,14 +7,14 @@ import ( "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/internal/utils/blockedpeers" - libp2pnetwork "github.com/libp2p/go-libp2p/core/network" + libp2p_network "github.com/libp2p/go-libp2p/core/network" ma 
"github.com/multiformats/go-multiaddr" "github.com/pkg/errors" ) type Security interface { - OnConnectCheck(net libp2pnetwork.Network, conn libp2pnetwork.Conn) error - OnDisconnectCheck(conn libp2pnetwork.Conn) error + OnConnectCheck(net libp2p_network.Network, conn libp2p_network.Conn) error + OnDisconnectCheck(conn libp2p_network.Conn) error } type peerMap struct { @@ -87,7 +87,7 @@ func (m *Manager) RangePeers(f func(key string, value []string) bool) { m.peers.Range(f) } -func (m *Manager) OnConnectCheck(net libp2pnetwork.Network, conn libp2pnetwork.Conn) error { +func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network.Conn) error { m.mutex.Lock() defer m.mutex.Unlock() @@ -133,7 +133,7 @@ func (m *Manager) OnConnectCheck(net libp2pnetwork.Network, conn libp2pnetwork.C return nil } -func (m *Manager) OnDisconnectCheck(conn libp2pnetwork.Conn) error { +func (m *Manager) OnDisconnectCheck(conn libp2p_network.Conn) error { m.mutex.Lock() defer m.mutex.Unlock() @@ -171,7 +171,7 @@ func find(slice []string, val string) (int, bool) { return -1, false } -func getRemoteIP(conn libp2pnetwork.Conn) (string, error) { +func getRemoteIP(conn libp2p_network.Conn) (string, error) { for _, protocol := range conn.RemoteMultiaddr().Protocols() { switch protocol.Code { case ma.P_IP4: From dc326f6fb249379ae3eda97ed1806d109802fb0e Mon Sep 17 00:00:00 2001 From: frozen <355847+Frozen@users.noreply.github.com> Date: Thu, 26 Oct 2023 11:58:16 -0400 Subject: [PATCH 020/128] Rebased. --- p2p/host.go | 9 ++++++++- p2p/security/security.go | 6 +++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/p2p/host.go b/p2p/host.go index 18dce6a467..4a4240c565 100644 --- a/p2p/host.go +++ b/p2p/host.go @@ -11,6 +11,13 @@ import ( "sync" "time" + "github.com/harmony-one/bls/ffi/go/bls" + nodeconfig "github.com/harmony-one/harmony/internal/configs/node" + "github.com/harmony-one/harmony/internal/utils" + "github.com/harmony-one/harmony/internal/utils/blockedpeers" + "github.com/harmony-one/harmony/p2p/discovery" + "github.com/harmony-one/harmony/p2p/security" + sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/libp2p/go-libp2p" dht "github.com/libp2p/go-libp2p-kad-dht" libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -247,7 +254,7 @@ func NewHost(cfg HostConfig) (Host, error) { subLogger := utils.Logger().With().Str("hostID", p2pHost.ID().Pretty()).Logger() banned := blockedpeers.NewManager(1024) - security := security.NewManager(cfg.MaxConnPerIP, int(cfg.MaxPeers, banned)) + security := security.NewManager(cfg.MaxConnPerIP, int(cfg.MaxPeers), banned) // has to save the private key for host h := &HostV2{ h: p2pHost, diff --git a/p2p/security/security.go b/p2p/security/security.go index db70c76d49..d363a96470 100644 --- a/p2p/security/security.go +++ b/p2p/security/security.go @@ -59,14 +59,14 @@ func (peerMap *peerMap) Range(f func(key string, value []string) bool) { type Manager struct { maxConnPerIP int - maxPeers int64 + maxPeers int mutex sync.Mutex - peers peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array + peers *peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array banned *blockedpeers.Manager } -func NewManager(maxConnPerIP int, maxPeers int64, banned *blockedpeers.Manager) *Manager { +func NewManager(maxConnPerIP int, maxPeers int, banned *blockedpeers.Manager) *Manager { if maxConnPerIP < 0 { panic("maximum connections per IP must not be negative") } From 
e124173a81785916600250561b2ea94983e08992 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Sun, 29 Oct 2023 17:48:30 -0800 Subject: [PATCH 021/128] Cleanup. (#4547) * Cleanup * Cleanup --- Makefile | 3 --- core/blockchain_impl.go | 2 -- core/state_processor.go | 10 +++++++++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 6fc8f26071..906e8c06a0 100644 --- a/Makefile +++ b/Makefile @@ -180,6 +180,3 @@ debug_external: clean build_localnet_validator: bash test/build-localnet-validator.sh - -tt: - go test -v -test.run OnDisconnectCheck ./p2p/security \ No newline at end of file diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index b12de56373..e9eca1f4cd 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1682,8 +1682,6 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i if len(chain) == 0 { return 0, nil, nil, ErrEmptyChain } - first := chain[0] - fmt.Println("insertChain", utils.GetPort(), first.ShardID(), first.Epoch().Uint64(), first.NumberU64()) // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { diff --git a/core/state_processor.go b/core/state_processor.go index 6ea9e244a6..9ccb256a78 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -310,7 +310,15 @@ func ApplyTransaction(bc ChainContext, author *common.Address, gp *GasPool, stat // Apply the transaction to the current state (included in the env) result, err := ApplyMessage(vmenv, msg, gp) if err != nil { - return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), msg.To().Hex(), statedb.GetBalance(msg.From()).String()) + to := "" + if m := msg.To(); m != nil { + to = m.Hex() + } + balance := "" + if a := statedb.GetBalance(msg.From()); a != nil { + balance = a.String() + } + return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), to, balance) } // Update the state with pending changes var root []byte From 4b8cf56055c5a69e199836283aa88a6724fa90fb Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Tue, 31 Oct 2023 14:20:41 -0800 Subject: [PATCH 022/128] Leader rotation. Check next leader aliveness. (#4359) * Cleanup and fix update pub keys. * Skip the next leader if it doesn't sign blocks. * Comment & constant. * Updated with dev. * Updated with latest dev. 
* Cleanup --- cmd/harmony/main.go | 3 +- consensus/consensus_v2.go | 70 +++++++++++++++++++++++++++++++------- consensus/quorum/quorum.go | 2 +- internal/utils/math.go | 17 +++++++++ 4 files changed, 78 insertions(+), 14 deletions(-) create mode 100644 internal/utils/math.go diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index 9fc89d45da..021061c75b 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -788,6 +788,8 @@ func setupChain(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigTyp } func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType, registry *registry.Registry) *node.Node { + decider := quorum.NewDecider(quorum.SuperMajorityVote, uint32(hc.General.ShardID)) + // Parse minPeers from harmonyconfig.HarmonyConfig var minPeers int var aggregateSig bool @@ -821,7 +823,6 @@ func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfi registry.SetCxPool(cxPool) // Consensus object. - decider := quorum.NewDecider(quorum.SuperMajorityVote, nodeConfig.ShardID) registry.SetIsBackup(isBackup(hc)) currentConsensus, err := consensus.New( myHost, nodeConfig.ShardID, nodeConfig.ConsensusPriKey, registry, decider, minPeers, aggregateSig) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 6d3ef5b470..b3c94a77fc 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -690,10 +690,15 @@ func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMess // This function must be called with enabled leader rotation. func (consensus *Consensus) rotateLeader(epoch *big.Int) { var ( - bc = consensus.Blockchain() - prev = consensus.getLeaderPubKey() - leader = consensus.getLeaderPubKey() + bc = consensus.Blockchain() + prev = consensus.getLeaderPubKey() + leader = consensus.getLeaderPubKey() + curBlock = bc.CurrentBlock() + curNumber = curBlock.NumberU64() + curEpoch = curBlock.Epoch().Uint64() ) + const blocksCountAliveness = 10 + utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotationInternalValidators(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch)) ss, err := bc.ReadShardState(epoch) if err != nil { @@ -741,18 +746,59 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) { var ( wasFound bool next *bls.PublicKeyWrapper + offset = 1 ) - if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) { - wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, 1) - } else { - wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, 1) - } - if !wasFound { - utils.Logger().Error().Msg("Failed to get next leader") - return - } else { + + for { + if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) { + wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, offset) + } else { + wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, offset) + } + if !wasFound { + utils.Logger().Error().Msg("Failed to get next leader") + // Seems like nothing we can do here. + return + } + members := consensus.Decider.Participants() + mask := bls.NewMask(members) + skipped := 0 + for i := 0; i < blocksCountAliveness; i++ { + header := bc.GetHeaderByNumber(curNumber - uint64(i)) + if header == nil { + utils.Logger().Error().Msgf("Failed to get header by number %d", curNumber-uint64(i)) + return + } + // if epoch is different, we should not check this block. 
+ if header.Epoch().Uint64() != curEpoch { + break + } + // Populate the mask with the bitmap. + err = mask.SetMask(header.LastCommitBitmap()) + if err != nil { + utils.Logger().Err(err).Msg("Failed to set mask") + return + } + ok, err := mask.KeyEnabled(next.Bytes) + if err != nil { + utils.Logger().Err(err).Msg("Failed to get key enabled") + return + } + if !ok { + skipped++ + } + } + + // no signature from the next leader at all, we should skip it. + if skipped >= blocksCountAliveness { + // Next leader is not signing blocks, we should skip it. + offset++ + continue + } consensus.setLeaderPubKey(next) + break } + if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) { // leader changed go func() { diff --git a/consensus/quorum/quorum.go b/consensus/quorum/quorum.go index aaeaab236d..3930abef12 100644 --- a/consensus/quorum/quorum.go +++ b/consensus/quorum/quorum.go @@ -77,7 +77,7 @@ type ParticipantTracker interface { ParticipantsCount() int64 // NthNextValidator returns key for next validator. It assumes external validators and leader rotation. NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) - NthNextHmy(shardingconfig.Instance, *bls.PublicKeyWrapper, int) (bool, *bls.PublicKeyWrapper) + NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) NthNextHmyExt(shardingconfig.Instance, *bls.PublicKeyWrapper, int) (bool, *bls.PublicKeyWrapper) FirstParticipant(shardingconfig.Instance) *bls.PublicKeyWrapper UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper) diff --git a/internal/utils/math.go b/internal/utils/math.go new file mode 100644 index 0000000000..6dceec5eb2 --- /dev/null +++ b/internal/utils/math.go @@ -0,0 +1,17 @@ +package utils + +import "golang.org/x/exp/constraints" + +func Min[T constraints.Ordered](a, b T) T { + if a < b { + return a + } + return b +} + +func Max[T constraints.Ordered](a, b T) T { + if a > b { + return a + } + return b +} From 3167545f66d1b31f1d3af5a6aabba1a7d531b144 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 1 Nov 2023 18:55:41 -0800 Subject: [PATCH 023/128] Cleanup (#4548) --- p2p/security/security_test.go | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/p2p/security/security_test.go b/p2p/security/security_test.go index 7d610707ec..483de2ad6f 100644 --- a/p2p/security/security_test.go +++ b/p2p/security/security_test.go @@ -3,6 +3,7 @@ package security import ( "context" "fmt" + "net" "testing" "time" @@ -53,8 +54,26 @@ func (mh *fakeHost) SetDisconnectCallback(callback DisconnectCallback) { mh.onDisconnects = append(mh.onDisconnects, callback) } +func GetFreePort(t *testing.T) int { + t.Helper() + a, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + t.Fatal(err) + return 0 + } else { + l, err := net.ListenTCP("tcp", a) + if err != nil { + t.Fatal(err) + return 0 + } else { + defer l.Close() + return l.Addr().(*net.TCPAddr).Port + } + } +} + func TestManager_OnConnectCheck(t *testing.T) { - h1, err := newPeer(50550) + h1, err := newPeer(GetFreePort(t)) require.NoError(t, err) defer h1.Close() @@ -63,7 +82,7 @@ func TestManager_OnConnectCheck(t *testing.T) { h1.Network().Notify(fakeHost) fakeHost.SetConnectCallback(security.OnConnectCheck) fakeHost.SetDisconnectCallback(security.OnDisconnectCheck) - h2, err := newPeer(50551) + h2, err := newPeer(GetFreePort(t)) assert.Nil(t, err) 
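// Aside, back in patch 022's rotateLeader above: a candidate next leader is
// skipped when it signed none of the last blocksCountAliveness (10) commit
// bitmaps of the current epoch. The window scan reduces to roughly this shape
// (a sketch with a hypothetical helper; signed stands in for the
// mask.SetMask/KeyEnabled lookup, and the real loop also stops early at an
// epoch boundary, which counts in the candidate's favor):

	func aliveWithin(signed func(height uint64) (bool, error), cur uint64, window int) (bool, error) {
		for i := 0; i < window; i++ {
			ok, err := signed(cur - uint64(i))
			if err != nil {
				return false, err
			}
			if ok {
				return true, nil // one signature inside the window is enough
			}
		}
		return false, nil // caller bumps offset and tries the next candidate
	}
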
defer h2.Close() err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) @@ -74,7 +93,7 @@ func TestManager_OnConnectCheck(t *testing.T) { return true }) - h3, err := newPeer(50552) + h3, err := newPeer(GetFreePort(t)) assert.Nil(t, err) defer h3.Close() err = h3.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) @@ -84,7 +103,7 @@ func TestManager_OnConnectCheck(t *testing.T) { return true }) - h4, err := newPeer(50553) + h4, err := newPeer(GetFreePort(t)) assert.Nil(t, err) defer h4.Close() err = h4.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) @@ -96,7 +115,7 @@ func TestManager_OnConnectCheck(t *testing.T) { } func TestManager_OnDisconnectCheck(t *testing.T) { - h1, err := newPeer(50550) + h1, err := newPeer(GetFreePort(t)) assert.Nil(t, err) defer h1.Close() @@ -105,7 +124,7 @@ func TestManager_OnDisconnectCheck(t *testing.T) { h1.Network().Notify(fakeHost) fakeHost.SetConnectCallback(security.OnConnectCheck) fakeHost.SetDisconnectCallback(security.OnDisconnectCheck) - h2, err := newPeer(50551) + h2, err := newPeer(GetFreePort(t)) assert.Nil(t, err) defer h2.Close() err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) From b187cd7e44ac4fa16292b08298b2dc9a0f0abda2 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 8 Nov 2023 09:03:01 -0900 Subject: [PATCH 024/128] Deadcode and cleanup. (#4550) * Cleanup * Fix. --- consensus/consensus.go | 25 +++++++++++-------------- consensus/consensus_test.go | 1 - consensus/downloader.go | 11 ----------- node/node.go | 4 ---- node/node_syncing.go | 36 +++++++++++++++--------------------- 5 files changed, 26 insertions(+), 51 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index b396f6eadd..8ec5cc81fd 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -98,8 +98,6 @@ type Consensus struct { BlockVerifier VerifyBlockFunc // verified block to state sync broadcast VerifiedNewBlock chan *types.Block - // will trigger state syncing when blockNum is low - BlockNumLowChan chan struct{} // Channel for DRG protocol to send pRnd (preimage of randomness resulting from combined vrf // randomnesses) to consensus. The first 32 bytes are randomness, the rest is for bitmap. 
PRndChannel chan []byte @@ -268,18 +266,17 @@ func New( Decider quorum.Decider, minPeers int, aggregateSig bool, ) (*Consensus, error) { consensus := Consensus{ - mutex: &sync.RWMutex{}, - ShardID: shard, - fBFTLog: NewFBFTLog(), - phase: FBFTAnnounce, - current: State{mode: Normal}, - Decider: Decider, - registry: registry, - MinPeers: minPeers, - AggregateSig: aggregateSig, - host: host, - msgSender: NewMessageSender(host), - BlockNumLowChan: make(chan struct{}, 1), + mutex: &sync.RWMutex{}, + ShardID: shard, + fBFTLog: NewFBFTLog(), + phase: FBFTAnnounce, + current: State{mode: Normal}, + Decider: Decider, + registry: registry, + MinPeers: minPeers, + AggregateSig: aggregateSig, + host: host, + msgSender: NewMessageSender(host), // FBFT timeout consensusTimeout: createTimeout(), } diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 697ba49525..fa1deaf573 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -33,7 +33,6 @@ func TestConsensusInitialization(t *testing.T) { assert.Equal(t, decider, consensus.Decider) assert.Equal(t, host, consensus.host) assert.Equal(t, messageSender, consensus.msgSender) - assert.IsType(t, make(chan struct{}), consensus.BlockNumLowChan) // FBFTLog assert.NotNil(t, consensus.FBFTLog()) diff --git a/consensus/downloader.go b/consensus/downloader.go index 84414aa804..dde7deab76 100644 --- a/consensus/downloader.go +++ b/consensus/downloader.go @@ -110,14 +110,3 @@ func (consensus *Consensus) spinUpStateSync() { v.Stop() } } - -func (consensus *Consensus) spinLegacyStateSync() { - select { - case consensus.BlockNumLowChan <- struct{}{}: - consensus.current.SetMode(Syncing) - for _, v := range consensus.consensusTimeout { - v.Stop() - } - default: - } -} diff --git a/node/node.go b/node/node.go index f035bf4910..a77939f563 100644 --- a/node/node.go +++ b/node/node.go @@ -158,10 +158,6 @@ func (node *Node) SyncInstance() ISync { return node.GetOrCreateSyncInstance(true) } -func (node *Node) CurrentSyncInstance() bool { - return node.GetOrCreateSyncInstance(false) != nil -} - // GetOrCreateSyncInstance returns an instance of state sync, either legacy or staged // if initiate sets to true, it generates a new instance func (node *Node) GetOrCreateSyncInstance(initiate bool) ISync { diff --git a/node/node_syncing.go b/node/node_syncing.go index fa90ec5c78..830df25c0d 100644 --- a/node/node_syncing.go +++ b/node/node_syncing.go @@ -7,30 +7,28 @@ import ( "strconv" "time" - "github.com/harmony-one/harmony/internal/tikv" - "github.com/multiformats/go-multiaddr" - - prom "github.com/harmony-one/harmony/api/service/prometheus" - "github.com/prometheus/client_golang/prometheus" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" - lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" - "github.com/harmony-one/harmony/api/service" "github.com/harmony-one/harmony/api/service/legacysync" legdownloader "github.com/harmony-one/harmony/api/service/legacysync/downloader" downloader_pb "github.com/harmony-one/harmony/api/service/legacysync/downloader/proto" + prom "github.com/harmony-one/harmony/api/service/prometheus" "github.com/harmony-one/harmony/api/service/stagedstreamsync" "github.com/harmony-one/harmony/api/service/stagedsync" "github.com/harmony-one/harmony/api/service/synchronize" + "github.com/harmony-one/harmony/consensus" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" + 
"github.com/harmony-one/harmony/internal/tikv" "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/p2p" "github.com/harmony-one/harmony/shard" + lru "github.com/hashicorp/golang-lru" + "github.com/multiformats/go-multiaddr" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" ) // Constants related to doing syncing. @@ -279,20 +277,16 @@ func (node *Node) DoSyncing(bc core.BlockChain, willJoinConsensus bool) { for { select { case <-ticker.C: - node.doSync(bc, willJoinConsensus) - case <-node.Consensus.BlockNumLowChan: - node.doSync(bc, willJoinConsensus) + node.doSync(node.SyncInstance(), node.SyncingPeerProvider, bc, node.Consensus, willJoinConsensus) } } } // doSync keep the node in sync with other peers, willJoinConsensus means the node will try to join consensus after catch up -func (node *Node) doSync(bc core.BlockChain, willJoinConsensus bool) { - - syncInstance := node.SyncInstance() +func (node *Node) doSync(syncInstance ISync, syncingPeerProvider SyncingPeerProvider, bc core.BlockChain, consensus *consensus.Consensus, willJoinConsensus bool) { if syncInstance.GetActivePeerNumber() < legacysync.NumPeersLowBound { shardID := bc.ShardID() - peers, err := node.SyncingPeerProvider.SyncingPeers(shardID) + peers, err := syncingPeerProvider.SyncingPeers(shardID) if err != nil { utils.Logger().Warn(). Err(err). @@ -313,13 +307,13 @@ func (node *Node) doSync(bc core.BlockChain, willJoinConsensus bool) { if isSynchronized, _, _ := syncInstance.GetParsedSyncStatusDoubleChecked(); !isSynchronized { node.IsSynchronized.UnSet() if willJoinConsensus { - node.Consensus.BlocksNotSynchronized() + consensus.BlocksNotSynchronized() } isBeacon := bc.ShardID() == shard.BeaconChainShardID - syncInstance.SyncLoop(bc, isBeacon, node.Consensus, legacysync.LoopMinTime) + syncInstance.SyncLoop(bc, isBeacon, consensus, legacysync.LoopMinTime) if willJoinConsensus { node.IsSynchronized.Set() - node.Consensus.BlocksSynchronized() + consensus.BlocksSynchronized() } } node.IsSynchronized.Set() @@ -415,7 +409,7 @@ func (node *Node) SendNewBlockToUnsync() { utils.Logger().Warn().Msg("[SYNC] unable to encode block to hashes") continue } - blockWithSigBytes, err := node.getEncodedBlockWithSigFromBlock(block) + blockWithSigBytes, err := getEncodedBlockWithSigFromBlock(block) if err != nil { utils.Logger().Warn().Err(err).Msg("[SYNC] rlp encode BlockWithSig") continue @@ -747,7 +741,7 @@ func (node *Node) getEncodedBlockWithSigByHeight(height uint64) ([]byte, error) return b, nil } -func (node *Node) getEncodedBlockWithSigFromBlock(block *types.Block) ([]byte, error) { +func getEncodedBlockWithSigFromBlock(block *types.Block) ([]byte, error) { bwh := legacysync.BlockWithSig{ Block: block, CommitSigAndBitmap: block.GetCurrentCommitSig(), From 1f7d67859dddffcb06060ab2f4f287404f3902a5 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:16:01 -0900 Subject: [PATCH 025/128] Refactored verify block. 
(#4554) --- consensus/consensus.go | 12 +----- consensus/consensus_service.go | 60 +++++++++++++++--------------- consensus/consensus_v2.go | 7 ---- consensus/validator.go | 5 --- consensus/view_change.go | 15 +++++--- consensus/view_change_construct.go | 11 +++--- node/node_handler_test.go | 2 +- node/node_newblock_test.go | 2 +- 8 files changed, 48 insertions(+), 66 deletions(-) diff --git a/consensus/consensus.go b/consensus/consensus.go index 8ec5cc81fd..066d6cee04 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -39,9 +39,6 @@ const ( AsyncProposal ) -// VerifyBlockFunc is a function used to verify the block and keep trace of verified blocks -type VerifyBlockFunc func(*types.Block) error - // Consensus is the main struct with all states and data related to consensus process. type Consensus struct { Decider quorum.Decider @@ -94,8 +91,6 @@ type Consensus struct { // The post-consensus job func passed from Node object // Called when consensus on a new block is done PostConsensusJob func(*types.Block) error - // The verifier func passed from Node object - BlockVerifier VerifyBlockFunc // verified block to state sync broadcast VerifiedNewBlock chan *types.Block // Channel for DRG protocol to send pRnd (preimage of randomness resulting from combined vrf @@ -168,7 +163,7 @@ func (consensus *Consensus) Beaconchain() core.BlockChain { return consensus.registry.GetBeaconchain() } -// VerifyBlock is a function used to verify the block and keep trace of verified blocks. +// verifyBlock is a function used to verify the block and keep trace of verified blocks. func (consensus *Consensus) verifyBlock(block *types.Block) error { if !consensus.fBFTLog.IsBlockVerified(block.Hash()) { if err := consensus.BlockVerifier(block); err != nil { @@ -302,11 +297,6 @@ func New( consensus.IgnoreViewIDCheck = abool.NewBool(false) // Make Sure Verifier is not null consensus.vc = newViewChange() - // TODO: reference to blockchain/beaconchain should be removed. - verifier := VerifyNewBlock(registry.GetWebHooks(), consensus.Blockchain(), consensus.Beaconchain()) - consensus.BlockVerifier = verifier - consensus.vc.verifyBlock = consensus.verifyBlock - // init prometheus metrics initMetrics() consensus.AddPubkeyMetrics() diff --git a/consensus/consensus_service.go b/consensus/consensus_service.go index cd15333a01..c3b0a5ce45 100644 --- a/consensus/consensus_service.go +++ b/consensus/consensus_service.go @@ -660,40 +660,38 @@ func (consensus *Consensus) getLogger() *zerolog.Logger { return &logger } -// VerifyNewBlock is called by consensus participants to verify the block (account model) they are +// BlockVerifier is called by consensus participants to verify the block (account model) they are // running consensus on. 
-func VerifyNewBlock(hooks *webhooks.Hooks, blockChain core.BlockChain, beaconChain core.BlockChain) func(*types.Block) error { - return func(newBlock *types.Block) error { - if err := blockChain.ValidateNewBlock(newBlock, beaconChain); err != nil { - switch { - case errors.Is(err, core.ErrKnownBlock): - return nil - default: - } +func (consensus *Consensus) BlockVerifier(newBlock *types.Block) error { + if err := consensus.Blockchain().ValidateNewBlock(newBlock, consensus.Beaconchain()); err != nil { + switch { + case errors.Is(err, core.ErrKnownBlock): + return nil + default: + } - if hooks := hooks; hooks != nil { - if p := hooks.ProtocolIssues; p != nil { - url := p.OnCannotCommit - go func() { - webhooks.DoPost(url, map[string]interface{}{ - "bad-header": newBlock.Header(), - "reason": err.Error(), - }) - }() - } + if hooks := consensus.registry.GetWebHooks(); hooks != nil { + if p := hooks.ProtocolIssues; p != nil { + url := p.OnCannotCommit + go func() { + webhooks.DoPost(url, map[string]interface{}{ + "bad-header": newBlock.Header(), + "reason": err.Error(), + }) + }() } - utils.Logger().Error(). - Str("blockHash", newBlock.Hash().Hex()). - Int("numTx", len(newBlock.Transactions())). - Int("numStakingTx", len(newBlock.StakingTransactions())). - Err(err). - Msgf("[VerifyNewBlock] Cannot Verify New Block!!!, blockHeight %d, myHeight %d", newBlock.NumberU64(), blockChain.CurrentHeader().NumberU64()) - return errors.WithMessagef(err, - "[VerifyNewBlock] Cannot Verify New Block!!! block-hash %s txn-count %d", - newBlock.Hash().Hex(), - len(newBlock.Transactions()), - ) } - return nil + utils.Logger().Error(). + Str("blockHash", newBlock.Hash().Hex()). + Int("numTx", len(newBlock.Transactions())). + Int("numStakingTx", len(newBlock.StakingTransactions())). + Err(err). + Msgf("[VerifyNewBlock] Cannot Verify New Block!!!, blockHeight %d, myHeight %d", newBlock.NumberU64(), consensus.Blockchain().CurrentHeader().NumberU64()) + return errors.WithMessagef(err, + "[VerifyNewBlock] Cannot Verify New Block!!! block-hash %s txn-count %d", + newBlock.Hash().Hex(), + len(newBlock.Transactions()), + ) } + return nil } diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index b3c94a77fc..c963f6d149 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -469,9 +469,6 @@ func (consensus *Consensus) GetLastMileBlockIter(bnStart uint64, cb func(iter *L // GetLastMileBlockIter get the iterator of the last mile blocks starting from number bnStart func (consensus *Consensus) getLastMileBlockIter(bnStart uint64, cb func(iter *LastMileBlockIter) error) error { - if consensus.BlockVerifier == nil { - return errors.New("consensus haven't initialized yet") - } blocks, _, err := consensus.getLastMileBlocksAndMsg(bnStart) if err != nil { return err @@ -620,10 +617,6 @@ func (consensus *Consensus) verifyLastCommitSig(lastCommitSig []byte, blk *types // tryCatchup add the last mile block in PBFT log memory cache to blockchain. 
func (consensus *Consensus) tryCatchup() error { - // TODO: change this to a more systematic symbol - if consensus.BlockVerifier == nil { - return errors.New("consensus haven't finished initialization") - } initBN := consensus.getBlockNum() defer consensus.postCatchup(initBN) diff --git a/consensus/validator.go b/consensus/validator.go index 0506f4359d..c148a61890 100644 --- a/consensus/validator.go +++ b/consensus/validator.go @@ -125,11 +125,6 @@ func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block Hex("blockHash", recvMsg.BlockHash[:]). Msg("[validateNewBlock] Prepared message and block added") - if consensus.BlockVerifier == nil { - consensus.getLogger().Debug().Msg("[validateNewBlock] consensus received message before init. Ignoring") - return nil, errors.New("nil block verifier") - } - if err := consensus.verifyBlock(&blockObj); err != nil { consensus.getLogger().Error().Err(err).Msg("[validateNewBlock] Block verification failed") return nil, errors.Errorf("Block verification failed: %s", err.Error()) diff --git a/consensus/view_change.go b/consensus/view_change.go index efc1760e84..d03ae5a13a 100644 --- a/consensus/view_change.go +++ b/consensus/view_change.go @@ -290,7 +290,9 @@ func (consensus *Consensus) startViewChange() { nextViewID, consensus.getBlockNum(), consensus.priKey, - members); err != nil { + members, + consensus.verifyBlock, + ); err != nil { consensus.getLogger().Error().Err(err).Msg("[startViewChange] Init Payload Error") } @@ -406,16 +408,19 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) { consensus.vc.AddViewIDKeyIfNotExist(recvMsg.ViewID, members) // do it once only per viewID/Leader - if err := consensus.vc.InitPayload(consensus.fBFTLog, + if err := consensus.vc.InitPayload( + consensus.fBFTLog, recvMsg.ViewID, recvMsg.BlockNum, consensus.priKey, - members); err != nil { + members, + consensus.verifyBlock, + ); err != nil { consensus.getLogger().Error().Err(err).Msg("[onViewChange] Init Payload Error") return } - err = consensus.vc.ProcessViewChangeMsg(consensus.fBFTLog, consensus.Decider, recvMsg) + err = consensus.vc.ProcessViewChangeMsg(consensus.fBFTLog, consensus.Decider, recvMsg, consensus.verifyBlock) if err != nil { consensus.getLogger().Error().Err(err). Uint64("viewID", recvMsg.ViewID). 
@@ -483,7 +488,7 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) { return } - preparedBlock, err := consensus.vc.VerifyNewViewMsg(recvMsg) + preparedBlock, err := consensus.vc.VerifyNewViewMsg(recvMsg, consensus.verifyBlock) if err != nil { consensus.getLogger().Warn().Err(err).Msg("[onNewView] Verify New View Msg Failed") return diff --git a/consensus/view_change_construct.go b/consensus/view_change_construct.go index 061d2a795c..fcf025e74d 100644 --- a/consensus/view_change_construct.go +++ b/consensus/view_change_construct.go @@ -46,7 +46,6 @@ type viewChange struct { m1Payload []byte // message payload for type m1 := |vcBlockHash|prepared_agg_sigs|prepared_bitmap|, new leader only need one - verifyBlock VerifyBlockFunc viewChangeDuration time.Duration } @@ -152,7 +151,7 @@ func (vc *viewChange) GetM3Bitmap(viewID uint64) ([]byte, []byte) { } // VerifyNewViewMsg returns true if the new view message is valid -func (vc *viewChange) VerifyNewViewMsg(recvMsg *FBFTMessage) (*types.Block, error) { +func (vc *viewChange) VerifyNewViewMsg(recvMsg *FBFTMessage, verifyBlock func(block *types.Block) error) (*types.Block, error) { if recvMsg.M3AggSig == nil || recvMsg.M3Bitmap == nil { return nil, errors.New("[VerifyNewViewMsg] M3AggSig or M3Bitmap is nil") } @@ -215,7 +214,7 @@ func (vc *viewChange) VerifyNewViewMsg(recvMsg *FBFTMessage) (*types.Block, erro if !bytes.Equal(preparedBlockHash[:], blockHash) { return nil, errors.New("[VerifyNewViewMsg] Prepared block hash doesn't match msg block hash") } - if err := vc.verifyBlock(preparedBlock); err != nil { + if err := verifyBlock(preparedBlock); err != nil { return nil, err } return preparedBlock, nil @@ -239,6 +238,7 @@ func (vc *viewChange) ProcessViewChangeMsg( fbftlog *FBFTLog, decider quorum.Decider, recvMsg *FBFTMessage, + verifyBlock func(block *types.Block) error, ) error { preparedBlock := &types.Block{} if !recvMsg.HasSingleSender() { @@ -256,7 +256,7 @@ func (vc *viewChange) ProcessViewChangeMsg( if err := rlp.DecodeBytes(recvMsg.Block, preparedBlock); err != nil { return err } - if err := vc.verifyBlock(preparedBlock); err != nil { + if err := verifyBlock(preparedBlock); err != nil { return err } _, ok := vc.bhpSigs[recvMsg.ViewID][senderKeyStr] @@ -381,6 +381,7 @@ func (vc *viewChange) InitPayload( blockNum uint64, privKeys multibls.PrivateKeys, members multibls.PublicKeys, + verifyBlock func(block *types.Block) error, ) error { // m1 or m2 init once per viewID/key. // m1 and m2 are mutually exclusive. @@ -405,7 +406,7 @@ func (vc *viewChange) InitPayload( hasBlock := false if preparedMsg != nil { if preparedBlock := fbftlog.GetBlockByHash(preparedMsg.BlockHash); preparedBlock != nil { - if err := vc.verifyBlock(preparedBlock); err == nil { + if err := verifyBlock(preparedBlock); err == nil { vc.getLogger().Info().Uint64("viewID", viewID).Uint64("blockNum", blockNum).Int("size", binary.Size(preparedBlock)).Msg("[InitPayload] add my M1 (prepared) type messaage") msgToSign := append(preparedMsg.BlockHash[:], preparedMsg.Payload...) for _, key := range privKeys { diff --git a/node/node_handler_test.go b/node/node_handler_test.go index a5085652b0..867a9616dc 100644 --- a/node/node_handler_test.go +++ b/node/node_handler_test.go @@ -134,7 +134,7 @@ func TestVerifyNewBlock(t *testing.T) { // work around vrf verification as it's tested in another test. 
node.Blockchain().Config().VRFEpoch = big.NewInt(2) - if err := consensus.VerifyNewBlock(nil, node.Blockchain(), node.Beaconchain())(block); err != nil { + if err := node.Blockchain().ValidateNewBlock(block, node.Beaconchain()); err != nil { t.Error("New block is not verified successfully:", err) } } diff --git a/node/node_newblock_test.go b/node/node_newblock_test.go index 86dd1e6c7e..5780b7cda0 100644 --- a/node/node_newblock_test.go +++ b/node/node_newblock_test.go @@ -74,7 +74,7 @@ func TestFinalizeNewBlockAsync(t *testing.T) { commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil, ) - if err := consensus.VerifyNewBlock(nil, blockchain, nil)(block); err != nil { + if err := blockchain.ValidateNewBlock(block, blockchain); err != nil { t.Error("New block is not verified successfully:", err) } From 6f7a04799d401c6126b344bc3c6978e9af82acf8 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:16:15 -0900 Subject: [PATCH 026/128] Fixed data race. (#4559) --- consensus/consensus_service.go | 6 +++++- consensus/consensus_v2.go | 8 ++++---- core/blockchain_impl.go | 7 ++----- p2p/stream/common/streammanager/streammanager.go | 6 +++--- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/consensus/consensus_service.go b/consensus/consensus_service.go index c3b0a5ce45..40f0bc23d8 100644 --- a/consensus/consensus_service.go +++ b/consensus/consensus_service.go @@ -627,14 +627,18 @@ func (consensus *Consensus) selfCommit(payload []byte) error { // NumSignaturesIncludedInBlock returns the number of signatures included in the block func (consensus *Consensus) NumSignaturesIncludedInBlock(block *types.Block) uint32 { count := uint32(0) + consensus.mutex.Lock() members := consensus.Decider.Participants() + pubKeys := consensus.getPublicKeys() + consensus.mutex.Unlock() + // TODO(audit): do not reconstruct the Mask mask := bls.NewMask(members) err := mask.SetMask(block.Header().LastCommitBitmap()) if err != nil { return count } - for _, key := range consensus.GetPublicKeys() { + for _, key := range pubKeys { if ok, err := mask.KeyEnabled(key.Bytes); err == nil && ok { count++ } diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index c963f6d149..cc4acd9fc0 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -572,19 +572,19 @@ func (consensus *Consensus) preCommitAndPropose(blk *types.Block) error { if _, err := consensus.Blockchain().InsertChain([]*types.Block{blk}, !consensus.FBFTLog().IsBlockVerified(blk.Hash())); err != nil { switch { case errors.Is(err, core.ErrKnownBlock): - consensus.getLogger().Info().Msg("[preCommitAndPropose] Block already known") + consensus.GetLogger().Info().Msg("[preCommitAndPropose] Block already known") default: - consensus.getLogger().Error().Err(err).Msg("[preCommitAndPropose] Failed to add block to chain") + consensus.GetLogger().Error().Err(err).Msg("[preCommitAndPropose] Failed to add block to chain") return } } - + consensus.mutex.Lock() consensus.getLogger().Info().Msg("[preCommitAndPropose] Start consensus timer") consensus.consensusTimeout[timeoutConsensus].Start() // Send signal to Node to propose the new block for consensus consensus.getLogger().Info().Msg("[preCommitAndPropose] sending block proposal signal") - + consensus.mutex.Unlock() consensus.ReadySignal(AsyncProposal) }() diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index e9eca1f4cd..2c38416985 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1646,6 
+1646,8 @@ func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (i } prevHash := bc.CurrentBlock().Hash() + bc.chainmu.Lock() + defer bc.chainmu.Unlock() n, events, logs, err := bc.insertChain(chain, verifyHeaders) bc.PostChainEvents(events, logs) if err == nil { @@ -1699,9 +1701,6 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i } } - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - // A queued approach to delivering events. This is generally // faster than direct delivery and requires much less mutex // acquiring. @@ -1801,9 +1800,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Prune in case non-empty winner chain if len(winner) > 0 { // Import all the pruned blocks to make the state available - bc.chainmu.Unlock() _, evs, logs, err := bc.insertChain(winner, true /* verifyHeaders */) - bc.chainmu.Lock() events, coalescedLogs = evs, logs if err != nil { diff --git a/p2p/stream/common/streammanager/streammanager.go b/p2p/stream/common/streammanager/streammanager.go index 26025fb720..8273fea581 100644 --- a/p2p/stream/common/streammanager/streammanager.go +++ b/p2p/stream/common/streammanager/streammanager.go @@ -139,8 +139,8 @@ func (sm *streamManager) loop() { discCancel() // cancel last discovery } discCtx, discCancel = context.WithCancel(sm.ctx) - go func() { - discovered, err := sm.discoverAndSetupStream(discCtx) + go func(ctx context.Context) { + discovered, err := sm.discoverAndSetupStream(ctx) if err != nil { sm.logger.Err(err) } @@ -152,7 +152,7 @@ func (sm *streamManager) loop() { sm.coolDown.UnSet() }() } - }() + }(discCtx) case addStream := <-sm.addStreamCh: err := sm.handleAddStream(addStream.st) From dbe4d43b36ed87b7061121b8d1bdbd3d70afb3f8 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:16:38 -0900 Subject: [PATCH 027/128] Timeout for block proposal. (#4553) * Timeout for proposal. 
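
The core of the change (visible in the consensus_v2.go hunks below): when leader
rotation selects a new leader, the ready-to-propose signal is now fired one block
period after the rotation instead of immediately. A minimal, self-contained Go
sketch of that pattern follows; blockPeriod and readySignal are illustrative
stand-ins for the consensus fields, not the exact identifiers used in this patch.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-in for consensus.BlockPeriod.
	blockPeriod := 500 * time.Millisecond
	// Stand-in for the consensus ready-signal channel.
	readySignal := make(chan struct{}, 1)

	// After a leader change, delay the proposal signal by one block
	// period rather than proposing right away.
	go func() {
		<-time.After(blockPeriod)
		readySignal <- struct{}{}
	}()

	<-readySignal
	fmt.Println("leader ready to propose the next block")
}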
--- cmd/harmony/main.go | 29 +++++++---------- consensus/consensus_test.go | 3 -- consensus/consensus_v2.go | 64 +++++++++++++++++++------------------ consensus/fbft_log.go | 15 +++++++++ consensus/leader.go | 4 +++ go.mod | 4 ++- go.sum | 4 +++ 7 files changed, 71 insertions(+), 52 deletions(-) diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index 021061c75b..fb4365ad82 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -14,28 +14,12 @@ import ( "syscall" "time" - "github.com/harmony-one/harmony/consensus/quorum" - "github.com/harmony-one/harmony/internal/chain" - "github.com/harmony-one/harmony/internal/registry" - "github.com/harmony-one/harmony/internal/shardchain/tikv_manage" - "github.com/harmony-one/harmony/internal/tikv/redis_helper" - "github.com/harmony-one/harmony/internal/tikv/statedb_cache" - - "github.com/harmony-one/harmony/api/service/crosslink_sending" - rosetta_common "github.com/harmony-one/harmony/rosetta/common" - - harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony" - rpc_common "github.com/harmony-one/harmony/rpc/common" - ethCommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/harmony-one/bls/ffi/go/bls" - "github.com/harmony-one/harmony/api/service" + "github.com/harmony-one/harmony/api/service/crosslink_sending" "github.com/harmony-one/harmony/api/service/pprof" "github.com/harmony-one/harmony/api/service/prometheus" "github.com/harmony-one/harmony/api/service/stagedstreamsync" @@ -43,22 +27,33 @@ import ( "github.com/harmony-one/harmony/common/fdlimit" "github.com/harmony-one/harmony/common/ntp" "github.com/harmony-one/harmony/consensus" + "github.com/harmony-one/harmony/consensus/quorum" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/hmy/downloader" + "github.com/harmony-one/harmony/internal/chain" "github.com/harmony-one/harmony/internal/cli" "github.com/harmony-one/harmony/internal/common" + harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding" "github.com/harmony-one/harmony/internal/genesis" "github.com/harmony-one/harmony/internal/params" + "github.com/harmony-one/harmony/internal/registry" "github.com/harmony-one/harmony/internal/shardchain" + "github.com/harmony-one/harmony/internal/shardchain/tikv_manage" + "github.com/harmony-one/harmony/internal/tikv/redis_helper" + "github.com/harmony-one/harmony/internal/tikv/statedb_cache" "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/multibls" "github.com/harmony-one/harmony/node" "github.com/harmony-one/harmony/numeric" "github.com/harmony-one/harmony/p2p" + rosetta_common "github.com/harmony-one/harmony/rosetta/common" + rpc_common "github.com/harmony-one/harmony/rpc/common" "github.com/harmony-one/harmony/shard" "github.com/harmony-one/harmony/webhooks" + "github.com/pkg/errors" + "github.com/spf13/cobra" ) // Host diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index fa1deaf573..992e725e75 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -64,9 +64,6 @@ func TestConsensusInitialization(t *testing.T) { assert.IsType(t, make(chan slash.Record), consensus.SlashChan) assert.NotNil(t, consensus.SlashChan) - assert.IsType(t, make(chan ProposalType), 
consensus.GetReadySignal()) - assert.NotNil(t, consensus.GetReadySignal()) - assert.IsType(t, make(chan [vdfAndSeedSize]byte), consensus.RndChannel) assert.NotNil(t, consensus.RndChannel) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index cc4acd9fc0..de7d4650b8 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -10,24 +10,23 @@ import ( "github.com/ethereum/go-ethereum/common" bls2 "github.com/harmony-one/bls/ffi/go/bls" - "github.com/harmony-one/harmony/consensus/signature" - "github.com/harmony-one/harmony/core" - nodeconfig "github.com/harmony-one/harmony/internal/configs/node" - "github.com/harmony-one/harmony/internal/utils" - libp2p_peer "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" - msg_pb "github.com/harmony-one/harmony/api/proto/message" "github.com/harmony-one/harmony/block" "github.com/harmony-one/harmony/consensus/quorum" + "github.com/harmony-one/harmony/consensus/signature" + "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/crypto/bls" vrf_bls "github.com/harmony-one/harmony/crypto/vrf/bls" + nodeconfig "github.com/harmony-one/harmony/internal/configs/node" + "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/p2p" "github.com/harmony-one/harmony/shard" "github.com/harmony-one/vdf/src/vdf_go" + libp2p_peer "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" ) var ( @@ -681,37 +680,36 @@ func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMess // rotateLeader rotates the leader to the next leader in the committee. // This function must be called with enabled leader rotation. -func (consensus *Consensus) rotateLeader(epoch *big.Int) { +func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper { var ( bc = consensus.Blockchain() - prev = consensus.getLeaderPubKey() leader = consensus.getLeaderPubKey() curBlock = bc.CurrentBlock() curNumber = curBlock.NumberU64() curEpoch = curBlock.Epoch().Uint64() ) - const blocksCountAliveness = 10 + const blocksCountAliveness = 4 utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotationInternalValidators(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch)) ss, err := bc.ReadShardState(epoch) if err != nil { utils.Logger().Error().Err(err).Msg("Failed to read shard state") - return + return nil } committee, err := ss.FindCommitteeByID(consensus.ShardID) if err != nil { utils.Logger().Error().Err(err).Msg("Failed to find committee") - return + return nil } slotsCount := len(committee.Slots) blocksPerEpoch := shard.Schedule.InstanceForEpoch(epoch).BlocksPerEpoch() if blocksPerEpoch == 0 { utils.Logger().Error().Msg("[Rotating leader] blocks per epoch is 0") - return + return nil } if slotsCount == 0 { utils.Logger().Error().Msg("[Rotating leader] slots count is 0") - return + return nil } numBlocksProducedByLeader := blocksPerEpoch / uint64(slotsCount) rest := blocksPerEpoch % uint64(slotsCount) @@ -723,7 +721,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) { s := bc.LeaderRotationMeta() if !bytes.Equal(leader.Bytes[:], s.Pub) { // Another leader. - return + return nil } // If it is the first validator producing blocks, it should also produce the remaining 'rest' of the blocks. 
if s.Shifts == 0 {
@@ -731,7 +729,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 	}
 	if s.Count < numBlocksProducedByLeader {
 		// Not enough blocks produced by the leader, continue producing by the same leader.
-		return
+		return nil
 	}
 	// Passed all checks, we can change leader.
 	// NthNext will move the leader to the next leader in the committee.
@@ -742,7 +740,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 		offset = 1
 	)
 
-	for {
+	for i := 0; i < len(committee.Slots); i++ {
 		if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
 			wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, offset)
 		} else {
@@ -751,7 +749,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 		if !wasFound {
 			utils.Logger().Error().Msg("Failed to get next leader")
 			// Seems like nothing we can do here.
-			return
+			return nil
 		}
 		members := consensus.Decider.Participants()
 		mask := bls.NewMask(members)
@@ -760,7 +758,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 			header := bc.GetHeaderByNumber(curNumber - uint64(i))
 			if header == nil {
 				utils.Logger().Error().Msgf("Failed to get header by number %d", curNumber-uint64(i))
-				return
+				return nil
 			}
 			// if epoch is different, we should not check this block.
 			if header.Epoch().Uint64() != curEpoch {
@@ -770,12 +768,12 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 			err = mask.SetMask(header.LastCommitBitmap())
 			if err != nil {
 				utils.Logger().Err(err).Msg("Failed to set mask")
-				return
+				return nil
 			}
 			ok, err := mask.KeyEnabled(next.Bytes)
 			if err != nil {
 				utils.Logger().Err(err).Msg("Failed to get key enabled")
-				return
+				return nil
 			}
 			if !ok {
 				skipped++
@@ -788,16 +786,9 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
 			offset++
 			continue
 		}
-		consensus.setLeaderPubKey(next)
-		break
-	}
-
-	if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
-		// leader changed
-		go func() {
-			consensus.ReadySignal(SyncProposal)
-		}()
+		return next
 	}
+	return nil
 }
 
 // SetupForNewConsensus sets the state for new consensus
@@ -812,7 +803,18 @@ func (consensus *Consensus) setupForNewConsensus(blk *types.Block, committedMsg
 		epoch = blk.Epoch()
 	}
 	if consensus.Blockchain().Config().IsLeaderRotationInternalValidators(epoch) {
-		consensus.rotateLeader(epoch)
+		if next := consensus.rotateLeader(epoch); next != nil {
+			prev := consensus.getLeaderPubKey()
+			consensus.setLeaderPubKey(next)
+			if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
+				// leader changed
+				blockPeriod := consensus.BlockPeriod
+				go func() {
+					<-time.After(blockPeriod)
+					consensus.ReadySignal(SyncProposal)
+				}()
+			}
+		}
 	}
 
 	// Update consensus keys at last so the change of leader status doesn't mess up normal flow
diff --git a/consensus/fbft_log.go b/consensus/fbft_log.go
index 982aecab75..cec74e314b 100644
--- a/consensus/fbft_log.go
+++ b/consensus/fbft_log.go
@@ -3,6 +3,8 @@ package consensus
 import (
 	"encoding/binary"
 	"fmt"
+	"hash/crc32"
+	"strconv"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -36,6 +38,19 @@ type FBFTMessage struct {
 	Verified bool
 }
 
+// Hash returns a crc32 checksum computed over the message type, view ID,
+// block number, block hash, block payload, and message payload.
+func (m *FBFTMessage) Hash() []byte {
+	c := crc32.NewIEEE()
+	c.Write([]byte(strconv.FormatUint(uint64(m.MessageType), 10)))
+	c.Write([]byte(strconv.FormatUint(m.ViewID, 10)))
+	c.Write([]byte(strconv.FormatUint(m.BlockNum, 10)))
+	c.Write(m.BlockHash[:])
+	c.Write(m.Block[:])
+	c.Write(m.Payload[:])
+	return c.Sum(nil)
+}
+
 // String ..
func (m *FBFTMessage) String() string {
 	sender := ""
diff --git a/consensus/leader.go b/consensus/leader.go
index 82ba3069bb..cdab1dad4b 100644
--- a/consensus/leader.go
+++ b/consensus/leader.go
@@ -7,6 +7,7 @@ import (
 	"github.com/harmony-one/harmony/crypto/bls"
 	"github.com/harmony-one/harmony/internal/common"
 	nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
+	"github.com/harmony-one/harmony/internal/utils/rclient"
 
 	"github.com/ethereum/go-ethereum/rlp"
 	bls_core "github.com/harmony-one/bls/ffi/go/bls"
@@ -16,6 +17,7 @@ import (
 	"github.com/harmony-one/harmony/p2p"
 )
 
+// announce starts a new round of consensus on the given block; leader only.
 func (consensus *Consensus) announce(block *types.Block) {
 	blockHash := block.Hash()
 
@@ -92,6 +94,7 @@ func (consensus *Consensus) announce(block *types.Block) {
 	consensus.switchPhase("Announce", FBFTPrepare)
 }
 
+// onPrepare runs on the leader once for each prepare vote message sent by a validator.
 func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
 	// TODO(audit): make FBFT lookup using map instead of looping through all items.
 	if !consensus.fBFTLog.HasMatchingViewAnnounce(
@@ -189,6 +192,7 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
 	//// Read - End
 }
 
+// onCommit runs on the leader and handles each commit vote message received from a validator.
 func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 	//// Read - Start
 	if !consensus.isRightBlockNumAndViewID(recvMsg) {
diff --git a/go.mod b/go.mod
index b1cf4fbc27..ac5fecc538 100644
--- a/go.mod
+++ b/go.mod
@@ -68,10 +68,12 @@ require (
 
 require (
 	github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
+	github.com/grafana/pyroscope-go v1.0.4
 	github.com/holiman/bloomfilter/v2 v2.0.3
 	github.com/ledgerwatch/erigon-lib v0.0.0-20230607152933-42c9c28cac68
 	github.com/ledgerwatch/log/v3 v3.8.0
 	github.com/olekukonko/tablewriter v0.0.5
+	golang.org/x/exp v0.0.0-20231006140011-7918f672742d
 )
 
 require (
@@ -146,6 +148,7 @@ require (
 	github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 // indirect
+	github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -262,7 +265,6 @@ require (
 	go.uber.org/dig v1.16.1 // indirect
 	go.uber.org/fx v1.19.2 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
 	golang.org/x/mod v0.13.0 // indirect
 	golang.org/x/term v0.13.0 // indirect
 	golang.org/x/text v0.13.0 // indirect
diff --git a/go.sum b/go.sum
index 115ec6ebab..4f620c9014 100644
--- a/go.sum
+++ b/go.sum
@@ -632,6 +632,10 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=
 github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0=
+github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY=
+github.com/grafana/pyroscope-go/godeltaprof v0.1.4 h1:mDsJ3ngul7UfrHibGQpV66PbZ3q1T8glz/tK3bQKKEk=
+github.com/grafana/pyroscope-go/godeltaprof v0.1.4/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= From 2089a0c25b62b6879b701a07ea315c1236d9b7f6 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Mon, 13 Nov 2023 05:58:13 -0900 Subject: [PATCH 028/128] Fixed import and workaround for failing test. (#4560) * Fixed import. * Workaround for failing test. --- consensus/leader.go | 1 - p2p/security/security_test.go | 30 +++++++++++++++++++++++------- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/consensus/leader.go b/consensus/leader.go index cdab1dad4b..0bd934cb7f 100644 --- a/consensus/leader.go +++ b/consensus/leader.go @@ -7,7 +7,6 @@ import ( "github.com/harmony-one/harmony/crypto/bls" "github.com/harmony-one/harmony/internal/common" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" - "github.com/harmony-one/harmony/internal/utils/rclient" "github.com/ethereum/go-ethereum/rlp" bls_core "github.com/harmony-one/bls/ffi/go/bls" diff --git a/p2p/security/security_test.go b/p2p/security/security_test.go index 483de2ad6f..3ebe589461 100644 --- a/p2p/security/security_test.go +++ b/p2p/security/security_test.go @@ -4,8 +4,8 @@ import ( "context" "fmt" "net" + "sync" "testing" - "time" "github.com/harmony-one/harmony/internal/utils/blockedpeers" "github.com/libp2p/go-libp2p" @@ -119,14 +119,30 @@ func TestManager_OnDisconnectCheck(t *testing.T) { assert.Nil(t, err) defer h1.Close() - fakeHost := &fakeHost{} - security := NewManager(2, 0, blockedpeers.NewManager(4)) - h1.Network().Notify(fakeHost) - fakeHost.SetConnectCallback(security.OnConnectCheck) - fakeHost.SetDisconnectCallback(security.OnDisconnectCheck) h2, err := newPeer(GetFreePort(t)) assert.Nil(t, err) defer h2.Close() + + fakeHost := &fakeHost{} + security := NewManager(2, 0, blockedpeers.NewManager(4)) + h1.Network().Notify(fakeHost) + var wrap = func() ( + func(net libp2p_network.Network, conn libp2p_network.Conn) error, + func(conn libp2p_network.Conn) error, + *sync.WaitGroup) { + wg := &sync.WaitGroup{} + return func(net libp2p_network.Network, conn libp2p_network.Conn) error { + wg.Add(1) + return security.OnConnectCheck(net, conn) + }, func(conn libp2p_network.Conn) error { + defer wg.Done() + return security.OnDisconnectCheck(conn) + }, wg + } + OnConnectCheck, OnDisconnectCheck, wg := wrap() + fakeHost.SetConnectCallback(OnConnectCheck) + fakeHost.SetDisconnectCallback(OnDisconnectCheck) + err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()}) assert.Nil(t, err) @@ -137,7 +153,7 @@ func TestManager_OnDisconnectCheck(t *testing.T) { err = h2.Network().ClosePeer(h1.ID()) assert.Nil(t, err) - time.Sleep(200 * time.Millisecond) + wg.Wait() security.RangePeers(func(k string, peers []string) bool { assert.Equal(t, 0, len(peers)) return true From 8f774ea9cd06af54f0ffcce4eeb6c3c8859fc538 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:59:17 +0100 Subject: [PATCH 029/128] Fix typos (#4563) undefined --- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- node/node.md | 8 ++++---- rosetta/infra/README.md | 2 +- scripts/package/readme.md | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md 
index 1b5abab6b5..a1c670b1ac 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,7 +11,7 @@ This [github document](https://help.github.com/articles/creating-a-pull-request/) provides some guidance on how to create a pull request in github. ## PR requirement -To pursue engineering excellence, we have insisted on the highest stardard on the quality of each PR. +To pursue engineering excellence, we have insisted on the highest standard for the quality of each PR. * For each PR, please run [golint](https://github.com/golang/lint), [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports), to fix the basic issues/warnings. * Make sure you understand [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/). @@ -21,7 +21,7 @@ To pursue engineering excellence, we have insisted on the highest stardard on th The best practice is to reorder and squash your local commits before the PR submission to create an atomic and self-contained PR. This [book chapter](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History) provides detailed explanation and guidance on how to rewrite the local git history. -For exampple, a typical workflow is like the following. +For example, a typical workflow is like the following. ```bash # assuming you are working on a fix of bug1, and use a local branch called "fixes_of_bug1". diff --git a/README.md b/README.md index 998832b352..5d2b1bd4cf 100644 --- a/README.md +++ b/README.md @@ -174,7 +174,7 @@ To run this test, do: make test-rpc ``` This test starts a localnet (within the Docker container), **ensures it reaches a consensus**, and runs a series of tests to ensure correct RPC behavior. -This test also acts as a preliminary integration test (more through tests are done on the testnets). +This test also acts as a preliminary integration test (more thorough tests are done on the testnets). > The tests ran by this command can be found [here](https://github.com/harmony-one/harmony-test/tree/master/localnet). If you wish to debug further with the localnet after the tests are done, open a new shell and run: @@ -194,7 +194,7 @@ To run this test, do: make test-rosetta ``` This test starts a localnet (within the Docker container), **ensures it reaches a consensus**, and runs the Construction & Data API checks using the [rosetta-cli](https://github.com/coinbase/rosetta-cli). -This test also acts as a preliminary integration test (more through tests are done on the testnets). +This test also acts as a preliminary integration test (more thorough tests are done on the testnets). > The config for this test can be found [here](https://github.com/harmony-one/harmony-test/blob/master/localnet/configs/localnet_rosetta_test_s0.json) & [here](https://github.com/harmony-one/harmony-test/blob/master/localnet/configs/localnet_rosetta_test_s1.json) Similar to the RPC tests, if you wish to debug further with the localnet after the tests are done, open a new shell and run: diff --git a/node/node.md b/node/node.md index a783df5ead..168c7edeb5 100644 --- a/node/node.md +++ b/node/node.md @@ -3,7 +3,7 @@ ### Services In Harmony network, a node can be treated as one of the roles: validator, leader, beacon validator, -beacon leader depending on its context. With each role, a node can run a certian set of services. +beacon leader depending on its context. With each role, a node can run a certain set of services. For example, a leader needs to run explorer support service, syncing support service etc.. while a normal validator does not run such many. 
@@ -13,8 +13,8 @@ service etc.. while a normal validator does not run such many. To support such behavior, we architecture Node logic with service manager which can wait for actions which each triggers its management operation such as starting some service, stopping some service. -Each service needs to implement minimal interace behavior like Start, Stop so that the service -manager can handle those operation. +Each service needs to implement minimal interface behavior like Start, Stop so that the service +manager can handle those operations. ```go // ServiceInterface is the collection of functions any service needs to implement. @@ -26,7 +26,7 @@ type ServiceInterface interface { ### Creating a service. -To create a service, you need to have an struct which implements above interface function +To create a service, you need to have a struct which implements above interface function `StartService`, `StopService`. Since different services may have different ways to be created you may need to have a method diff --git a/rosetta/infra/README.md b/rosetta/infra/README.md index 2c7d620c49..c04f998502 100644 --- a/rosetta/infra/README.md +++ b/rosetta/infra/README.md @@ -108,7 +108,7 @@ Note that the directory structure for `/root/data` (== `./data`) should look som ``` ### Inspecting Logs -If you mount `./data` on the host to `/root/data` in the container, you van view the harmony node logs at +If you mount `./data` on the host to `/root/data` in the container, you can view the harmony node logs at `./data/logs/` on your host machine. ### View rosetta request logs diff --git a/scripts/package/readme.md b/scripts/package/readme.md index 9ea4af4090..510bc387f3 100644 --- a/scripts/package/readme.md +++ b/scripts/package/readme.md @@ -19,7 +19,7 @@ The default blockchain DBs are stored in `/home/harmony/harmony_db_?` directory. The configuration of harmony process is in `/etc/harmony/harmony.conf`. # Package Manager -Please take sometime to learn about the package managers used on Fedora/Debian based distributions. +Please take some time to learn about the package managers used on Fedora/Debian based distributions. There are many other package managers can be used to manage rpm/deb packages like [Apt], or [Yum] @@ -128,7 +128,7 @@ The default configuration is for validators on mainnet. No need to run `harmony- * `systemctl status harmony` to check status of node # Change node configuration -The node configuration file is in `/etc/harmony/harmony.conf`. Please edit the file as you needed. +The node configuration file is in `/etc/harmony/harmony.conf`. Please edit the file as you need. ```bash sudo vim /etc/harmony/harmony.conf ``` From 582a4cf5cb27b3c87210035c4200d42258c9ffdf Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Thu, 16 Nov 2023 13:40:51 -0900 Subject: [PATCH 030/128] Fixed panic with dsync. 
(#4562) --- cmd/harmony/main.go | 2 +- consensus/consensus.go | 85 ++++++++++++++++++++++++++++++++++++--- consensus/consensus_v2.go | 38 +++++++++-------- consensus/downloader.go | 29 ++++++------- node/node.go | 67 ------------------------------ 5 files changed, 111 insertions(+), 110 deletions(-) diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index fb4365ad82..549237d1ce 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -862,7 +862,7 @@ func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfi currentNode.NodeConfig.ConsensusPriKey = nodeConfig.ConsensusPriKey // This needs to be executed after consensus setup - if err := currentNode.InitConsensusWithValidators(); err != nil { + if err := currentConsensus.InitConsensusWithValidators(); err != nil { utils.Logger().Warn(). Int("shardID", hc.General.ShardID). Err(err). diff --git a/consensus/consensus.go b/consensus/consensus.go index 066d6cee04..019fd85429 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -6,19 +6,20 @@ import ( "sync/atomic" "time" - "github.com/harmony-one/harmony/consensus/engine" - "github.com/harmony-one/harmony/core" - "github.com/harmony-one/harmony/crypto/bls" - "github.com/harmony-one/harmony/internal/registry" - "github.com/harmony-one/abool" bls_core "github.com/harmony-one/bls/ffi/go/bls" + "github.com/harmony-one/harmony/consensus/engine" "github.com/harmony-one/harmony/consensus/quorum" + "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" + "github.com/harmony-one/harmony/crypto/bls" bls_cosi "github.com/harmony-one/harmony/crypto/bls" + "github.com/harmony-one/harmony/internal/registry" "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/multibls" "github.com/harmony-one/harmony/p2p" + "github.com/harmony-one/harmony/shard" + "github.com/harmony-one/harmony/shard/committee" "github.com/harmony-one/harmony/staking/slash" "github.com/pkg/errors" ) @@ -121,7 +122,9 @@ type Consensus struct { // finalityCounter keep tracks of the finality time finalityCounter atomic.Value //int64 - dHelper *downloadHelper + dHelper interface { + DownloadAsync() + } // Both flags only for initialization state. start bool @@ -177,6 +180,10 @@ func (consensus *Consensus) verifyBlock(block *types.Block) error { // BlocksSynchronized lets the main loop know that block synchronization finished // thus the blockchain is likely to be up to date. func (consensus *Consensus) BlocksSynchronized() { + err := consensus.AddConsensusLastMile() + if err != nil { + consensus.GetLogger().Error().Err(err).Msg("add last mile failed") + } consensus.mutex.Lock() defer consensus.mutex.Unlock() consensus.syncReadyChan() @@ -274,6 +281,7 @@ func New( msgSender: NewMessageSender(host), // FBFT timeout consensusTimeout: createTimeout(), + dHelper: downloadAsync{}, } if multiBLSPriKey != nil { @@ -311,3 +319,68 @@ func (consensus *Consensus) GetHost() p2p.Host { func (consensus *Consensus) Registry() *registry.Registry { return consensus.registry } + +// InitConsensusWithValidators initialize shard state +// from latest epoch and update committee pub +// keys for consensus +func (consensus *Consensus) InitConsensusWithValidators() (err error) { + shardID := consensus.ShardID + currentBlock := consensus.Blockchain().CurrentBlock() + blockNum := currentBlock.NumberU64() + consensus.SetMode(Listening) + epoch := currentBlock.Epoch() + utils.Logger().Info(). + Uint64("blockNum", blockNum). + Uint32("shardID", shardID). 
+ Uint64("epoch", epoch.Uint64()). + Msg("[InitConsensusWithValidators] Try To Get PublicKeys") + shardState, err := committee.WithStakingEnabled.Compute( + epoch, consensus.Blockchain(), + ) + if err != nil { + utils.Logger().Err(err). + Uint64("blockNum", blockNum). + Uint32("shardID", shardID). + Uint64("epoch", epoch.Uint64()). + Msg("[InitConsensusWithValidators] Failed getting shard state") + return err + } + subComm, err := shardState.FindCommitteeByID(shardID) + if err != nil { + utils.Logger().Err(err). + Interface("shardState", shardState). + Msg("[InitConsensusWithValidators] Find CommitteeByID") + return err + } + pubKeys, err := subComm.BLSPublicKeys() + if err != nil { + utils.Logger().Error(). + Uint32("shardID", shardID). + Uint64("blockNum", blockNum). + Msg("[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys") + return errors.Wrapf( + err, + "[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys", + ) + } + + for _, key := range pubKeys { + if consensus.GetPublicKeys().Contains(key.Object) { + utils.Logger().Info(). + Uint64("blockNum", blockNum). + Int("numPubKeys", len(pubKeys)). + Str("mode", consensus.Mode().String()). + Msg("[InitConsensusWithValidators] Successfully updated public keys") + consensus.UpdatePublicKeys(pubKeys, shard.Schedule.InstanceForEpoch(epoch).ExternalAllowlist()) + consensus.SetMode(Normal) + return nil + } + } + return nil +} + +type downloadAsync struct { +} + +func (a downloadAsync) DownloadAsync() { +} diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index de7d4650b8..514feaf86f 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -277,6 +277,8 @@ func (consensus *Consensus) BlockCommitSigs(blockNum uint64) ([]byte, error) { return nil, nil } lastCommits, err := consensus.Blockchain().ReadCommitSig(blockNum) + consensus.mutex.Lock() + defer consensus.mutex.Unlock() if err != nil || len(lastCommits) < bls.BLSSignatureSizeInBytes { msgs := consensus.FBFTLog().GetMessagesByTypeSeq( @@ -300,30 +302,26 @@ func (consensus *Consensus) BlockCommitSigs(blockNum uint64) ([]byte, error) { func (consensus *Consensus) Start( stopChan chan struct{}, ) { + consensus.GetLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started") go func() { - consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started") - go func() { - ticker := time.NewTicker(250 * time.Millisecond) - defer ticker.Stop() - for { - select { - case <-stopChan: - return - case <-ticker.C: - consensus.Tick() - } + ticker := time.NewTicker(250 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-stopChan: + return + case <-ticker.C: + consensus.Tick() } - }() - - consensus.mutex.Lock() - consensus.consensusTimeout[timeoutBootstrap].Start() - consensus.getLogger().Info().Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)") - // Set up next block due time. - consensus.NextBlockDue = time.Now().Add(consensus.BlockPeriod) - consensus.mutex.Unlock() + } }() - consensus.dHelper.start() + consensus.mutex.Lock() + consensus.consensusTimeout[timeoutBootstrap].Start() + consensus.getLogger().Info().Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)") + // Set up next block due time. 
+	consensus.NextBlockDue = time.Now().Add(consensus.BlockPeriod)
+	consensus.mutex.Unlock()
 }
 
 func (consensus *Consensus) StartChannel() {
diff --git a/consensus/downloader.go b/consensus/downloader.go
index dde7deab76..f6e0e71003 100644
--- a/consensus/downloader.go
+++ b/consensus/downloader.go
@@ -19,12 +19,13 @@ type downloader interface {
 // Set downloader set the downloader of the shard to consensus
 // TODO: It will be better to move this to consensus.New and register consensus as a service
 func (consensus *Consensus) SetDownloader(d downloader) {
+	consensus.mutex.Lock()
+	defer consensus.mutex.Unlock()
 	consensus.dHelper = newDownloadHelper(consensus, d)
 }
 
 type downloadHelper struct {
 	d downloader
-	c *Consensus
 
 	startedCh  chan struct{}
 	finishedCh chan struct{}
@@ -41,46 +42,42 @@ func newDownloadHelper(c *Consensus, d downloader) *downloadHelper {
 	finishedSub := d.SubscribeDownloadFinished(finishedCh)
 
 	out := &downloadHelper{
-		c:           c,
 		d:           d,
 		startedCh:   startedCh,
 		finishedCh:  finishedCh,
 		startedSub:  startedSub,
 		finishedSub: finishedSub,
 	}
-	go out.downloadStartedLoop()
-	go out.downloadFinishedLoop()
+	go out.downloadStartedLoop(c)
+	go out.downloadFinishedLoop(c)
 	return out
 }
 
-func (dh *downloadHelper) start() {
+func (dh *downloadHelper) DownloadAsync() {
+	dh.d.DownloadAsync()
 }
 
-func (dh *downloadHelper) downloadStartedLoop() {
+func (dh *downloadHelper) downloadStartedLoop(c *Consensus) {
 	for {
 		select {
 		case <-dh.startedCh:
-			dh.c.BlocksNotSynchronized()
+			c.BlocksNotSynchronized()
 		case err := <-dh.startedSub.Err():
-			dh.c.getLogger().Info().Err(err).Msg("consensus download finished loop closed")
+			c.GetLogger().Info().Err(err).Msg("consensus download started loop closed")
 			return
 		}
 	}
}
 
-func (dh *downloadHelper) downloadFinishedLoop() {
+func (dh *downloadHelper) downloadFinishedLoop(c *Consensus) {
 	for {
 		select {
 		case <-dh.finishedCh:
-			err := dh.c.AddConsensusLastMile()
-			if err != nil {
-				dh.c.getLogger().Error().Err(err).Msg("add last mile failed")
-			}
-			dh.c.BlocksSynchronized()
+			c.BlocksSynchronized()
 		case err := <-dh.finishedSub.Err():
-			dh.c.getLogger().Info().Err(err).Msg("consensus download finished loop closed")
+			c.GetLogger().Info().Err(err).Msg("consensus download finished loop closed")
 			return
 		}
	}
@@ -104,7 +101,7 @@ func (consensus *Consensus) AddConsensusLastMile() error {
 }
 
 func (consensus *Consensus) spinUpStateSync() {
-	consensus.dHelper.d.DownloadAsync()
+	consensus.dHelper.DownloadAsync()
 	consensus.current.SetMode(Syncing)
 	for _, v := range consensus.consensusTimeout {
 		v.Stop()
diff --git a/node/node.go b/node/node.go
index a77939f563..dbc9639eb8 100644
--- a/node/node.go
+++ b/node/node.go
@@ -52,7 +52,6 @@ import (
 	"github.com/harmony-one/harmony/node/worker"
 	"github.com/harmony-one/harmony/p2p"
 	"github.com/harmony-one/harmony/shard"
-	"github.com/harmony-one/harmony/shard/committee"
 	"github.com/harmony-one/harmony/staking/reward"
 	"github.com/harmony-one/harmony/staking/slash"
 	staking "github.com/harmony-one/harmony/staking/types"
@@ -1196,72 +1195,6 @@ func (node *Node) updateInitialRewardValues() {
 	reward.SetTotalInitialTokens(initTotal)
 }
 
-// InitConsensusWithValidators initialize shard state
-// from latest epoch and update committee pub
-// keys for consensus
-func (node *Node) InitConsensusWithValidators() (err error) {
-	if node.Consensus == nil {
-		utils.Logger().Error().
- Msg("[InitConsensusWithValidators] consenus is nil; Cannot figure out shardID") - return errors.New( - "[InitConsensusWithValidators] consenus is nil; Cannot figure out shardID", - ) - } - shardID := node.Consensus.ShardID - currentBlock := node.Blockchain().CurrentBlock() - blockNum := currentBlock.NumberU64() - node.Consensus.SetMode(consensus.Listening) - epoch := currentBlock.Epoch() - utils.Logger().Info(). - Uint64("blockNum", blockNum). - Uint32("shardID", shardID). - Uint64("epoch", epoch.Uint64()). - Msg("[InitConsensusWithValidators] Try To Get PublicKeys") - shardState, err := committee.WithStakingEnabled.Compute( - epoch, node.Consensus.Blockchain(), - ) - if err != nil { - utils.Logger().Err(err). - Uint64("blockNum", blockNum). - Uint32("shardID", shardID). - Uint64("epoch", epoch.Uint64()). - Msg("[InitConsensusWithValidators] Failed getting shard state") - return err - } - subComm, err := shardState.FindCommitteeByID(shardID) - if err != nil { - utils.Logger().Err(err). - Interface("shardState", shardState). - Msg("[InitConsensusWithValidators] Find CommitteeByID") - return err - } - pubKeys, err := subComm.BLSPublicKeys() - if err != nil { - utils.Logger().Error(). - Uint32("shardID", shardID). - Uint64("blockNum", blockNum). - Msg("[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys") - return errors.Wrapf( - err, - "[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys", - ) - } - - for _, key := range pubKeys { - if node.Consensus.GetPublicKeys().Contains(key.Object) { - utils.Logger().Info(). - Uint64("blockNum", blockNum). - Int("numPubKeys", len(pubKeys)). - Str("mode", node.Consensus.Mode().String()). - Msg("[InitConsensusWithValidators] Successfully updated public keys") - node.Consensus.UpdatePublicKeys(pubKeys, shard.Schedule.InstanceForEpoch(epoch).ExternalAllowlist()) - node.Consensus.SetMode(consensus.Normal) - return nil - } - } - return nil -} - func (node *Node) initNodeConfiguration() (service.NodeConfig, chan p2p.Peer, error) { chanPeer := make(chan p2p.Peer) nodeConfig := service.NodeConfig{ From 6eda7857d02bd37b06b59a74b6aa905ed984e65f Mon Sep 17 00:00:00 2001 From: Gheis Mohammadi Date: Sat, 18 Nov 2023 04:35:16 +0800 Subject: [PATCH 031/128] fix the issue of adding an existed block in legacy sync (#4565) --- api/service/legacysync/syncing.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/api/service/legacysync/syncing.go b/api/service/legacysync/syncing.go index 44c5de64a2..bc5d9aed45 100644 --- a/api/service/legacysync/syncing.go +++ b/api/service/legacysync/syncing.go @@ -904,7 +904,16 @@ func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain } _, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */) - if err != nil { + switch { + case errors.Is(err, core.ErrKnownBlock): + utils.Logger().Info(). + Uint64("blockHeight", block.NumberU64()). + Uint64("blockEpoch", block.Epoch().Uint64()). + Str("blockHex", block.Hash().Hex()). + Uint32("ShardID", block.ShardID()). + Msg("[SYNC] UpdateBlockAndStatus: Block exists") + return nil + case err != nil: utils.Logger().Error(). Err(err). Msgf( @@ -913,6 +922,7 @@ func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain block.ShardID(), ) return err + default: } utils.Logger().Info(). Uint64("blockHeight", block.NumberU64()). 
From 78685f9ffa39f336d1df8a2b6345800e18198282 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 22 Nov 2023 12:07:11 -0400 Subject: [PATCH 032/128] Removed future blocks from blockchain_impl.go (#4569) --- core/blockchain_impl.go | 56 +++-------------------------------------- 1 file changed, 3 insertions(+), 53 deletions(-) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 2c38416985..48e35325be 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -103,8 +103,6 @@ const ( bodyCacheLimit = 128 blockCacheLimit = 128 receiptsCacheLimit = 32 - maxFutureBlocks = 16 - maxTimeFutureBlocks = 30 badBlockLimit = 10 triesInRedis = 1000 shardCacheLimit = 10 @@ -198,7 +196,6 @@ type BlockChainImpl struct { bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format receiptsCache *lru.Cache // Cache for the most recent receipts per block blockCache *lru.Cache // Cache for the most recent entire blocks - futureBlocks *lru.Cache // future blocks are blocks added for later processing shardStateCache *lru.Cache lastCommitsCache *lru.Cache epochCache *lru.Cache // Cache epoch number → first block number @@ -270,7 +267,6 @@ func newBlockChainWithOptions( bodyRLPCache, _ := lru.New(bodyCacheLimit) receiptsCache, _ := lru.New(receiptsCacheLimit) blockCache, _ := lru.New(blockCacheLimit) - futureBlocks, _ := lru.New(maxFutureBlocks) badBlocks, _ := lru.New(badBlockLimit) shardCache, _ := lru.New(shardCacheLimit) commitsCache, _ := lru.New(commitsCacheLimit) @@ -296,7 +292,6 @@ func newBlockChainWithOptions( bodyRLPCache: bodyRLPCache, receiptsCache: receiptsCache, blockCache: blockCache, - futureBlocks: futureBlocks, shardStateCache: shardCache, lastCommitsCache: commitsCache, epochCache: epochCache, @@ -373,9 +368,6 @@ func newBlockChainWithOptions( return nil, errors.WithMessage(err, "failed to write pre-image start end blocks") } } - - // Take ownership of this particular state - go bc.update() return bc, nil } @@ -684,7 +676,6 @@ func (bc *BlockChainImpl) SetHead(head uint64) error { bc.bodyRLPCache.Purge() bc.receiptsCache.Purge() bc.blockCache.Purge() - bc.futureBlocks.Purge() bc.shardStateCache.Purge() // Rewind the block chain, ensuring we don't end up with a stateless head block @@ -1216,23 +1207,6 @@ func (bc *BlockChainImpl) Stop() { utils.Logger().Info().Msg("Blockchain manager stopped") } -func (bc *BlockChainImpl) procFutureBlocks() { - blocks := make([]*types.Block, 0, bc.futureBlocks.Len()) - for _, hash := range bc.futureBlocks.Keys() { - if block, exist := bc.futureBlocks.Peek(hash); exist { - blocks = append(blocks, block.(*types.Block)) - } - } - if len(blocks) > 0 { - types.BlockBy(types.Number).Sort(blocks) - - // Insert one by one as chain insertion needs contiguous ancestry between blocks - for i := range blocks { - bc.InsertChain(blocks[i:i+1], true /* verifyHeaders */) - } - } -} - // WriteStatus status of write type WriteStatus byte @@ -1624,7 +1598,6 @@ func (bc *BlockChainImpl) WriteBlockWithState( return NonStatTy, errors.Wrap(err, "writeHeadBlock") } - bc.futureBlocks.Remove(block.Hash()) return CanonStatTy, nil } @@ -1758,20 +1731,10 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i } case err == consensus_engine.ErrFutureBlock: - // Allow up to MaxFuture second in the future blocks. If this limit is exceeded - // the chain is discarded and processed at a later time if given. 
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) - if block.Time().Cmp(max) > 0 { - return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max) - } - bc.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue + return i, events, coalescedLogs, err - case err == consensus_engine.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): - bc.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue + case err == consensus_engine.ErrUnknownAncestor: + return i, events, coalescedLogs, err case err == consensus_engine.ErrPrunedAncestor: // TODO: add fork choice mechanism @@ -2020,19 +1983,6 @@ func (bc *BlockChainImpl) PostChainEvents(events []interface{}, logs []*types.Lo } } -func (bc *BlockChainImpl) update() { - futureTimer := time.NewTicker(5 * time.Second) - defer futureTimer.Stop() - for { - select { - case <-futureTimer.C: - bc.procFutureBlocks() - case <-bc.quit: - return - } - } -} - // BadBlock .. type BadBlock struct { Block *types.Block From 1f974af163e16d7867a07e9878808b95e15c67aa Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Thu, 23 Nov 2023 15:27:12 -0400 Subject: [PATCH 033/128] Removed fast block. (#4571) --- core/blockchain_impl.go | 170 +--------------------------------------- 1 file changed, 3 insertions(+), 167 deletions(-) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 48e35325be..4ce147af07 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -70,9 +70,8 @@ import ( ) var ( - headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) - headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) - headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil) + headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) + headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) @@ -188,8 +187,7 @@ type BlockChainImpl struct { pendingCrossLinksMutex sync.RWMutex // pending crosslinks lock pendingSlashingCandidatesMU sync.RWMutex // pending slashing candidates - currentBlock atomic.Value // Current head of the block chain - currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 
+ currentBlock atomic.Value // Current head of the block chain stateCache state.Database // State database to reuse between imports (contains state cache) bodyCache *lru.Cache // Cache for the most recent block bodies @@ -323,7 +321,6 @@ func newBlockChainWithOptions( } var nilBlock *types.Block bc.currentBlock.Store(nilBlock) - bc.currentFastBlock.Store(nilBlock) if err := bc.loadLastState(); err != nil { return nil, err } @@ -617,22 +614,8 @@ func (bc *BlockChainImpl) loadLastState() error { return errors.Wrap(err, "headerChain SetCurrentHeader") } - // Restore the last known head fast block - bc.currentFastBlock.Store(currentBlock) - headFastBlockGauge.Update(int64(currentBlock.NumberU64())) - if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { - if block := bc.GetBlockByHash(head); block != nil { - bc.currentFastBlock.Store(block) - headFastBlockGauge.Update(int64(block.NumberU64())) - } - } - - // Issue a status log for the user - currentFastBlock := bc.CurrentFastBlock() - headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number().Uint64()) blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) - fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) utils.Logger().Info(). Str("number", currentHeader.Number().String()). @@ -646,12 +629,6 @@ func (bc *BlockChainImpl) loadLastState() error { Str("td", blockTd.String()). Str("age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)).String()). Msg("Loaded most recent local full block") - utils.Logger().Info(). - Str("number", currentFastBlock.Number().String()). - Str("hash", currentFastBlock.Hash().Hex()). - Str("td", fastTd.String()). - Str("age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)).String()). - Msg("Loaded most recent local fast block") return nil } @@ -691,30 +668,16 @@ func (bc *BlockChainImpl) SetHead(head uint64) error { headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) } } - // Rewind the fast block in a simpleton way to the target head - if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number().Uint64() < currentFastBlock.NumberU64() { - newHeadFastBlock := bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64()) - bc.currentFastBlock.Store(newHeadFastBlock) - headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) - } // If either blocks reached nil, reset to the genesis state if currentBlock := bc.CurrentBlock(); currentBlock == nil { bc.currentBlock.Store(bc.genesisBlock) headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) } - if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil { - bc.currentFastBlock.Store(bc.genesisBlock) - headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) - } currentBlock := bc.CurrentBlock() - currentFastBlock := bc.CurrentFastBlock() if err := rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil { return err } - if err := rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil { - return err - } return bc.loadLastState() } @@ -727,12 +690,6 @@ func (bc *BlockChainImpl) CurrentBlock() *types.Block { return bc.currentBlock.Load().(*types.Block) } -// CurrentFastBlock retrieves the current fast-sync head block of the canonical -// chain. The block is retrieved from the blockchain's internal cache. 
-func (bc *BlockChainImpl) CurrentFastBlock() *types.Block { - return bc.currentFastBlock.Load().(*types.Block) -} - func (bc *BlockChainImpl) Processor() Processor { bc.procmu.RLock() defer bc.procmu.RUnlock() @@ -779,8 +736,6 @@ func (bc *BlockChainImpl) ResetWithGenesisBlock(genesis *types.Block) error { } bc.currentBlock.Store(bc.genesisBlock) headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) - bc.currentFastBlock.Store(bc.genesisBlock) - headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) return nil } @@ -914,9 +869,6 @@ func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error { if err := rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { return err } - - bc.currentFastBlock.Store(block) - headFastBlockGauge.Update(int64(block.NumberU64())) } return nil } @@ -930,9 +882,6 @@ func (bc *BlockChainImpl) tikvFastForward(block *types.Block, logs []*types.Log) return errors.Wrap(err, "HeaderChain SetCurrentHeader") } - bc.currentFastBlock.Store(block) - headFastBlockGauge.Update(int64(block.NumberU64())) - var events []interface{} events = append(events, ChainEvent{block, block.Hash(), logs}) events = append(events, ChainHeadEvent{block}) @@ -1234,14 +1183,6 @@ func (bc *BlockChainImpl) Rollback(chain []common.Hash) error { } } } - if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentFastBlock.Hash() == hash { - newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) - if newFastBlock != nil { - bc.currentFastBlock.Store(newFastBlock) - headFastBlockGauge.Update(int64(newFastBlock.NumberU64())) - rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) - } - } if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentBlock.Hash() == hash { newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) if newBlock != nil { @@ -1326,111 +1267,6 @@ func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts ty return nil } -// InsertReceiptChain attempts to complete an already existing header chain with -// transaction and receipt data. -// Deprecated: no usages of this function found. -// TODO: should be removed -func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { - // Do a sanity check that the provided chain is actually ordered and linked - for i := 1; i < len(blockChain); i++ { - if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { - utils.Logger().Error(). - Str("number", blockChain[i].Number().String()). - Str("hash", blockChain[i].Hash().Hex()). - Str("parent", blockChain[i].ParentHash().Hex()). - Str("prevnumber", blockChain[i-1].Number().String()). - Str("prevhash", blockChain[i-1].Hash().Hex()). 
- Msg("Non contiguous receipt insert") - return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), - blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) - } - } - - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - - var ( - stats = struct{ processed, ignored int32 }{} - start = time.Now() - bytes = 0 - batch = bc.db.NewBatch() - ) - for i, block := range blockChain { - receipts := receiptChain[i] - // Short circuit insertion if shutting down or processing failed - if atomic.LoadInt32(&bc.procInterrupt) == 1 { - return 0, nil - } - // Short circuit if the owner header is unknown - if !bc.HasHeader(block.Hash(), block.NumberU64()) { - return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) - } - // Skip if the entire data is already known - if bc.HasBlock(block.Hash(), block.NumberU64()) { - stats.ignored++ - continue - } - // Compute all the non-consensus fields of the receipts - if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil { - return 0, fmt.Errorf("failed to set receipts data: %v", err) - } - // Write all the data out into the database - if err := rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { - return 0, err - } - if err := rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { - return 0, err - } - if err := rawdb.WriteBlockTxLookUpEntries(batch, block); err != nil { - return 0, err - } - if err := rawdb.WriteBlockStxLookUpEntries(batch, block); err != nil { - return 0, err - } - - stats.processed++ - - if batch.ValueSize() >= ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return 0, err - } - bytes += batch.ValueSize() - batch.Reset() - } - } - if batch.ValueSize() > 0 { - bytes += batch.ValueSize() - if err := batch.Write(); err != nil { - return 0, err - } - } - - // Update the head fast sync block if better - bc.mu.Lock() - head := blockChain[len(blockChain)-1] - if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case - currentFastBlock := bc.CurrentFastBlock() - if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { - rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) - bc.currentFastBlock.Store(head) - headFastBlockGauge.Update(int64(head.NumberU64())) - } - } - bc.mu.Unlock() - - utils.Logger().Info(). - Int32("count", stats.processed). - Str("elapsed", common.PrettyDuration(time.Since(start)).String()). - Str("age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)).String()). - Str("head", head.Number().String()). - Str("hash", head.Hash().Hex()). - Str("size", common.StorageSize(bytes).String()). - Int32("ignored", stats.ignored). - Msg("Imported new block receipts") - - return 0, nil -} - var lastWrite uint64 func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { From 368fc9e07b6b36e3a4eda2d403dc4f00ffe03308 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 24 Nov 2023 16:56:27 -0400 Subject: [PATCH 034/128] Removed unused functions and improved locks usage. 
(#4572) --- core/blockchain.go | 31 --------- core/blockchain_impl.go | 149 ++++------------------------------------ core/headerchain.go | 51 -------------- 3 files changed, 13 insertions(+), 218 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 0adc96925e..41f72a9a2b 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -47,11 +47,6 @@ type Options struct { type BlockChain interface { // ValidateNewBlock validates new block. ValidateNewBlock(block *types.Block, beaconChain BlockChain) error - // SetHead rewinds the local chain to a new head. In the case of headers, everything - // above the new head will be deleted and the new one set. In the case of blocks - // though, the head may be further rewound if block bodies are missing (non-archive - // nodes after a fast sync). - SetHead(head uint64) error // ShardID returns the shard Id of the blockchain. ShardID() uint32 // CurrentBlock retrieves the current head block of the canonical chain. The @@ -105,18 +100,6 @@ type BlockChain interface { // Rollback is designed to remove a chain of links from the database that aren't // certain enough to be valid. Rollback(chain []common.Hash) error - // WriteBlockWithoutState writes only the block and its metadata to the database, - // but does not write any state. This is used to construct competing side forks - // up to the point where they exceed the canonical total difficulty. - WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) - // WriteBlockWithState writes the block and all associated state to the database. - WriteBlockWithState( - block *types.Block, receipts []*types.Receipt, - cxReceipts []*types.CXReceipt, - stakeMsgs []types2.StakeMsg, - paid reward.Reader, - state *state.DB, - ) (status WriteStatus, err error) // GetMaxGarbageCollectedBlockNumber .. GetMaxGarbageCollectedBlockNumber() int64 // InsertChain attempts to insert the given batch of blocks in to the canonical @@ -167,8 +150,6 @@ type BlockChain interface { WriteShardStateBytes(db rawdb.DatabaseWriter, epoch *big.Int, shardState []byte, ) (*shard.State, error) - // WriteHeadBlock writes head block. - WriteHeadBlock(block *types.Block) error // ReadCommitSig retrieves the commit signature on a block. ReadCommitSig(blockNum uint64) ([]byte, error) // WriteCommitSig saves the commits signatures signed on a block. @@ -179,20 +160,8 @@ type BlockChain interface { GetVrfByNumber(number uint64) []byte // ChainDb returns the database. ChainDb() ethdb.Database - // GetEpochBlockNumber returns the first block number of the given epoch. - GetEpochBlockNumber(epoch *big.Int) (*big.Int, error) - // StoreEpochBlockNumber stores the given epoch-first block number. - StoreEpochBlockNumber( - epoch *big.Int, blockNum *big.Int, - ) error // ReadEpochVrfBlockNums retrieves block numbers with valid VRF for the specified epoch. ReadEpochVrfBlockNums(epoch *big.Int) ([]uint64, error) - // WriteEpochVrfBlockNums saves block numbers with valid VRF for the specified epoch. - WriteEpochVrfBlockNums(epoch *big.Int, vrfNumbers []uint64) error - // ReadEpochVdfBlockNum retrieves block number with valid VDF for the specified epoch. - ReadEpochVdfBlockNum(epoch *big.Int) (*big.Int, error) - // WriteEpochVdfBlockNum saves block number with valid VDF for the specified epoch. - WriteEpochVdfBlockNum(epoch *big.Int, blockNum *big.Int) error // WriteCrossLinks saves the hashes of crosslinks by shardID and blockNum combination key. 
WriteCrossLinks(batch rawdb.DatabaseWriter, cls []types.CrossLink) error // DeleteCrossLinks removes the hashes of crosslinks by shardID and blockNum combination key. diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 4ce147af07..abf745c896 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/metrics" @@ -181,9 +180,7 @@ type BlockChainImpl struct { scope event.SubscriptionScope genesisBlock *types.Block - mu sync.RWMutex // global mutex for locking chain operations chainmu sync.RWMutex // blockchain insertion lock - procmu sync.RWMutex // block processor lock pendingCrossLinksMutex sync.RWMutex // pending crosslinks lock pendingSlashingCandidatesMU sync.RWMutex // pending slashing candidates @@ -576,14 +573,14 @@ func (bc *BlockChainImpl) loadLastState() error { if head == (common.Hash{}) { // Corrupt or empty database, init from scratch utils.Logger().Warn().Msg("Empty database, resetting chain") - return bc.Reset() + return bc.reset() } // Make sure the entire head block is available currentBlock := bc.GetBlockByHash(head) if currentBlock == nil { // Corrupt or empty database, init from scratch utils.Logger().Warn().Str("hash", head.Hex()).Msg("Head block missing, resetting chain") - return bc.Reset() + return bc.reset() } // Make sure the state associated with the block is available if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil { @@ -633,12 +630,9 @@ func (bc *BlockChainImpl) loadLastState() error { return nil } -func (bc *BlockChainImpl) SetHead(head uint64) error { +func (bc *BlockChainImpl) setHead(head uint64) error { utils.Logger().Warn().Uint64("target", head).Msg("Rewinding blockchain") - bc.mu.Lock() - defer bc.mu.Unlock() - // Rewind the header chain, deleting all block bodies until then delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) error { return rawdb.DeleteBody(db, hash, num) @@ -691,8 +685,6 @@ func (bc *BlockChainImpl) CurrentBlock() *types.Block { } func (bc *BlockChainImpl) Processor() Processor { - bc.procmu.RLock() - defer bc.procmu.RUnlock() return bc.processor } @@ -709,17 +701,15 @@ func (bc *BlockChainImpl) Snapshots() *snapshot.Tree { return bc.snaps } -func (bc *BlockChainImpl) Reset() error { - return bc.ResetWithGenesisBlock(bc.genesisBlock) +func (bc *BlockChainImpl) reset() error { + return bc.resetWithGenesisBlock(bc.genesisBlock) } -func (bc *BlockChainImpl) ResetWithGenesisBlock(genesis *types.Block) error { +func (bc *BlockChainImpl) resetWithGenesisBlock(genesis *types.Block) error { // Dump the entire block chain and purge the caches - if err := bc.SetHead(0); err != nil { + if err := bc.setHead(0); err != nil { return err } - bc.mu.Lock() - defer bc.mu.Unlock() // Prepare the genesis block and reinitialise the chain if err := rawdb.WriteBlock(bc.db, genesis); err != nil { @@ -808,8 +798,8 @@ func (bc *BlockChainImpl) Export(w io.Writer) error { // ExportN writes a subset of the active chain to the given writer. 
func (bc *BlockChainImpl) ExportN(w io.Writer, first uint64, last uint64) error { - bc.mu.RLock() - defer bc.mu.RUnlock() + bc.chainmu.RLock() + defer bc.chainmu.RUnlock() if first > last { return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) @@ -837,10 +827,6 @@ func (bc *BlockChainImpl) ExportN(w io.Writer, first uint64, last uint64) error return nil } -func (bc *BlockChainImpl) WriteHeadBlock(block *types.Block) error { - return bc.writeHeadBlock(block) -} - // writeHeadBlock writes a new head block func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error { // If the block is on a side chain or an unknown one, force other heads onto it too @@ -1167,8 +1153,8 @@ const ( ) func (bc *BlockChainImpl) Rollback(chain []common.Hash) error { - bc.mu.Lock() - defer bc.mu.Unlock() + bc.chainmu.Lock() + defer bc.chainmu.Unlock() valsToRemove := map[common.Address]struct{}{} for i := len(chain) - 1; i >= 0; i-- { @@ -1205,95 +1191,15 @@ func (bc *BlockChainImpl) Rollback(chain []common.Hash) error { return bc.removeInValidatorList(valsToRemove) } -// SetReceiptsData computes all the non-consensus fields of the receipts -func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error { - signer := types.MakeSigner(config, block.Epoch()) - ethSigner := types.NewEIP155Signer(config.EthCompatibleChainID) - - transactions, stakingTransactions, logIndex := block.Transactions(), block.StakingTransactions(), uint(0) - if len(transactions)+len(stakingTransactions) != len(receipts) { - return errors.New("transaction+stakingTransactions and receipt count mismatch") - } - - // The used gas can be calculated based on previous receipts - if len(receipts) > 0 && len(transactions) > 0 { - receipts[0].GasUsed = receipts[0].CumulativeGasUsed - } - for j := 1; j < len(transactions); j++ { - // The transaction hash can be retrieved from the transaction itself - receipts[j].TxHash = transactions[j].Hash() - receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed - // The contract address can be derived from the transaction itself - if transactions[j].To() == nil { - // Deriving the signer is expensive, only do if it's actually needed - var from common.Address - if transactions[j].IsEthCompatible() { - from, _ = types.Sender(ethSigner, transactions[j]) - } else { - from, _ = types.Sender(signer, transactions[j]) - } - receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce()) - } - // The derived log fields can simply be set from the block and transaction - for k := 0; k < len(receipts[j].Logs); k++ { - receipts[j].Logs[k].BlockNumber = block.NumberU64() - receipts[j].Logs[k].BlockHash = block.Hash() - receipts[j].Logs[k].TxHash = receipts[j].TxHash - receipts[j].Logs[k].TxIndex = uint(j) - receipts[j].Logs[k].Index = logIndex - logIndex++ - } - } - - // The used gas can be calculated based on previous receipts - if len(receipts) > len(transactions) && len(stakingTransactions) > 0 { - receipts[len(transactions)].GasUsed = receipts[len(transactions)].CumulativeGasUsed - } - // in a block, txns are processed before staking txns - for j := len(transactions) + 1; j < len(transactions)+len(stakingTransactions); j++ { - // The transaction hash can be retrieved from the staking transaction itself - receipts[j].TxHash = stakingTransactions[j].Hash() - receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed - // The derived log fields can simply be set from the block 
and transaction - for k := 0; k < len(receipts[j].Logs); k++ { - receipts[j].Logs[k].BlockNumber = block.NumberU64() - receipts[j].Logs[k].BlockHash = block.Hash() - receipts[j].Logs[k].TxHash = receipts[j].TxHash - receipts[j].Logs[k].TxIndex = uint(j) + uint(len(transactions)) - receipts[j].Logs[k].Index = logIndex - logIndex++ - } - } - return nil -} - var lastWrite uint64 -func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - - if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil { - return err - } - if err := rawdb.WriteBlock(bc.db, block); err != nil { - return err - } - - return nil -} - -func (bc *BlockChainImpl) WriteBlockWithState( +func (bc *BlockChainImpl) writeBlockWithState( block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, stakeMsgs []staking.StakeMsg, paid reward.Reader, state *state.DB, ) (status WriteStatus, err error) { - // Make sure no inconsistent state is leaked during insertion - bc.mu.Lock() - defer bc.mu.Unlock() - currentBlock := bc.CurrentBlock() if currentBlock == nil { return NonStatTy, errors.New("Current block is nil") @@ -1676,7 +1582,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Write the block to the chain and get the status. substart = time.Now() - status, err := bc.WriteBlockWithState( + status, err := bc.writeBlockWithState( block, receipts, cxReceipts, stakeMsgs, payout, state, ) if err != nil { @@ -1888,35 +1794,6 @@ Error: %v } } -// InsertHeaderChain attempts to insert the given header chain in to the local -// chain, possibly creating a reorg. If an error is returned, it will return the -// index number of the failing header as well an error describing what went wrong. -// -// The verify parameter can be used to fine tune whether nonce verification -// should be done or not. The reason behind the optional check is because some -// of the header retrieval mechanisms already need to verify nonces, as well as -// because nonces can be verified sparsely, not needing to check each. -func (bc *BlockChainImpl) InsertHeaderChain(chain []*block.Header, checkFreq int) (int, error) { - start := time.Now() - if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { - return i, err - } - - // Make sure only one thread manipulates the chain at once - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - - whFunc := func(header *block.Header) error { - bc.mu.Lock() - defer bc.mu.Unlock() - - _, err := bc.hc.WriteHeader(header) - return err - } - - return bc.hc.InsertHeaderChain(chain, whFunc, start) -} - func (bc *BlockChainImpl) CurrentHeader() *block.Header { return bc.hc.CurrentHeader() } diff --git a/core/headerchain.go b/core/headerchain.go index a902d5a124..4f5e8a066c 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -18,13 +18,11 @@ package core import ( crand "crypto/rand" - "errors" "fmt" "math" "math/big" mrand "math/rand" "sync/atomic" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" @@ -260,55 +258,6 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*block.Header, checkFreq int) return 0, nil } -// InsertHeaderChain attempts to insert the given header chain in to the local -// chain, possibly creating a reorg. If an error is returned, it will return the -// index number of the failing header as well an error describing what went wrong. 
-// -// The verify parameter can be used to fine tune whether nonce verification -// should be done or not. The reason behind the optional check is because some -// of the header retrieval mechanisms already need to verfy nonces, as well as -// because nonces can be verified sparsely, not needing to check each. -func (hc *HeaderChain) InsertHeaderChain(chain []*block.Header, writeHeader WhCallback, start time.Time) (int, error) { - // Collect some import statistics to report on - stats := struct{ processed, ignored int }{} - // All headers passed verification, import them into the database - for i, header := range chain { - // Short circuit insertion if shutting down - if hc.procInterrupt() { - utils.Logger().Debug().Msg("Premature abort during headers import") - return i, errors.New("aborted") - } - // If the header's already known, skip it, otherwise store - if hc.HasHeader(header.Hash(), header.Number().Uint64()) { - stats.ignored++ - continue - } - if err := writeHeader(header); err != nil { - return i, err - } - stats.processed++ - } - // Report some public statistics so the user has a clue what's going on - last := chain[len(chain)-1] - - context := utils.Logger().With(). - Int("count", stats.processed). - Str("elapsed", common.PrettyDuration(time.Since(start)).String()). - Str("number", last.Number().String()). - Str("hash", last.Hash().Hex()) - - if timestamp := time.Unix(last.Time().Int64(), 0); time.Since(timestamp) > time.Minute { - context = context.Str("age", common.PrettyAge(timestamp).String()) - } - if stats.ignored > 0 { - context = context.Int("ignored", stats.ignored) - } - logger := context.Logger() - logger.Info().Msg("Imported new block headers") - - return 0, nil -} - // GetBlockHashesFromHash retrieves a number of block hashes starting at a given // hash, fetching towards the genesis block. func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { From b7b7fbf2b3154d5ddf39100ac5d626f56f016498 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 24 Nov 2023 16:58:10 -0400 Subject: [PATCH 035/128] Skip epochchain errors. 
(#4573) --- api/service/legacysync/syncing.go | 6 +++++- api/service/stagedstreamsync/staged_stream_sync.go | 5 ++++- api/service/stagedsync/stagedsync.go | 6 +++++- consensus/downloader.go | 6 +++++- core/blockchain_impl.go | 5 +++-- core/epochchain.go | 2 +- 6 files changed, 23 insertions(+), 7 deletions(-) diff --git a/api/service/legacysync/syncing.go b/api/service/legacysync/syncing.go index bc5d9aed45..92c8a457f0 100644 --- a/api/service/legacysync/syncing.go +++ b/api/service/legacysync/syncing.go @@ -1158,7 +1158,11 @@ func (ss *StateSync) addConsensusLastMile(bc core.BlockChain, consensus *consens if block == nil { break } - if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil { + _, err := bc.InsertChain(types.Blocks{block}, true) + switch { + case errors.Is(err, core.ErrKnownBlock): + case errors.Is(err, core.ErrNotLastBlockInEpoch): + case err != nil: return errors.Wrap(err, "failed to InsertChain") } } diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index fc59290474..98922af28d 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -637,7 +637,10 @@ func (ss *StagedStreamSync) addConsensusLastMile(bc core.BlockChain, cs *consens break } _, err := bc.InsertChain(types.Blocks{block}, true) - if err != nil && !errors.Is(err, core.ErrKnownBlock) { + switch { + case errors.Is(err, core.ErrKnownBlock): + case errors.Is(err, core.ErrNotLastBlockInEpoch): + case err != nil: return errors.Wrap(err, "failed to InsertChain") } hashes = append(hashes, block.Header().Hash()) diff --git a/api/service/stagedsync/stagedsync.go b/api/service/stagedsync/stagedsync.go index 4cc2a98ea7..83be4bae46 100644 --- a/api/service/stagedsync/stagedsync.go +++ b/api/service/stagedsync/stagedsync.go @@ -1221,7 +1221,11 @@ func (ss *StagedSync) addConsensusLastMile(bc core.BlockChain, cs *consensus.Con if block == nil { break } - if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil && !errors.Is(err, core.ErrKnownBlock) { + _, err := bc.InsertChain(types.Blocks{block}, true) + switch { + case errors.Is(err, core.ErrKnownBlock): + case errors.Is(err, core.ErrNotLastBlockInEpoch): + case err != nil: return errors.Wrap(err, "failed to InsertChain") } } diff --git a/consensus/downloader.go b/consensus/downloader.go index f6e0e71003..804a25aabb 100644 --- a/consensus/downloader.go +++ b/consensus/downloader.go @@ -91,7 +91,11 @@ func (consensus *Consensus) AddConsensusLastMile() error { if block == nil { break } - if _, err := consensus.Blockchain().InsertChain(types.Blocks{block}, true); err != nil && !errors.Is(err, core.ErrKnownBlock) { + _, err := consensus.Blockchain().InsertChain(types.Blocks{block}, true) + switch { + case errors.Is(err, core.ErrKnownBlock): + case errors.Is(err, core.ErrNotLastBlockInEpoch): + case err != nil: return errors.Wrap(err, "failed to InsertChain") } } diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index abf745c896..f6084b4be4 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -88,8 +88,9 @@ var ( blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) // ErrNoGenesis is the error when there is no genesis. 
- ErrNoGenesis = errors.New("Genesis not found in chain") - ErrEmptyChain = errors.New("empty chain") + ErrNoGenesis = errors.New("Genesis not found in chain") + ErrEmptyChain = errors.New("empty chain") + ErrNotLastBlockInEpoch = errors.New("not last block in epoch") // errExceedMaxPendingSlashes .. errExceedMaxPendingSlashes = errors.New("exceeed max pending slashes") errNilEpoch = errors.New("nil epoch for voting power computation") diff --git a/core/epochchain.go b/core/epochchain.go index 7a3c40677b..7d9aeae1a8 100644 --- a/core/epochchain.go +++ b/core/epochchain.go @@ -124,7 +124,7 @@ func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool) (int, error) { }() for i, block := range blocks { if !block.IsLastBlockInEpoch() { - return i, errors.New("block is not last block in epoch") + return i, ErrNotLastBlockInEpoch } sig, bitmap, err := chain.ParseCommitSigAndBitmap(block.GetCurrentCommitSig()) if err != nil { From fe1d97a4608ad56f93b3fdd845c182b1cb835582 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:35:04 -0400 Subject: [PATCH 036/128] Removed outdated check. (#4574) * Removed outdated check. * Fallback for old sync for BeaconBlockChannel. --- node/node.go | 4 ---- node/node_handler.go | 2 -- node/node_syncing.go | 11 ++++++++++- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/node/node.go b/node/node.go index dbc9639eb8..573786c009 100644 --- a/node/node.go +++ b/node/node.go @@ -499,10 +499,6 @@ func (node *Node) validateNodeMessage(ctx context.Context, payload []byte) ( utils.Logger().Debug().Uint64("receivedNum", block.NumberU64()). Uint64("currentNum", curBeaconHeight).Msg("beacon block sync message rejected") return nil, 0, errors.New("beacon block height smaller than current height beyond tolerance") - } else if block.NumberU64()-beaconBlockHeightTolerance > curBeaconHeight { - utils.Logger().Debug().Uint64("receivedNum", block.NumberU64()). - Uint64("currentNum", curBeaconHeight).Msg("beacon block sync message rejected") - return nil, 0, errors.Errorf("beacon block height too much higher than current height beyond tolerance, block %d, current %d, epoch %d , current %d", block.NumberU64(), curBeaconHeight, block.Epoch().Uint64(), curBeaconBlock.Epoch().Uint64()) } else if block.NumberU64() <= curBeaconHeight { utils.Logger().Debug().Uint64("receivedNum", block.NumberU64()). Uint64("currentNum", curBeaconHeight).Msg("beacon block sync message ignored") diff --git a/node/node_handler.go b/node/node_handler.go index eeaf90f2d7..c5feeed071 100644 --- a/node/node_handler.go +++ b/node/node_handler.go @@ -76,8 +76,6 @@ func (node *Node) HandleNodeMessage( if node.Blockchain().ShardID() != shard.BeaconChainShardID { for _, block := range blocks { if block.ShardID() == 0 { - utils.Logger().Info(). 
- Msgf("Beacon block being handled by block channel: %d", block.NumberU64()) if block.IsLastBlockInEpoch() { go func(blk *types.Block) { node.BeaconBlockChannel <- blk diff --git a/node/node_syncing.go b/node/node_syncing.go index 830df25c0d..5319827ffd 100644 --- a/node/node_syncing.go +++ b/node/node_syncing.go @@ -231,7 +231,16 @@ func (node *Node) doBeaconSyncing() { // If Downloader is not working, we need also deal with blocks from beaconBlockChannel go func(node *Node) { // TODO ek – infinite loop; add shutdown/cleanup logic - for _ = range node.BeaconBlockChannel { + for b := range node.BeaconBlockChannel { + if b != nil && b.IsLastBlockInEpoch() { + _, err := node.EpochChain().InsertChain(types.Blocks{b}, true) + if err != nil { + utils.Logger().Error().Err(err).Msgf("[SYNC] InsertChain failed shard: %d epoch:%d number:%d", b.Header().ShardID(), b.Epoch().Uint64(), b.NumberU64()) + } else { + utils.Logger().Info(). + Msgf("Beacon block being handled by block channel: epoch: %d, number: %d", b.Epoch().Uint64(), b.NumberU64()) + } + } } }(node) } From f9934683252e42d73e1cd6f71e82f687a456999b Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Wed, 29 Nov 2023 22:35:36 +0100 Subject: [PATCH 037/128] fix: max-rate bellow the era min-rate (#4552) * fix: max-rate bellow the era min-rate * fix comments * add localnet epoch config * update config * update config * update config * update config --- internal/chain/engine.go | 8 ++++++++ internal/params/config.go | 19 +++++++++++++++++-- staking/availability/measure.go | 24 ++++++++++++++++++++++++ test/build-localnet-validator.sh | 4 ++-- 4 files changed, 51 insertions(+), 4 deletions(-) diff --git a/internal/chain/engine.go b/internal/chain/engine.go index 4f3aac9ff4..c77d86487d 100644 --- a/internal/chain/engine.go +++ b/internal/chain/engine.go @@ -448,6 +448,14 @@ func setElectionEpochAndMinFee(chain engine.ChainReader, header *block.Header, s } isElected[addr] = struct{}{} } + + if config.IsMaxRate(newShardState.Epoch) { + for _, addr := range chain.ValidatorCandidates() { + if _, err := availability.UpdateMaxCommissionFee(state, addr, minRate); err != nil { + return err + } + } + } // due to a bug in the old implementation of the minimum fee, // unelected validators did not have their fee updated even // when the protocol required them to do so. here we fix it, diff --git a/internal/params/config.go b/internal/params/config.go index 86695ba40e..090e5599cf 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -75,6 +75,7 @@ var ( ValidatorCodeFixEpoch: big.NewInt(1535), // 2023-07-20 05:51:07+00:00 HIP30Epoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00 BlockGas30MEpoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00 + MaxRateEpoch: EpochTBD, } // TestnetChainConfig contains the chain parameters to run a node on the harmony test network. @@ -118,6 +119,7 @@ var ( ValidatorCodeFixEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00 HIP30Epoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00 BlockGas30MEpoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00 + MaxRateEpoch: EpochTBD, } // PangaeaChainConfig contains the chain parameters for the Pangaea network. // All features except for CrossLink are enabled at launch. @@ -161,6 +163,7 @@ var ( ValidatorCodeFixEpoch: EpochTBD, HIP30Epoch: EpochTBD, BlockGas30MEpoch: big.NewInt(0), + MaxRateEpoch: EpochTBD, } // PartnerChainConfig contains the chain parameters for the Partner network. 
@@ -205,6 +208,7 @@ var ( ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), BlockGas30MEpoch: big.NewInt(7), + MaxRateEpoch: EpochTBD, } // StressnetChainConfig contains the chain parameters for the Stress test network. @@ -249,6 +253,7 @@ var ( ValidatorCodeFixEpoch: EpochTBD, HIP30Epoch: EpochTBD, BlockGas30MEpoch: big.NewInt(0), + MaxRateEpoch: EpochTBD, } // LocalnetChainConfig contains the chain parameters to run for local development. @@ -290,8 +295,9 @@ var ( LeaderRotationExternalValidatorsEpoch: big.NewInt(6), FeeCollectEpoch: big.NewInt(2), ValidatorCodeFixEpoch: big.NewInt(2), - HIP30Epoch: EpochTBD, + HIP30Epoch: big.NewInt(3), BlockGas30MEpoch: big.NewInt(0), + MaxRateEpoch: big.NewInt(4), } // AllProtocolChanges ... @@ -336,7 +342,8 @@ var ( big.NewInt(0), // FeeCollectEpoch big.NewInt(0), // ValidatorCodeFixEpoch big.NewInt(0), // BlockGas30M - big.NewInt(0), // HIP30Epoch + big.NewInt(0), // BlockGas30M + big.NewInt(0), // MaxRateEpoch } // TestChainConfig ... @@ -382,6 +389,7 @@ var ( big.NewInt(0), // ValidatorCodeFixEpoch big.NewInt(0), // HIP30Epoch big.NewInt(0), // BlockGas30M + big.NewInt(0), // MaxRateEpoch } // TestRules ... @@ -547,6 +555,9 @@ type ChainConfig struct { HIP30Epoch *big.Int `json:"hip30-epoch,omitempty"` BlockGas30MEpoch *big.Int `json:"block-gas-30m-epoch,omitempty"` + + // MaxRateEpoch will make sure the validator max-rate is at least equal to the minRate + the validator max-rate-increase + MaxRateEpoch *big.Int `json:"max-rate-epoch,omitempty"` } // String implements the fmt.Stringer interface. @@ -803,6 +814,10 @@ func (c *ChainConfig) IsHIP30(epoch *big.Int) bool { return isForked(c.HIP30Epoch, epoch) } +func (c *ChainConfig) IsMaxRate(epoch *big.Int) bool { + return isForked(c.MaxRateEpoch, epoch) +} + // During this epoch, shards 2 and 3 will start sending // their balances over to shard 0 or 1. func (c *ChainConfig) IsOneEpochBeforeHIP30(epoch *big.Int) bool { diff --git a/staking/availability/measure.go b/staking/availability/measure.go index 881baa8553..6bf36bfb05 100644 --- a/staking/availability/measure.go +++ b/staking/availability/measure.go @@ -267,3 +267,27 @@ func UpdateMinimumCommissionFee( } return false, nil } + +// UpdateMaxCommissionFee makes sure the max-rate is at least higher than the rate + max-rate-change. +func UpdateMaxCommissionFee(state *state.DB, addr common.Address, minRate numeric.Dec) (bool, error) { + utils.Logger().Info().Msg("begin update max commission fee") + + wrapper, err := state.ValidatorWrapper(addr, true, false) + if err != nil { + return false, err + } + + minMaxRate := minRate.Add(wrapper.MaxChangeRate) + + if wrapper.MaxRate.LT(minMaxRate) { + utils.Logger().Info(). + Str("addr", addr.Hex()). + Str("old max-rate", wrapper.MaxRate.String()). + Str("new max-rate", minMaxRate.String()). 
+ Msg("updating max commission rate") + wrapper.MaxRate.SetBytes(minMaxRate.Bytes()) + return true, nil + } + + return false, nil +} diff --git a/test/build-localnet-validator.sh b/test/build-localnet-validator.sh index 08d9877779..70501c8d67 100644 --- a/test/build-localnet-validator.sh +++ b/test/build-localnet-validator.sh @@ -32,7 +32,7 @@ hmy --node="http://localhost:9500" staking create-validator \ --bls-pubkeys 4f41a37a3a8d0695dd6edcc58142c6b7d98e74da5c90e79b587b3b960b6a4f5e048e6d8b8a000d77a478d44cd640270c,7dcc035a943e29e17959dabe636efad7303d2c6f273ace457ba9dcc2fd19d3f37e70ba1cd8d082cf8ff7be2f861db48c \ --name "s0-localnet-validator1" --identity "validator1" --details "validator1" \ --security-contact "localnet" --website "localnet.one" \ - --max-change-rate 0.1 --max-rate 0.1 --rate 0.1 \ + --max-change-rate 0.01 --max-rate 0.01 --rate 0.01 \ --max-total-delegation 100000000 --min-self-delegation 10000 --bls-pubkeys-dir .hmy/extbls/ hmy --node="http://localhost:9500" staking create-validator \ @@ -40,7 +40,7 @@ hmy --node="http://localhost:9500" staking create-validator \ --bls-pubkeys b0917378b179a519a5055259c4f8980cce37d58af300b00dd98b07076d3d9a3b16c4a55f84522f553872225a7b1efc0c \ --name "s0-localnet-validator2" --identity "validator2" --details "validator2" \ --security-contact "localnet" --website "localnet.one" \ - --max-change-rate 0.1 --max-rate 0.1 --rate 0.1 \ + --max-change-rate 0.1 --max-rate 0.1 --rate 0.05 \ --max-total-delegation 100000000 --min-self-delegation 10000 --bls-pubkeys-dir .hmy/extbls/ hmy --node="http://localhost:9500" staking create-validator \ From 7be2b0260bdfb6e98397f923ad441b65b79367ca Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Fri, 1 Dec 2023 21:19:57 -0400 Subject: [PATCH 038/128] Revert "fix: max-rate bellow the era min-rate (#4552)" (#4578) This reverts commit f9934683252e42d73e1cd6f71e82f687a456999b. --- internal/chain/engine.go | 8 -------- internal/params/config.go | 19 ++----------------- staking/availability/measure.go | 24 ------------------------ test/build-localnet-validator.sh | 4 ++-- 4 files changed, 4 insertions(+), 51 deletions(-) diff --git a/internal/chain/engine.go b/internal/chain/engine.go index c77d86487d..4f3aac9ff4 100644 --- a/internal/chain/engine.go +++ b/internal/chain/engine.go @@ -448,14 +448,6 @@ func setElectionEpochAndMinFee(chain engine.ChainReader, header *block.Header, s } isElected[addr] = struct{}{} } - - if config.IsMaxRate(newShardState.Epoch) { - for _, addr := range chain.ValidatorCandidates() { - if _, err := availability.UpdateMaxCommissionFee(state, addr, minRate); err != nil { - return err - } - } - } // due to a bug in the old implementation of the minimum fee, // unelected validators did not have their fee updated even // when the protocol required them to do so. here we fix it, diff --git a/internal/params/config.go b/internal/params/config.go index 090e5599cf..86695ba40e 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -75,7 +75,6 @@ var ( ValidatorCodeFixEpoch: big.NewInt(1535), // 2023-07-20 05:51:07+00:00 HIP30Epoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00 BlockGas30MEpoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00 - MaxRateEpoch: EpochTBD, } // TestnetChainConfig contains the chain parameters to run a node on the harmony test network. 
@@ -119,7 +118,6 @@ var ( ValidatorCodeFixEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00 HIP30Epoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00 BlockGas30MEpoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00 - MaxRateEpoch: EpochTBD, } // PangaeaChainConfig contains the chain parameters for the Pangaea network. // All features except for CrossLink are enabled at launch. @@ -163,7 +161,6 @@ var ( ValidatorCodeFixEpoch: EpochTBD, HIP30Epoch: EpochTBD, BlockGas30MEpoch: big.NewInt(0), - MaxRateEpoch: EpochTBD, } // PartnerChainConfig contains the chain parameters for the Partner network. @@ -208,7 +205,6 @@ var ( ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), BlockGas30MEpoch: big.NewInt(7), - MaxRateEpoch: EpochTBD, } // StressnetChainConfig contains the chain parameters for the Stress test network. @@ -253,7 +249,6 @@ var ( ValidatorCodeFixEpoch: EpochTBD, HIP30Epoch: EpochTBD, BlockGas30MEpoch: big.NewInt(0), - MaxRateEpoch: EpochTBD, } // LocalnetChainConfig contains the chain parameters to run for local development. @@ -295,9 +290,8 @@ var ( LeaderRotationExternalValidatorsEpoch: big.NewInt(6), FeeCollectEpoch: big.NewInt(2), ValidatorCodeFixEpoch: big.NewInt(2), - HIP30Epoch: big.NewInt(3), + HIP30Epoch: EpochTBD, BlockGas30MEpoch: big.NewInt(0), - MaxRateEpoch: big.NewInt(4), } // AllProtocolChanges ... @@ -342,8 +336,7 @@ var ( big.NewInt(0), // FeeCollectEpoch big.NewInt(0), // ValidatorCodeFixEpoch big.NewInt(0), // BlockGas30M - big.NewInt(0), // BlockGas30M - big.NewInt(0), // MaxRateEpoch + big.NewInt(0), // HIP30Epoch } // TestChainConfig ... @@ -389,7 +382,6 @@ var ( big.NewInt(0), // ValidatorCodeFixEpoch big.NewInt(0), // HIP30Epoch big.NewInt(0), // BlockGas30M - big.NewInt(0), // MaxRateEpoch } // TestRules ... @@ -555,9 +547,6 @@ type ChainConfig struct { HIP30Epoch *big.Int `json:"hip30-epoch,omitempty"` BlockGas30MEpoch *big.Int `json:"block-gas-30m-epoch,omitempty"` - - // MaxRateEpoch will make sure the validator max-rate is at least equal to the minRate + the validator max-rate-increase - MaxRateEpoch *big.Int `json:"max-rate-epoch,omitempty"` } // String implements the fmt.Stringer interface. @@ -814,10 +803,6 @@ func (c *ChainConfig) IsHIP30(epoch *big.Int) bool { return isForked(c.HIP30Epoch, epoch) } -func (c *ChainConfig) IsMaxRate(epoch *big.Int) bool { - return isForked(c.MaxRateEpoch, epoch) -} - // During this epoch, shards 2 and 3 will start sending // their balances over to shard 0 or 1. func (c *ChainConfig) IsOneEpochBeforeHIP30(epoch *big.Int) bool { diff --git a/staking/availability/measure.go b/staking/availability/measure.go index 6bf36bfb05..881baa8553 100644 --- a/staking/availability/measure.go +++ b/staking/availability/measure.go @@ -267,27 +267,3 @@ func UpdateMinimumCommissionFee( } return false, nil } - -// UpdateMaxCommissionFee makes sure the max-rate is at least higher than the rate + max-rate-change. -func UpdateMaxCommissionFee(state *state.DB, addr common.Address, minRate numeric.Dec) (bool, error) { - utils.Logger().Info().Msg("begin update max commission fee") - - wrapper, err := state.ValidatorWrapper(addr, true, false) - if err != nil { - return false, err - } - - minMaxRate := minRate.Add(wrapper.MaxChangeRate) - - if wrapper.MaxRate.LT(minMaxRate) { - utils.Logger().Info(). - Str("addr", addr.Hex()). - Str("old max-rate", wrapper.MaxRate.String()). - Str("new max-rate", minMaxRate.String()). 
- Msg("updating max commission rate") - wrapper.MaxRate.SetBytes(minMaxRate.Bytes()) - return true, nil - } - - return false, nil -} diff --git a/test/build-localnet-validator.sh b/test/build-localnet-validator.sh index 70501c8d67..08d9877779 100644 --- a/test/build-localnet-validator.sh +++ b/test/build-localnet-validator.sh @@ -32,7 +32,7 @@ hmy --node="http://localhost:9500" staking create-validator \ --bls-pubkeys 4f41a37a3a8d0695dd6edcc58142c6b7d98e74da5c90e79b587b3b960b6a4f5e048e6d8b8a000d77a478d44cd640270c,7dcc035a943e29e17959dabe636efad7303d2c6f273ace457ba9dcc2fd19d3f37e70ba1cd8d082cf8ff7be2f861db48c \ --name "s0-localnet-validator1" --identity "validator1" --details "validator1" \ --security-contact "localnet" --website "localnet.one" \ - --max-change-rate 0.01 --max-rate 0.01 --rate 0.01 \ + --max-change-rate 0.1 --max-rate 0.1 --rate 0.1 \ --max-total-delegation 100000000 --min-self-delegation 10000 --bls-pubkeys-dir .hmy/extbls/ hmy --node="http://localhost:9500" staking create-validator \ @@ -40,7 +40,7 @@ hmy --node="http://localhost:9500" staking create-validator \ --bls-pubkeys b0917378b179a519a5055259c4f8980cce37d58af300b00dd98b07076d3d9a3b16c4a55f84522f553872225a7b1efc0c \ --name "s0-localnet-validator2" --identity "validator2" --details "validator2" \ --security-contact "localnet" --website "localnet.one" \ - --max-change-rate 0.1 --max-rate 0.1 --rate 0.05 \ + --max-change-rate 0.1 --max-rate 0.1 --rate 0.1 \ --max-total-delegation 100000000 --min-self-delegation 10000 --bls-pubkeys-dir .hmy/extbls/ hmy --node="http://localhost:9500" staking create-validator \ From b7123fb30e079c4bf4d818582f53bbb434a59a93 Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Tue, 5 Dec 2023 15:46:32 +0100 Subject: [PATCH 039/128] fix: eth json transaction (#4581) --- rpc/eth/types.go | 31 +++++++++++++++++++++++++++++++ rpc/transaction.go | 2 +- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/rpc/eth/types.go b/rpc/eth/types.go index f76aa44423..f1ad725eb7 100644 --- a/rpc/eth/types.go +++ b/rpc/eth/types.go @@ -110,6 +110,37 @@ func NewTransaction( } return result, nil } +func NewTransactionFromTransaction( + tx *types.Transaction, blockHash common.Hash, + blockNumber uint64, timestamp uint64, index uint64, +) (*Transaction, error) { + from, err := tx.SenderAddress() + if err != nil { + return nil, fmt.Errorf("unable to get sender address: %w", err) + } + v, r, s := tx.RawSignatureValues() + + result := &Transaction{ + From: from, + Gas: hexutil.Uint64(tx.GasLimit()), + GasPrice: (*hexutil.Big)(tx.GasPrice()), + Hash: tx.Hash(), + Input: hexutil.Bytes(tx.Data()), + Nonce: hexutil.Uint64(tx.Nonce()), + To: tx.To(), + Value: (*hexutil.Big)(tx.Value()), + Timestamp: hexutil.Uint64(timestamp), + V: (*hexutil.Big)(v), + R: (*hexutil.Big)(r), + S: (*hexutil.Big)(s), + } + if blockHash != (common.Hash{}) { + result.BlockHash = &blockHash + result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) + result.TransactionIndex = (*hexutil.Uint64)(&index) + } + return result, nil +} // NewReceipt returns the RPC data for a new receipt func NewReceipt(tx *types.EthTransaction, blockHash common.Hash, blockNumber, blockIndex uint64, receipt *types.Receipt) (map[string]interface{}, error) { diff --git a/rpc/transaction.go b/rpc/transaction.go index 8ea211d6a9..7ee27e7a97 100644 --- a/rpc/transaction.go +++ b/rpc/transaction.go @@ -236,7 +236,7 @@ func (s *PublicTransactionService) newRPCTransaction(tx *types.Transaction, bloc } return 
NewStructuredResponse(tx)
 case Eth:
- tx, err := eth.NewTransaction(tx.ConvertToEth(), blockHash, blockNumber, timestamp, index)
+ tx, err := eth.NewTransactionFromTransaction(tx, blockHash, blockNumber, timestamp, index)
 if err != nil {
 DoMetricRPCQueryInfo(GetTransactionByHash, FailedNumber)
 return nil, err

From 86fca2070f141a989fcee4b3f6fa14cfd789c37e Mon Sep 17 00:00:00 2001
From: Konstantin <355847+Frozen@users.noreply.github.com>
Date: Tue, 5 Dec 2023 11:34:03 -0400
Subject: [PATCH 040/128] Fixed infinite loop sync. (#4575)

* Removed outdated check.

* Fallback for old sync for BeaconBlockChannel.

* Additional logs.

* fix: max-rate below the era min-rate (#4552)

* fix: max-rate below the era min-rate

* fix comments

* add localnet epoch config

* update config

* update config

* update config

* update config

* Revert "fix: max-rate below the era min-rate (#4552)" (#4578)

This reverts commit f9934683252e42d73e1cd6f71e82f687a456999b.

---------

Co-authored-by: Diego Nava <8563843+diego1q2w@users.noreply.github.com>
---
 api/service/stagedstreamsync/staged_stream_sync.go | 3 ++-
 api/service/stagedstreamsync/syncing.go | 8 ++++++--
 consensus/consensus.go | 12 +++++++-----
 consensus/consensus_v2.go | 7 ++++---
 consensus/downloader.go | 2 +-
 consensus/validator.go | 2 +-
 core/blockchain_impl.go | 2 +-
 core/epochchain.go | 3 ++-
 core/rawdb/accessors_offchain.go | 2 +-
 node/node_handler.go | 7 +++++--
 node/node_syncing.go | 2 +-
 11 files changed, 31 insertions(+), 19 deletions(-)

diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go
index 98922af28d..0a14d0cb3e 100644
--- a/api/service/stagedstreamsync/staged_stream_sync.go
+++ b/api/service/stagedstreamsync/staged_stream_sync.go
@@ -642,8 +642,9 @@ func (ss *StagedStreamSync) addConsensusLastMile(bc core.BlockChain, cs *consens
 case errors.Is(err, core.ErrNotLastBlockInEpoch):
 case err != nil:
 return errors.Wrap(err, "failed to InsertChain")
+ default:
+ hashes = append(hashes, block.Header().Hash())
 }
- hashes = append(hashes, block.Header().Hash())
 }
 return nil
 })
diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go
index 738f2f9203..9e8926468e 100644
--- a/api/service/stagedstreamsync/syncing.go
+++ b/api/service/stagedstreamsync/syncing.go
@@ -219,6 +219,8 @@ func (s *StagedStreamSync) Debug(source string, msg interface{}) {

 // For each iteration, estimate the current block number, then fetch block & insert to blockchain
 func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bool) (uint64, int, error) {
+ startedNumber := s.bc.CurrentBlock().NumberU64()
+
 var totalInserted int

 s.initSync = initSync
@@ -249,7 +251,7 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo

 for {
 ctx, cancel := context.WithCancel(downloaderContext)
- n, err := s.doSyncCycle(ctx, initSync)
+ n, err := s.doSyncCycle(ctx)
 if err != nil {
 utils.Logger().Error().
 Err(err).
@@ -281,6 +283,8 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo
 Bool("isBeacon", s.isBeacon).
 Uint32("shard", s.bc.ShardID()).
 Int("blocks", totalInserted).
+ Uint64("startedNumber", startedNumber).
+ Uint64("currentNumber", s.bc.CurrentBlock().NumberU64()).
Msg(WrapStagedSyncMsg("sync cycle blocks inserted successfully")) } @@ -304,7 +308,7 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo return estimatedHeight, totalInserted, nil } -func (s *StagedStreamSync) doSyncCycle(ctx context.Context, initSync bool) (int, error) { +func (s *StagedStreamSync) doSyncCycle(ctx context.Context) (int, error) { // TODO: initSync=true means currentCycleNumber==0, so we can remove initSync diff --git a/consensus/consensus.go b/consensus/consensus.go index 019fd85429..18b53e682d 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -40,6 +40,10 @@ const ( AsyncProposal ) +type DownloadAsync interface { + DownloadAsync() +} + // Consensus is the main struct with all states and data related to consensus process. type Consensus struct { Decider quorum.Decider @@ -122,9 +126,7 @@ type Consensus struct { // finalityCounter keep tracks of the finality time finalityCounter atomic.Value //int64 - dHelper interface { - DownloadAsync() - } + dHelper DownloadAsync // Both flags only for initialization state. start bool @@ -190,10 +192,10 @@ func (consensus *Consensus) BlocksSynchronized() { } // BlocksNotSynchronized lets the main loop know that block is not synchronized -func (consensus *Consensus) BlocksNotSynchronized() { +func (consensus *Consensus) BlocksNotSynchronized(reason string) { consensus.mutex.Lock() defer consensus.mutex.Unlock() - consensus.syncNotReadyChan() + consensus.syncNotReadyChan(reason) } // VdfSeedSize returns the number of VRFs for VDF computation diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 514feaf86f..0eb6e338d2 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -359,11 +359,12 @@ func (consensus *Consensus) syncReadyChan() { } } -func (consensus *Consensus) syncNotReadyChan() { - consensus.getLogger().Info().Msg("[ConsensusMainLoop] syncNotReadyChan") +func (consensus *Consensus) syncNotReadyChan(reason string) { + mode := consensus.current.Mode() consensus.setBlockNum(consensus.Blockchain().CurrentHeader().Number().Uint64() + 1) consensus.current.SetMode(Syncing) - consensus.getLogger().Info().Msg("[ConsensusMainLoop] Node is OUT OF SYNC") + consensus.getLogger().Info().Msgf("[ConsensusMainLoop] syncNotReadyChan, prev %s, reason %s", mode.String(), reason) + consensus.getLogger().Info().Msgf("[ConsensusMainLoop] Node is OUT OF SYNC, reason: %s", reason) consensusSyncCounterVec.With(prometheus.Labels{"consensus": "out_of_sync"}).Inc() } diff --git a/consensus/downloader.go b/consensus/downloader.go index 804a25aabb..595d07b01d 100644 --- a/consensus/downloader.go +++ b/consensus/downloader.go @@ -61,7 +61,7 @@ func (dh *downloadHelper) downloadStartedLoop(c *Consensus) { for { select { case <-dh.startedCh: - c.BlocksNotSynchronized() + c.BlocksNotSynchronized("downloadStartedLoop") case err := <-dh.startedSub.Err(): c.GetLogger().Info().Err(err).Msg("consensus download finished loop closed") diff --git a/consensus/validator.go b/consensus/validator.go index c148a61890..891fe0c035 100644 --- a/consensus/validator.go +++ b/consensus/validator.go @@ -65,7 +65,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) { _, err := consensus.ValidateNewBlock(recvMsg) if err == nil { consensus.GetLogger().Info(). 
- Msg("[Announce] Block verified") + Msgf("[Announce] Block verified %d", recvMsg.BlockNum) } }() } diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index f6084b4be4..cc30315677 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1608,7 +1608,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i switch status { case CanonStatTy: - logger.Info().Msg("Inserted new block") + logger.Info().Msgf("Inserted new block s: %d e: %d n:%d", block.ShardID(), block.Epoch().Uint64(), block.NumberU64()) coalescedLogs = append(coalescedLogs, logs...) blockInsertTimer.UpdateSince(bstart) events = append(events, ChainEvent{block, block.Hash(), logs}) diff --git a/core/epochchain.go b/core/epochchain.go index 7d9aeae1a8..2dab284713 100644 --- a/core/epochchain.go +++ b/core/epochchain.go @@ -166,7 +166,8 @@ func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool) (int, error) { se1() se2() utils.Logger().Info(). - Msgf("[EPOCHSYNC] Added block %d %s", block.NumberU64(), block.Hash().Hex()) + Msgf("[EPOCHSYNC] Added block %d, epoch %d, %s", block.NumberU64(), block.Epoch().Uint64(), block.Hash().Hex()) + } return 0, nil } diff --git a/core/rawdb/accessors_offchain.go b/core/rawdb/accessors_offchain.go index dd43299034..4808c8c231 100644 --- a/core/rawdb/accessors_offchain.go +++ b/core/rawdb/accessors_offchain.go @@ -43,7 +43,7 @@ func WriteShardStateBytes(db DatabaseWriter, epoch *big.Int, data []byte) error } utils.Logger().Info(). Str("epoch", epoch.String()). - Int("size", len(data)).Msg("wrote sharding state") + Int("size", len(data)).Msgf("wrote sharding state, epoch %d", epoch.Uint64()) return nil } diff --git a/node/node_handler.go b/node/node_handler.go index c5feeed071..b745ca7136 100644 --- a/node/node_handler.go +++ b/node/node_handler.go @@ -337,7 +337,7 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error { } BroadcastCXReceipts(newBlock, node.Consensus) } else { - if node.Consensus.Mode() != consensus.Listening { + if mode := node.Consensus.Mode(); mode != consensus.Listening { numSignatures := node.Consensus.NumSignaturesIncludedInBlock(newBlock) utils.Logger().Info(). Uint64("blockNum", newBlock.NumberU64()). @@ -347,9 +347,12 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error { Int("numTxns", len(newBlock.Transactions())). Int("numStakingTxns", len(newBlock.StakingTransactions())). Uint32("numSignatures", numSignatures). + Str("mode", mode.String()). Msg("BINGO !!! 
Reached Consensus") if node.Consensus.Mode() == consensus.Syncing { - node.Consensus.SetMode(node.Consensus.UpdateConsensusInformation()) + mode = node.Consensus.UpdateConsensusInformation() + utils.Logger().Info().Msgf("Switching to mode %s", mode) + node.Consensus.SetMode(mode) } node.Consensus.UpdateValidatorMetrics(float64(numSignatures), float64(newBlock.NumberU64())) diff --git a/node/node_syncing.go b/node/node_syncing.go index 5319827ffd..b1ee21ea7d 100644 --- a/node/node_syncing.go +++ b/node/node_syncing.go @@ -316,7 +316,7 @@ func (node *Node) doSync(syncInstance ISync, syncingPeerProvider SyncingPeerProv if isSynchronized, _, _ := syncInstance.GetParsedSyncStatusDoubleChecked(); !isSynchronized { node.IsSynchronized.UnSet() if willJoinConsensus { - consensus.BlocksNotSynchronized() + consensus.BlocksNotSynchronized("node.doSync") } isBeacon := bc.ShardID() == shard.BeaconChainShardID syncInstance.SyncLoop(bc, isBeacon, consensus, legacysync.LoopMinTime) From 3b27215e03aa42a33652e6b06172afa7b6da557e Mon Sep 17 00:00:00 2001 From: static Date: Wed, 6 Dec 2023 04:55:02 +0000 Subject: [PATCH 041/128] use v2.NewReceipt for eth type receipt --- rpc/transaction.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/rpc/transaction.go b/rpc/transaction.go index 7ee27e7a97..4106425c27 100644 --- a/rpc/transaction.go +++ b/rpc/transaction.go @@ -751,7 +751,7 @@ func (s *PublicTransactionService) GetTransactionReceipt( return nil, err } return NewStructuredResponse(RPCReceipt) - case V2: + case V2, Eth: if tx == nil { RPCReceipt, err = v2.NewReceipt(stx, blockHash, blockNumber, index, receipt) } else { @@ -761,14 +761,6 @@ func (s *PublicTransactionService) GetTransactionReceipt( return nil, err } return NewStructuredResponse(RPCReceipt) - case Eth: - if tx != nil { - RPCReceipt, err = eth.NewReceipt(tx.ConvertToEth(), blockHash, blockNumber, index, receipt) - } - if err != nil { - return nil, err - } - return NewStructuredResponse(RPCReceipt) default: return nil, ErrUnknownRPCVersion } From 29c62490ce6f1df7380ae601f8ea36d15ee08894 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Thu, 7 Dec 2023 00:18:24 -0400 Subject: [PATCH 042/128] Fix. 
(#4583) --- internal/params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/params/config.go b/internal/params/config.go index 86695ba40e..44bd5cbd7d 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -199,7 +199,7 @@ var ( SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16 CrossShardXferPrecompileEpoch: big.NewInt(5), AllowlistEpoch: EpochTBD, - LeaderRotationInternalValidatorsEpoch: EpochTBD, + LeaderRotationInternalValidatorsEpoch: big.NewInt(2379), LeaderRotationExternalValidatorsEpoch: EpochTBD, FeeCollectEpoch: big.NewInt(5), ValidatorCodeFixEpoch: big.NewInt(5), From ca05f3f10f0c20a05e88698a97f08fc21a9c4b29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Sun, 25 Jun 2023 14:25:56 +0800 Subject: [PATCH 043/128] add statesync as a new state to staged sync --- .../stagedstreamsync/default_stages.go | 10 + .../stagedstreamsync/stage_statesync.go | 215 ++++++++++++++++++ api/service/stagedstreamsync/stages.go | 1 + api/service/stagedstreamsync/syncing.go | 4 + 4 files changed, 230 insertions(+) create mode 100644 api/service/stagedstreamsync/stage_statesync.go diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go index 55986ff6e8..4a1e719f22 100644 --- a/api/service/stagedstreamsync/default_stages.go +++ b/api/service/stagedstreamsync/default_stages.go @@ -13,6 +13,7 @@ var DefaultForwardOrder = ForwardOrder{ SyncEpoch, ShortRange, BlockBodies, + StateSync, // Stages below don't use Internet States, LastMile, @@ -23,6 +24,7 @@ var DefaultRevertOrder = RevertOrder{ Finish, LastMile, States, + StateSync, BlockBodies, ShortRange, SyncEpoch, @@ -33,6 +35,7 @@ var DefaultCleanUpOrder = CleanUpOrder{ Finish, LastMile, States, + StateSync, BlockBodies, ShortRange, SyncEpoch, @@ -44,6 +47,7 @@ func DefaultStages(ctx context.Context, seCfg StageEpochCfg, srCfg StageShortRangeCfg, bodiesCfg StageBodiesCfg, + stateSyncCfg StageStateSyncCfg, statesCfg StageStatesCfg, lastMileCfg StageLastMileCfg, finishCfg StageFinishCfg, @@ -53,6 +57,7 @@ func DefaultStages(ctx context.Context, handlerStageShortRange := NewStageShortRange(srCfg) handlerStageEpochSync := NewStageEpoch(seCfg) handlerStageBodies := NewStageBodies(bodiesCfg) + handlerStageStateSync := NewStageStateSync(stateSyncCfg) handlerStageStates := NewStageStates(statesCfg) handlerStageLastMile := NewStageLastMile(lastMileCfg) handlerStageFinish := NewStageFinish(finishCfg) @@ -78,6 +83,11 @@ func DefaultStages(ctx context.Context, Description: "Retrieve Block Bodies", Handler: handlerStageBodies, }, + { + ID: StateSync, + Description: "Retrieve States", + Handler: handlerStageStateSync, + }, { ID: States, Description: "Update Blockchain State", diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go new file mode 100644 index 0000000000..10cce84622 --- /dev/null +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -0,0 +1,215 @@ +package stagedstreamsync + +import ( + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/trie" + "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/core/rawdb" + "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/internal/utils" + "github.com/ledgerwatch/erigon-lib/kv" + 
"github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" + "golang.org/x/crypto/sha3" +) + +type StageStateSync struct { + configs StageStateSyncCfg +} + +// trieTask represents a single trie node download task, containing a set of +// peers already attempted retrieval from to detect stalled syncs and abort. +type trieTask struct { + hash common.Hash + path [][]byte + attempts map[string]struct{} +} + +// codeTask represents a single byte code download task, containing a set of +// peers already attempted retrieval from to detect stalled syncs and abort. +type codeTask struct { + attempts map[string]struct{} +} + +type StageStateSyncCfg struct { + bc core.BlockChain + protocol syncProtocol + db kv.RwDB + root common.Hash // State root currently being synced + sched *trie.Sync // State trie sync scheduler defining the tasks + keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with + trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path + codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash + concurrency int + logger zerolog.Logger + logProgress bool +} + +func NewStageStateSync(cfg StageStateSyncCfg) *StageStateSync { + return &StageStateSync{ + configs: cfg, + } +} + +func NewStageStateSyncCfg(bc core.BlockChain, + db kv.RwDB, + root common.Hash, + concurrency int, + protocol syncProtocol, + logger zerolog.Logger, + logProgress bool) StageStateSyncCfg { + + return StageStateSyncCfg{ + bc: bc, + db: db, + root: root, + sched: state.NewStateSync(root, bc.ChainDb(), nil, rawdb.HashScheme), + keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), + trieTasks: make(map[string]*trieTask), + codeTasks: make(map[common.Hash]*codeTask), + concurrency: concurrency, + logger: logger, + logProgress: logProgress, + } +} + +// Exec progresses States stage in the forward direction +func (stg *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { + + // for short range sync, skip this step + if !s.state.initSync { + return nil + } + + maxHeight := s.state.status.targetBN + currentHead := stg.configs.bc.CurrentBlock().NumberU64() + if currentHead >= maxHeight { + return nil + } + currProgress := stg.configs.bc.CurrentBlock().NumberU64() + targetHeight := s.state.currentCycle.TargetHeight + if currProgress >= targetHeight { + return nil + } + useInternalTx := tx == nil + if useInternalTx { + var err error + tx, err = stg.configs.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + + // isLastCycle := targetHeight >= maxHeight + startTime := time.Now() + startBlock := currProgress + + if stg.configs.logProgress { + fmt.Print("\033[s") // save the cursor position + } + + for i := currProgress + 1; i <= targetHeight; i++ { + // log the stage progress in console + if stg.configs.logProgress { + //calculating block speed + dt := time.Now().Sub(startTime).Seconds() + speed := float64(0) + if dt > 0 { + speed = float64(currProgress-startBlock) / dt + } + blockSpeed := fmt.Sprintf("%.2f", speed) + fmt.Print("\033[u\033[K") // restore the cursor position and clear the line + fmt.Println("insert blocks progress:", currProgress, "/", targetHeight, "(", blockSpeed, "blocks/s", ")") + } + + } + + if useInternalTx { + if err := tx.Commit(); err != nil { + return err + } + } + + return nil +} + +func (stg *StageStateSync) insertChain(gbm *blockDownloadManager, + protocol 
syncProtocol, + lbls prometheus.Labels, + targetBN uint64) { + +} + +func (stg *StageStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) { + + useInternalTx := tx == nil + if useInternalTx { + var err error + tx, err = stg.configs.db.BeginRw(context.Background()) + if err != nil { + return err + } + defer tx.Rollback() + } + + // save progress + if err = s.Update(tx, stg.configs.bc.CurrentBlock().NumberU64()); err != nil { + utils.Logger().Error(). + Err(err). + Msgf("[STAGED_SYNC] saving progress for block States stage failed") + return ErrSaveStateProgressFail + } + + if useInternalTx { + if err := tx.Commit(); err != nil { + return err + } + } + return nil +} + +func (stg *StageStateSync) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) { + useInternalTx := tx == nil + if useInternalTx { + tx, err = stg.configs.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + + if err = u.Done(tx); err != nil { + return err + } + + if useInternalTx { + if err = tx.Commit(); err != nil { + return err + } + } + return nil +} + +func (stg *StageStateSync) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) { + useInternalTx := tx == nil + if useInternalTx { + tx, err = stg.configs.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + + if useInternalTx { + if err = tx.Commit(); err != nil { + return err + } + } + return nil +} diff --git a/api/service/stagedstreamsync/stages.go b/api/service/stagedstreamsync/stages.go index 6a21fe7071..cb6efa0cd8 100644 --- a/api/service/stagedstreamsync/stages.go +++ b/api/service/stagedstreamsync/stages.go @@ -12,6 +12,7 @@ const ( ShortRange SyncStageID = "ShortRange" // short range SyncEpoch SyncStageID = "SyncEpoch" // epoch sync BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified + StateSync SyncStageID = "StateSync" // State sync States SyncStageID = "States" // will construct most recent state from downloaded blocks LastMile SyncStageID = "LastMile" // update blocks after sync and update last mile blocks as well Finish SyncStageID = "Finish" // Nominal stage after all other stages diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 9e8926468e..ba5ab3a201 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -10,6 +10,7 @@ import ( "time" "github.com/harmony-one/harmony/consensus" + "github.com/ethereum/go-ethereum/common" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" @@ -84,9 +85,11 @@ func CreateStagedSync(ctx context.Context, stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) + stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) + stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, common.Hash{}, config.Concurrency, protocol, logger, config.LogProgress) stageFinishCfg := NewStageFinishCfg(mainDB) stages := DefaultStages(ctx, @@ -94,6 +97,7 @@ func CreateStagedSync(ctx context.Context, stageSyncEpochCfg, stageShortRangeCfg, stageBodiesCfg, + 
stageStateSyncCfg, stageStatesCfg, lastMileCfg, stageFinishCfg, From 9ec0272261aad303d53106e3679970a0e84b3363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 26 Jun 2023 15:43:21 +0800 Subject: [PATCH 044/128] add initial state download manager to stream sync --- .../state_download_manager.go | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 api/service/stagedstreamsync/state_download_manager.go diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go new file mode 100644 index 0000000000..3ad5bd1e71 --- /dev/null +++ b/api/service/stagedstreamsync/state_download_manager.go @@ -0,0 +1,127 @@ +package stagedstreamsync + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/trie" + "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/core/rawdb" + "github.com/harmony-one/harmony/core/state" + sttypes "github.com/harmony-one/harmony/p2p/stream/types" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/rs/zerolog" + "golang.org/x/crypto/sha3" +) + +// trieTask represents a single trie node download task, containing a set of +// peers already attempted retrieval from to detect stalled syncs and abort. +type trieTask struct { + hash common.Hash + path [][]byte + attempts map[string]struct{} +} + +// codeTask represents a single byte code download task, containing a set of +// peers already attempted retrieval from to detect stalled syncs and abort. +type codeTask struct { + attempts map[string]struct{} +} + +type task struct { + trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path + codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash +} + +func newTask() *task { + return &task{ + trieTasks: make(map[string]*trieTask), + codeTasks: make(map[common.Hash]*codeTask), + } +} + +func (t *task) addCodeTask(h common.Hash, ct *codeTask) { + t.codeTasks[h] = &codeTask{ + attempts: ct.attempts, + } +} + +func (t *task) getCodeTask(h common.Hash) *codeTask { + return t.codeTasks[h] +} + +func (t *task) addNewCodeTask(h common.Hash) { + t.codeTasks[h] = &codeTask{ + attempts: make(map[string]struct{}), + } +} + +func (t *task) deleteCodeTask(hash common.Hash) { + delete(t.codeTasks, hash) +} + +func (t *task) addTrieTask(hash common.Hash, path string) { + t.trieTasks[path] = &trieTask{ + hash: hash, + path: trie.NewSyncPath([]byte(path)), + attempts: make(map[string]struct{}), + } +} + +func (t *task) setTrieTask(path string, tt *trieTask) { + t.trieTasks[path] = &trieTask{ + hash: tt.hash, + path: tt.path, + attempts: tt.attempts, + } +} + +func (t *task) getTrieTask(path string) *trieTask { + return t.trieTasks[path] +} + +func (t *task) deleteTrieTask(path string) { + delete(t.trieTasks, path) +} + +// StateDownloadManager is the helper structure for get blocks request management +type StateDownloadManager struct { + bc core.BlockChain + tx kv.RwTx + + protocol syncProtocol + root common.Hash // State root currently being synced + sched *trie.Sync // State trie sync scheduler defining the tasks + keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with + concurrency int + logger zerolog.Logger + lock sync.Mutex + + tasks *task + requesting *task + processing *task + retries *task +} + +func 
newStateDownloadManager(tx kv.RwTx, + bc core.BlockChain, + root common.Hash, + concurrency int, + logger zerolog.Logger) *StateDownloadManager { + + return &StateDownloadManager{ + bc: bc, + tx: tx, + root: root, + sched: state.NewStateSync(root, bc.ChainDb(), nil, rawdb.HashScheme), + keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), + concurrency: concurrency, + logger: logger, + tasks: newTask(), + requesting: newTask(), + processing: newTask(), + retries: newTask(), + } +} + From 2064cfd62ce753c601759d786634c2a8f6755873 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 26 Jun 2023 15:56:36 +0800 Subject: [PATCH 045/128] add protocol to stage statesync --- .../stagedstreamsync/stage_statesync.go | 34 ++----------------- 1 file changed, 2 insertions(+), 32 deletions(-) diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 10cce84622..9391944b7b 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -5,47 +5,22 @@ import ( "fmt" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/core" - "github.com/harmony-one/harmony/core/rawdb" - "github.com/harmony-one/harmony/core/state" "github.com/harmony-one/harmony/internal/utils" "github.com/ledgerwatch/erigon-lib/kv" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" - "golang.org/x/crypto/sha3" ) type StageStateSync struct { configs StageStateSyncCfg } -// trieTask represents a single trie node download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type trieTask struct { - hash common.Hash - path [][]byte - attempts map[string]struct{} -} - -// codeTask represents a single byte code download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. 
-type codeTask struct { - attempts map[string]struct{} -} - type StageStateSyncCfg struct { bc core.BlockChain - protocol syncProtocol db kv.RwDB - root common.Hash // State root currently being synced - sched *trie.Sync // State trie sync scheduler defining the tasks - keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with - trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path - codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash concurrency int + protocol syncProtocol logger zerolog.Logger logProgress bool } @@ -58,7 +33,6 @@ func NewStageStateSync(cfg StageStateSyncCfg) *StageStateSync { func NewStageStateSyncCfg(bc core.BlockChain, db kv.RwDB, - root common.Hash, concurrency int, protocol syncProtocol, logger zerolog.Logger, @@ -67,12 +41,8 @@ func NewStageStateSyncCfg(bc core.BlockChain, return StageStateSyncCfg{ bc: bc, db: db, - root: root, - sched: state.NewStateSync(root, bc.ChainDb(), nil, rawdb.HashScheme), - keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), - trieTasks: make(map[string]*trieTask), - codeTasks: make(map[common.Hash]*codeTask), concurrency: concurrency, + protocol: protocol, logger: logger, logProgress: logProgress, } From 702eb5e1fb54b90b7715ef6a66404d50d12889b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 26 Jun 2023 23:01:11 +0800 Subject: [PATCH 046/128] add task management logic to state download manager in stream sync --- api/service/stagedstreamsync/const.go | 8 + .../state_download_manager.go | 272 ++++++++++++++++-- 2 files changed, 251 insertions(+), 29 deletions(-) diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go index 048b5d812d..d7b6510a40 100644 --- a/api/service/stagedstreamsync/const.go +++ b/api/service/stagedstreamsync/const.go @@ -23,6 +23,14 @@ const ( // no more request will be assigned to workers to wait for InsertChain to finish. SoftQueueCap int = 100 + StatesPerRequest int = 10 // number of get nodes by hashes for each request + + // DefaultConcurrency is the default settings for concurrency + DefaultConcurrency int = 4 + + // MaxTriesToFetchNodeData is the maximum number of tries to fetch node data + MaxTriesToFetchNodeData int = 5 + // ShortRangeTimeout is the timeout for each short range sync, which allow short range sync // to restart automatically when stuck in `getBlockHashes` ShortRangeTimeout time.Duration = 1 * time.Minute diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go index 3ad5bd1e71..295cc50235 100644 --- a/api/service/stagedstreamsync/state_download_manager.go +++ b/api/service/stagedstreamsync/state_download_manager.go @@ -1,6 +1,7 @@ package stagedstreamsync import ( + "fmt" "sync" "github.com/ethereum/go-ethereum/common" @@ -15,30 +16,10 @@ import ( "golang.org/x/crypto/sha3" ) -// trieTask represents a single trie node download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type trieTask struct { - hash common.Hash - path [][]byte - attempts map[string]struct{} -} - // codeTask represents a single byte code download task, containing a set of // peers already attempted retrieval from to detect stalled syncs and abort. 
type codeTask struct { - attempts map[string]struct{} -} - -type task struct { - trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path - codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash -} - -func newTask() *task { - return &task{ - trieTasks: make(map[string]*trieTask), - codeTasks: make(map[common.Hash]*codeTask), - } + attempts map[sttypes.StreamID]int } func (t *task) addCodeTask(h common.Hash, ct *codeTask) { @@ -53,7 +34,7 @@ func (t *task) getCodeTask(h common.Hash) *codeTask { func (t *task) addNewCodeTask(h common.Hash) { t.codeTasks[h] = &codeTask{ - attempts: make(map[string]struct{}), + attempts: make(map[sttypes.StreamID]int), } } @@ -61,15 +42,15 @@ func (t *task) deleteCodeTask(hash common.Hash) { delete(t.codeTasks, hash) } -func (t *task) addTrieTask(hash common.Hash, path string) { - t.trieTasks[path] = &trieTask{ - hash: hash, - path: trie.NewSyncPath([]byte(path)), - attempts: make(map[string]struct{}), - } +// trieTask represents a single trie node download task, containing a set of +// peers already attempted retrieval from to detect stalled syncs and abort. +type trieTask struct { + hash common.Hash + path [][]byte + attempts map[sttypes.StreamID]int } -func (t *task) setTrieTask(path string, tt *trieTask) { +func (t *task) addTrieTask(path string, tt *trieTask) { t.trieTasks[path] = &trieTask{ hash: tt.hash, path: tt.path, @@ -81,10 +62,30 @@ func (t *task) getTrieTask(path string) *trieTask { return t.trieTasks[path] } +func (t *task) addNewTrieTask(hash common.Hash, path string) { + t.trieTasks[path] = &trieTask{ + hash: hash, + path: trie.NewSyncPath([]byte(path)), + attempts: make(map[sttypes.StreamID]int), + } +} + func (t *task) deleteTrieTask(path string) { delete(t.trieTasks, path) } +type task struct { + trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path + codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash +} + +func newTask() *task { + return &task{ + trieTasks: make(map[string]*trieTask), + codeTasks: make(map[common.Hash]*codeTask), + } +} + // StateDownloadManager is the helper structure for get blocks request management type StateDownloadManager struct { bc core.BlockChain @@ -125,3 +126,216 @@ func newStateDownloadManager(tx kv.RwTx, } } +// fillTasks fills the tasks to send to the remote peer. +func (s *StateDownloadManager) fillTasks(n int) error { + // Refill available tasks from the scheduler. + if fill := n - (len(s.tasks.trieTasks) + len(s.tasks.codeTasks)); fill > 0 { + paths, hashes, codes := s.sched.Missing(fill) + for i, path := range paths { + s.tasks.addNewTrieTask(hashes[i], path) + } + for _, hash := range codes { + s.tasks.addNewCodeTask(hash) + } + } + return nil +} + +// getNextBatch returns objects with a maximum of n state download +// tasks to send to the remote peer. +func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { + s.lock.Lock() + defer s.lock.Unlock() + + cap := StatesPerRequest + + nodes, paths, codes = s.getBatchFromRetries(cap) + nItems := len(nodes) + len(codes) + cap -= nItems + + if cap > 0 { + newNodes, newPaths, newCodes := s.getBatchFromUnprocessed(cap) + nodes = append(nodes, newNodes...) + paths = append(paths, newPaths...) + codes = append(codes, newCodes...) 
+ } + + return nodes, paths, codes +} + +// getNextBatch returns objects with a maximum of n state download +// tasks to send to the remote peer. +func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { + // over trie nodes as those can be written to disk and forgotten about. + nodes = make([]common.Hash, 0, n) + paths = make([]trie.SyncPath, 0, n) + codes = make([]common.Hash, 0, n) + + for hash, t := range s.tasks.codeTasks { + // Stop when we've gathered enough requests + if len(nodes)+len(codes) == n { + break + } + codes = append(codes, hash) + s.requesting.addCodeTask(hash, t) + s.tasks.deleteCodeTask(hash) + } + for path, t := range s.tasks.trieTasks { + // Stop when we've gathered enough requests + if len(nodes)+len(codes) == n { + break + } + nodes = append(nodes, t.hash) + paths = append(paths, t.path) + s.requesting.addTrieTask(path, t) + s.tasks.deleteTrieTask(path) + } + return nodes, paths, codes +} + +// getBatchFromRetries get the block number batch to be requested from retries. +func (s *StateDownloadManager) getBatchFromRetries(n int) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { + // over trie nodes as those can be written to disk and forgotten about. + nodes = make([]common.Hash, 0, n) + paths = make([]trie.SyncPath, 0, n) + codes = make([]common.Hash, 0, n) + + for hash, t := range s.retries.codeTasks { + // Stop when we've gathered enough requests + if len(nodes)+len(codes) == n { + break + } + codes = append(codes, hash) + s.requesting.addCodeTask(hash, t) + s.retries.deleteCodeTask(hash) + } + for path, t := range s.retries.trieTasks { + // Stop when we've gathered enough requests + if len(nodes)+len(codes) == n { + break + } + nodes = append(nodes, t.hash) + paths = append(paths, t.path) + s.requesting.addTrieTask(path, t) + s.retries.deleteTrieTask(path) + } + return nodes, paths, codes +} + +// HandleRequestError handles the error result +func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, triePaths []string, streamID sttypes.StreamID, err error) { + s.lock.Lock() + defer s.lock.Unlock() + + // add requested code hashes to retries + for _, h := range codeHashes { + s.retries.codeTasks[h] = &codeTask{ + attempts: s.requesting.codeTasks[h].attempts, + } + delete(s.requesting.codeTasks, h) + } + + // add requested trie paths to retries + for _, p := range triePaths { + s.retries.trieTasks[p] = &trieTask{ + hash: s.requesting.trieTasks[p].hash, + path: s.requesting.trieTasks[p].path, + attempts: s.requesting.trieTasks[p].attempts, + } + delete(s.requesting.trieTasks, p) + } +} + +// HandleRequestResult handles get trie paths and code hashes result +func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, response [][]byte, loopID int, streamID sttypes.StreamID) error { + s.lock.Lock() + defer s.lock.Unlock() + + // Collect processing stats and update progress if valid data was received + duplicate, unexpected, successful, numUncommitted, bytesUncommitted := 0, 0, 0, 0, 0 + + for _, blob := range response { + hash, err := s.processNodeData(trieTasks, codeTasks, blob) + switch err { + case nil: + numUncommitted++ + bytesUncommitted += len(blob) + successful++ + case trie.ErrNotRequested: + unexpected++ + case trie.ErrAlreadyProcessed: + duplicate++ + default: + return fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) + } + } + + //TODO: remove successful tasks from 
requesting
+
+ for path, task := range trieTasks {
+ // If the node did deliver something, missing items may be due to a protocol
+ // limit or a previous timeout + delayed delivery. Both cases should permit
+ // the node to retry the missing items (to avoid single-peer stalls).
+ if len(response) > 0 { //TODO: if timeout also do same
+ delete(task.attempts, streamID)
+ } else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
+ // If we've requested the node too many times already, it may be a malicious
+ // sync where nobody has the right data. Abort.
+ return fmt.Errorf("trie node %s failed with peer %s (%d attempts)", task.hash.TerminalString(), streamID, task.attempts[streamID])
+ }
+ // Missing item, place into the retry queue.
+ s.retries.addTrieTask(path, task)
+ }
+
+ for hash, task := range codeTasks {
+ // If the node did deliver something, missing items may be due to a protocol
+ // limit or a previous timeout + delayed delivery. Both cases should permit
+ // the node to retry the missing items (to avoid single-peer stalls).
+ if len(response) > 0 { //TODO: if timeout also do same
+ delete(task.attempts, streamID)
+ } else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
+ // If we've requested the node too many times already, it may be a malicious
+ // sync where nobody has the right data. Abort.
+ return fmt.Errorf("byte code %s failed with peer %s (%d attempts)", hash.TerminalString(), streamID, task.attempts[streamID])
+ }
+ // Missing item, place into the retry queue.
+ s.retries.addCodeTask(hash, task)
+ }
+
+ return nil
+}
+
+// processNodeData tries to inject a trie node data blob delivered from a remote
+// peer into the state trie, returning whether anything useful was written or any
+// error occurred.
+//
+// If multiple requests correspond to the same hash, this method will inject the
+// blob as a result for the first one only, leaving the remaining duplicates to
+// be fetched again.
+func (s *StateDownloadManager) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, responseData []byte) (common.Hash, error) { + var hash common.Hash + s.keccak.Reset() + s.keccak.Write(responseData) + s.keccak.Read(hash[:]) + + //TODO: remove from requesting + if _, present := codeTasks[hash]; present { + err := s.sched.ProcessCode(trie.CodeSyncResult{ + Hash: hash, + Data: responseData, + }) + delete(codeTasks, hash) + return hash, err + } + for path, task := range nodeTasks { + if task.hash == hash { + err := s.sched.ProcessNode(trie.NodeSyncResult{ + Path: path, + Data: responseData, + }) + delete(nodeTasks, path) + return hash, err + } + } + return common.Hash{}, trie.ErrNotRequested +} From 4629fda90bc7d9789b1c1465adcf088391cd1cc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 26 Jun 2023 23:18:05 +0800 Subject: [PATCH 047/128] fix statesync config --- api/service/stagedstreamsync/syncing.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index ba5ab3a201..6e0d01a4b7 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -10,7 +10,6 @@ import ( "time" "github.com/harmony-one/harmony/consensus" - "github.com/ethereum/go-ethereum/common" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" @@ -85,11 +84,11 @@ func CreateStagedSync(ctx context.Context, stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) - + stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) + stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress) lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) - stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, common.Hash{}, config.Concurrency, protocol, logger, config.LogProgress) stageFinishCfg := NewStageFinishCfg(mainDB) stages := DefaultStages(ctx, From 9e1249a836df6de6279ff7f785a9fd67ffdcf32c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 28 Jun 2023 15:42:48 +0800 Subject: [PATCH 048/128] refactor state download manager --- .../state_download_manager.go | 48 ++++++++++--------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go index 295cc50235..8c136aebdd 100644 --- a/api/service/stagedstreamsync/state_download_manager.go +++ b/api/service/stagedstreamsync/state_download_manager.go @@ -143,7 +143,7 @@ func (s *StateDownloadManager) fillTasks(n int) error { // getNextBatch returns objects with a maximum of n state download // tasks to send to the remote peer. 
-func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { +func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []string, codes []common.Hash) { s.lock.Lock() defer s.lock.Unlock() @@ -165,10 +165,10 @@ func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []trie // getNextBatch returns objects with a maximum of n state download // tasks to send to the remote peer. -func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { +func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Hash, paths []string, codes []common.Hash) { // over trie nodes as those can be written to disk and forgotten about. nodes = make([]common.Hash, 0, n) - paths = make([]trie.SyncPath, 0, n) + paths = make([]string, 0, n) codes = make([]common.Hash, 0, n) for hash, t := range s.tasks.codeTasks { @@ -186,7 +186,7 @@ func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Ha break } nodes = append(nodes, t.hash) - paths = append(paths, t.path) + paths = append(paths, path) s.requesting.addTrieTask(path, t) s.tasks.deleteTrieTask(path) } @@ -194,10 +194,10 @@ func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Ha } // getBatchFromRetries get the block number batch to be requested from retries. -func (s *StateDownloadManager) getBatchFromRetries(n int) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { +func (s *StateDownloadManager) getBatchFromRetries(n int) (nodes []common.Hash, paths []string, codes []common.Hash) { // over trie nodes as those can be written to disk and forgotten about. nodes = make([]common.Hash, 0, n) - paths = make([]trie.SyncPath, 0, n) + paths = make([]string, 0, n) codes = make([]common.Hash, 0, n) for hash, t := range s.retries.codeTasks { @@ -215,7 +215,7 @@ func (s *StateDownloadManager) getBatchFromRetries(n int) (nodes []common.Hash, break } nodes = append(nodes, t.hash) - paths = append(paths, t.path) + paths = append(paths, path) s.requesting.addTrieTask(path, t) s.retries.deleteTrieTask(path) } @@ -236,18 +236,18 @@ func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, trie } // add requested trie paths to retries - for _, p := range triePaths { - s.retries.trieTasks[p] = &trieTask{ - hash: s.requesting.trieTasks[p].hash, - path: s.requesting.trieTasks[p].path, - attempts: s.requesting.trieTasks[p].attempts, + for _, path := range triePaths { + s.retries.trieTasks[path] = &trieTask{ + hash: s.requesting.trieTasks[path].hash, + path: s.requesting.trieTasks[path].path, + attempts: s.requesting.trieTasks[path].attempts, } - delete(s.requesting.trieTasks, p) + delete(s.requesting.trieTasks, path) } } // HandleRequestResult handles get trie paths and code hashes result -func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, response [][]byte, loopID int, streamID sttypes.StreamID) error { +func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, triePaths []string, response [][]byte, loopID int, streamID sttypes.StreamID) error { s.lock.Lock() defer s.lock.Unlock() @@ -255,7 +255,7 @@ func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTas duplicate, unexpected, successful, numUncommitted, bytesUncommitted := 0, 0, 0, 0, 0 for _, blob := range response { - hash, err := s.processNodeData(trieTasks, codeTasks, 
blob) + hash, err := s.processNodeData(codeHashes, triePaths, blob) switch err { case nil: numUncommitted++ @@ -271,8 +271,7 @@ func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTas } //TODO: remove successful tasks from requesting - - for path, task := range trieTasks { + for path, task := range s.requesting.trieTasks { // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). @@ -285,9 +284,10 @@ func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTas } // Missing item, place into the retry queue. s.retries.addTrieTask(path, task) + s.requesting.deleteTrieTask(path) } - for hash, task := range codeTasks { + for hash, task := range s.requesting.codeTasks { // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). @@ -300,6 +300,7 @@ func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTas } // Missing item, place into the retry queue. s.retries.addCodeTask(hash, task) + s.requesting.deleteCodeTask(hash) } return nil @@ -312,28 +313,29 @@ func (s *StateDownloadManager) HandleRequestResult(trieTasks map[string]*trieTas // If multiple requests correspond to the same hash, this method will inject the // blob as a result for the first one only, leaving the remaining duplicates to // be fetched again. -func (s *StateDownloadManager) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, responseData []byte) (common.Hash, error) { +func (s *StateDownloadManager) processNodeData(codeHashes []common.Hash, triePaths []string, responseData []byte) (common.Hash, error) { var hash common.Hash s.keccak.Reset() s.keccak.Write(responseData) s.keccak.Read(hash[:]) //TODO: remove from requesting - if _, present := codeTasks[hash]; present { + if _, present := s.requesting.codeTasks[hash]; present { err := s.sched.ProcessCode(trie.CodeSyncResult{ Hash: hash, Data: responseData, }) - delete(codeTasks, hash) + s.requesting.deleteCodeTask(hash) return hash, err } - for path, task := range nodeTasks { + for _, path := range triePaths { + task := s.requesting.getTrieTask(path) if task.hash == hash { err := s.sched.ProcessNode(trie.NodeSyncResult{ Path: path, Data: responseData, }) - delete(nodeTasks, path) + s.requesting.deleteTrieTask(path) return hash, err } } From 841073da60cf260162eb00ce49ec4b8976c7333e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 28 Jun 2023 16:46:53 +0800 Subject: [PATCH 049/128] refactor stage state sync --- .../stagedstreamsync/stage_statesync.go | 123 +++++++++++++++--- 1 file changed, 105 insertions(+), 18 deletions(-) diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 9391944b7b..75326b6ace 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -3,11 +3,15 @@ package stagedstreamsync import ( "context" "fmt" + "sync" "time" + "github.com/ethereum/go-ethereum/common" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/internal/utils" + sttypes "github.com/harmony-one/harmony/p2p/stream/types" 
"github.com/ledgerwatch/erigon-lib/kv" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" ) @@ -49,7 +53,7 @@ func NewStageStateSyncCfg(bc core.BlockChain, } // Exec progresses States stage in the forward direction -func (stg *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { +func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { // for short range sync, skip this step if !s.state.initSync { @@ -57,19 +61,29 @@ func (stg *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo } maxHeight := s.state.status.targetBN - currentHead := stg.configs.bc.CurrentBlock().NumberU64() + currentHead := sss.configs.bc.CurrentBlock().NumberU64() if currentHead >= maxHeight { return nil } - currProgress := stg.configs.bc.CurrentBlock().NumberU64() + currProgress := sss.configs.bc.CurrentBlock().NumberU64() targetHeight := s.state.currentCycle.TargetHeight + + if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error { + if currProgress, err = s.CurrentStageProgress(etx); err != nil { + return err + } + return nil + }); errV != nil { + return errV + } + if currProgress >= targetHeight { return nil } useInternalTx := tx == nil if useInternalTx { var err error - tx, err = stg.configs.db.BeginRw(ctx) + tx, err = sss.configs.db.BeginRw(ctx) if err != nil { return err } @@ -78,34 +92,107 @@ func (stg *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo // isLastCycle := targetHeight >= maxHeight startTime := time.Now() - startBlock := currProgress - if stg.configs.logProgress { + if sss.configs.logProgress { fmt.Print("\033[s") // save the cursor position } - for i := currProgress + 1; i <= targetHeight; i++ { - // log the stage progress in console - if stg.configs.logProgress { - //calculating block speed + // Fetch blocks from neighbors + root := sss.configs.bc.CurrentBlock().Root() + sdm := newStateDownloadManager(tx, sss.configs.bc, root, sss.configs.concurrency, s.state.logger) + + // Setup workers to fetch blocks from remote node + var wg sync.WaitGroup + + for i := 0; i != s.state.config.Concurrency; i++ { + wg.Add(1) + go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime) + } + + wg.Wait() + + if useInternalTx { + if err := tx.Commit(); err != nil { + return err + } + } + + return nil +} + +// runStateWorkerLoop creates a work loop for download states +func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time) { + defer wg.Done() + + for { + select { + case <-ctx.Done(): + return + default: + } + nodes, paths, codes := sdm.GetNextBatch() + if len(nodes)+len(codes) == 0 { + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + return + } + } + + data, stid, err := sss.downloadStates(ctx, nodes, codes) + if err != nil { + if !errors.Is(err, context.Canceled) { + sss.configs.protocol.StreamFailed(stid, "downloadStates failed") + } + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("downloadStates failed")) + err = errors.Wrap(err, "request error") + sdm.HandleRequestError(codes, paths, stid, err) + } else if data == nil || len(data) == 0 { + utils.Logger().Warn(). + Str("stream", string(stid)). 
+ Msg(WrapStagedSyncMsg("downloadStates failed, received empty data bytes"))
+ err := errors.New("downloadStates received empty data bytes")
+ sdm.HandleRequestError(codes, paths, stid, err)
+ } else {
+ sdm.HandleRequestResult(nodes, paths, data, loopID, stid)
+ if sss.configs.logProgress {
+ //calculating state download speed
+ dt := time.Now().Sub(startTime).Seconds()
+ speed := float64(0)
+ if dt > 0 {
+ speed = float64(len(data)) / dt
+ }
+ stateDownloadSpeed := fmt.Sprintf("%.2f", speed)
+
+ fmt.Print("\033[u\033[K") // restore the cursor position and clear the line
+ fmt.Println("state download speed:", stateDownloadSpeed, "states/s")
+ }
+ }
+ }
+}
+
+func (sss *StageStateSync) downloadStates(ctx context.Context, nodes []common.Hash, codes []common.Hash) ([][]byte, sttypes.StreamID, error) {
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ hashes := append(codes, nodes...)
+ data, stid, err := sss.configs.protocol.GetNodeData(ctx, hashes)
+ if err != nil {
+ return nil, stid, err
+ }
+ if err := validateGetNodeDataResult(hashes, data); err != nil {
+ return nil, stid, err
+ }
+ return data, stid, nil
+}
+
+func validateGetNodeDataResult(requested []common.Hash, result [][]byte) error {
+ if len(result) != len(requested) {
+ return fmt.Errorf("unexpected number of nodes delivered: %v / %v", len(result), len(requested))
+ }
 return nil
 }

From 975857fc85ec28e445141dca56238a38e9301acf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com>
Date: Thu, 29 Jun 2023 11:47:11 +0800
Subject: [PATCH 050/128] fix state download manager tasks issue

---
 .../state_download_manager.go | 23 ++++++++-----------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go
index 8c136aebdd..f06ec9cb67 100644
--- a/api/service/stagedstreamsync/state_download_manager.go
+++ b/api/service/stagedstreamsync/state_download_manager.go
@@ -163,7 +163,7 @@ func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []stri
 return nodes, paths, codes
 }
 
-// getNextBatch returns objects with a maximum of n state download
+// getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download
 // tasks to send to the remote peer.
 func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Hash, paths []string, codes []common.Hash) {
 // over trie nodes as those can be written to disk and forgotten about.
@@ -229,19 +229,13 @@ func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, trie // add requested code hashes to retries for _, h := range codeHashes { - s.retries.codeTasks[h] = &codeTask{ - attempts: s.requesting.codeTasks[h].attempts, - } + s.retries.addCodeTask(h,s.requesting.codeTasks[h]) delete(s.requesting.codeTasks, h) } // add requested trie paths to retries for _, path := range triePaths { - s.retries.trieTasks[path] = &trieTask{ - hash: s.requesting.trieTasks[path].hash, - path: s.requesting.trieTasks[path].path, - attempts: s.requesting.trieTasks[path].attempts, - } + s.retries.addTrieTask(path,s.requesting.trieTasks[path]) delete(s.requesting.trieTasks, path) } } @@ -270,13 +264,13 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri } } - //TODO: remove successful tasks from requesting - for path, task := range s.requesting.trieTasks { + for _, path := range triePaths { + task := s.requesting.getTrieTask(path) // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). if len(response) > 0 { //TODO: if timeout also do same - delete(task.attempts, streamID) + delete(s.requesting.trieTasks[path].attempts, streamID) } else if task.attempts[streamID] >= MaxTriesToFetchNodeData { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. @@ -287,12 +281,13 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri s.requesting.deleteTrieTask(path) } - for hash, task := range s.requesting.codeTasks { + for _, hash := range codeHashes { + task:= s.requesting.getCodeTask(hash) // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). if len(response) > 0 { //TODO: if timeout also do same - delete(task.attempts, streamID) + delete(s.requesting.codeTasks[hash].attempts, streamID) //TODO: do we need delete attempts??? } else if task.attempts[streamID] >= MaxTriesToFetchNodeData { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. 
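
Taken together, patches 044 through 050 leave each sync worker with a simple request loop around the manager: pull a batch, fetch it over a stream, and report the outcome. The sketch below is an illustration only, not part of the patch series; it assumes the GetNextBatch / HandleRequestError / HandleRequestResult API exactly as defined above, and the fetch callback is a hypothetical stand-in for the stream protocol's GetNodeData round trip.

// driveStateDownload is a hypothetical worker loop showing how a
// StateDownloadManager is meant to be driven.
func driveStateDownload(ctx context.Context, sdm *StateDownloadManager,
	fetch func(nodes, codes []common.Hash) ([][]byte, sttypes.StreamID, error)) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		// Retried tasks are handed out first, then fresh tasks, up to
		// StatesPerRequest items per batch.
		nodes, paths, codes := sdm.GetNextBatch()
		if len(nodes)+len(codes) == 0 {
			return nil // no pending trie nodes or byte codes in this sketch
		}
		data, stid, err := fetch(nodes, codes)
		if err != nil {
			// The whole batch moves from `requesting` back to `retries`.
			sdm.HandleRequestError(codes, paths, stid, err)
			continue
		}
		// Each delivered blob is keccak-hashed, matched against a pending
		// task and fed to the trie.Sync scheduler; misses go to `retries`.
		if err := sdm.HandleRequestResult(codes, paths, data, 0, stid); err != nil {
			return err
		}
	}
}
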
From 0da96b942e2a1ff3a019c506bd24c18f6fa47f79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 29 Jun 2023 11:57:50 +0800 Subject: [PATCH 051/128] add receipt download manager --- .../receipt_download_manager.go | 172 ++++++++++++++++++ 1 file changed, 172 insertions(+) create mode 100644 api/service/stagedstreamsync/receipt_download_manager.go diff --git a/api/service/stagedstreamsync/receipt_download_manager.go b/api/service/stagedstreamsync/receipt_download_manager.go new file mode 100644 index 0000000000..7b486a9aa0 --- /dev/null +++ b/api/service/stagedstreamsync/receipt_download_manager.go @@ -0,0 +1,172 @@ +package stagedstreamsync + +import ( + "sync" + + sttypes "github.com/harmony-one/harmony/p2p/stream/types" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/rs/zerolog" +) + +type ReceiptDownloadDetails struct { + loopID int + streamID sttypes.StreamID +} + +// receiptDownloadManager is the helper structure for get receipts request management +type receiptDownloadManager struct { + chain blockChain + tx kv.RwTx + + targetBN uint64 + requesting map[uint64]struct{} // receipt numbers that have been assigned to workers but not received + processing map[uint64]struct{} // receipt numbers received requests but not inserted + retries *prioritizedNumbers // requests where error happens + rq *resultQueue // result queue wait to be inserted into blockchain + rdd map[uint64]ReceiptDownloadDetails // details about how this receipt was downloaded + + logger zerolog.Logger + lock sync.Mutex +} + +func newReceiptDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, logger zerolog.Logger) *receiptDownloadManager { + return &receiptDownloadManager{ + chain: chain, + tx: tx, + targetBN: targetBN, + requesting: make(map[uint64]struct{}), + processing: make(map[uint64]struct{}), + retries: newPrioritizedNumbers(), + rq: newResultQueue(), + rdd: make(map[uint64]ReceiptDownloadDetails), + logger: logger, + } +} + +// GetNextBatch get the next receipt numbers batch +func (rdm *receiptDownloadManager) GetNextBatch() []uint64 { + rdm.lock.Lock() + defer rdm.lock.Unlock() + + cap := ReceiptsPerRequest + + bns := rdm.getBatchFromRetries(cap) + if len(bns) > 0 { + cap -= len(bns) + rdm.addBatchToRequesting(bns) + } + + if rdm.availableForMoreTasks() { + addBNs := rdm.getBatchFromUnprocessed(cap) + rdm.addBatchToRequesting(addBNs) + bns = append(bns, addBNs...) 
+ } + + return bns +} + +// HandleRequestError handles the error result +func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error, streamID sttypes.StreamID) { + rdm.lock.Lock() + defer rdm.lock.Unlock() + + // add requested receipt numbers to retries + for _, bn := range bns { + delete(rdm.requesting, bn) + rdm.retries.push(bn) + } +} + +// HandleRequestResult handles get blocks result +func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, blockBytes [][]byte, sigBytes [][]byte, loopID int, streamID sttypes.StreamID) error { + rdm.lock.Lock() + defer rdm.lock.Unlock() + + for i, bn := range bns { + delete(rdm.requesting, bn) + if indexExists(blockBytes, i) && len(blockBytes[i]) <= 1 { + rdm.retries.push(bn) + } else { + rdm.processing[bn] = struct{}{} + rdm.rdd[bn] = ReceiptDownloadDetails{ + loopID: loopID, + streamID: streamID, + } + } + } + return nil +} + +// SetDownloadDetails sets the download details for a batch of blocks +func (rdm *receiptDownloadManager) SetDownloadDetails(bns []uint64, loopID int, streamID sttypes.StreamID) error { + rdm.lock.Lock() + defer rdm.lock.Unlock() + + for _, bn := range bns { + rdm.rdd[bn] = ReceiptDownloadDetails{ + loopID: loopID, + streamID: streamID, + } + } + return nil +} + +// GetDownloadDetails returns the download details for a receipt +func (rdm *receiptDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID) { + rdm.lock.Lock() + defer rdm.lock.Unlock() + + return rdm.rdd[blockNumber].loopID, rdm.rdd[blockNumber].streamID +} + +// getBatchFromRetries get the receipt number batch to be requested from retries. +func (rdm *receiptDownloadManager) getBatchFromRetries(cap int) []uint64 { + var ( + requestBNs []uint64 + curHeight = rdm.chain.CurrentBlock().NumberU64() + ) + for cnt := 0; cnt < cap; cnt++ { + bn := rdm.retries.pop() + if bn == 0 { + break // no more retries + } + if bn <= curHeight { + continue + } + requestBNs = append(requestBNs, bn) + } + return requestBNs +} + +// getBatchFromUnprocessed returns a batch of receipt numbers to be requested from unprocessed. +func (rdm *receiptDownloadManager) getBatchFromUnprocessed(cap int) []uint64 { + var ( + requestBNs []uint64 + curHeight = rdm.chain.CurrentBlock().NumberU64() + ) + bn := curHeight + 1 + // TODO: this algorithm can be potentially optimized. 
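+ // Starting just past the current chain head, the nested loop below walks
+ // block numbers upward, skipping numbers that are already in flight
+ // (`requesting`) or downloaded but not yet inserted (`processing`), and
+ // collects up to `cap` receipt numbers to request.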
+ for cnt := 0; cnt < cap && bn <= rdm.targetBN; cnt++ { + for bn <= rdm.targetBN { + _, ok1 := rdm.requesting[bn] + _, ok2 := rdm.processing[bn] + if !ok1 && !ok2 { + requestBNs = append(requestBNs, bn) + bn++ + break + } + bn++ + } + } + return requestBNs +} + +func (rdm *receiptDownloadManager) availableForMoreTasks() bool { + return rdm.rq.results.Len() < SoftQueueCap +} + +func (rdm *receiptDownloadManager) addBatchToRequesting(bns []uint64) { + for _, bn := range bns { + rdm.requesting[bn] = struct{}{} + } +} From 12d930fd97d942f87820cd0352986ed130a20e72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 29 Jun 2023 12:09:13 +0800 Subject: [PATCH 052/128] fix receipt download manager result queue --- api/service/stagedstreamsync/const.go | 6 +++++- .../stagedstreamsync/receipt_download_manager.go | 12 +++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go index d7b6510a40..f82bff5726 100644 --- a/api/service/stagedstreamsync/const.go +++ b/api/service/stagedstreamsync/const.go @@ -23,7 +23,11 @@ const ( // no more request will be assigned to workers to wait for InsertChain to finish. SoftQueueCap int = 100 - StatesPerRequest int = 10 // number of get nodes by hashes for each request + // number of get nodes by hashes for each request + StatesPerRequest int = 100 + + // maximum number of blocks for get receipts request + ReceiptsPerRequest int = 10 // DefaultConcurrency is the default settings for concurrency DefaultConcurrency int = 4 diff --git a/api/service/stagedstreamsync/receipt_download_manager.go b/api/service/stagedstreamsync/receipt_download_manager.go index 7b486a9aa0..ffe20d0beb 100644 --- a/api/service/stagedstreamsync/receipt_download_manager.go +++ b/api/service/stagedstreamsync/receipt_download_manager.go @@ -22,7 +22,6 @@ type receiptDownloadManager struct { requesting map[uint64]struct{} // receipt numbers that have been assigned to workers but not received processing map[uint64]struct{} // receipt numbers received requests but not inserted retries *prioritizedNumbers // requests where error happens - rq *resultQueue // result queue wait to be inserted into blockchain rdd map[uint64]ReceiptDownloadDetails // details about how this receipt was downloaded logger zerolog.Logger @@ -37,7 +36,6 @@ func newReceiptDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, lo requesting: make(map[uint64]struct{}), processing: make(map[uint64]struct{}), retries: newPrioritizedNumbers(), - rq: newResultQueue(), rdd: make(map[uint64]ReceiptDownloadDetails), logger: logger, } @@ -77,14 +75,14 @@ func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error, s } } -// HandleRequestResult handles get blocks result -func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, blockBytes [][]byte, sigBytes [][]byte, loopID int, streamID sttypes.StreamID) error { +// HandleRequestResult handles get receipts result +func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receiptBytes [][]byte, sigBytes [][]byte, loopID int, streamID sttypes.StreamID) error { rdm.lock.Lock() defer rdm.lock.Unlock() for i, bn := range bns { delete(rdm.requesting, bn) - if indexExists(blockBytes, i) && len(blockBytes[i]) <= 1 { + if indexExists(receiptBytes, i) && len(receiptBytes[i]) <= 1 { rdm.retries.push(bn) } else { rdm.processing[bn] = struct{}{} @@ -111,7 +109,7 @@ func (rdm 
*receiptDownloadManager) SetDownloadDetails(bns []uint64, loopID int, return nil } -// GetDownloadDetails returns the download details for a receipt +// GetDownloadDetails returns the download details for a certain block number func (rdm *receiptDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID) { rdm.lock.Lock() defer rdm.lock.Unlock() @@ -162,7 +160,7 @@ func (rdm *receiptDownloadManager) getBatchFromUnprocessed(cap int) []uint64 { } func (rdm *receiptDownloadManager) availableForMoreTasks() bool { - return rdm.rq.results.Len() < SoftQueueCap + return len(rdm.requesting) < SoftQueueCap } func (rdm *receiptDownloadManager) addBatchToRequesting(bns []uint64) { From 6f3aa67b88d4e026fc1117b7c2db88b0fc81a1ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 29 Jun 2023 18:43:05 +0800 Subject: [PATCH 053/128] refactor stage receipts and change the stages sorting --- .../stagedstreamsync/default_stages.go | 25 +- .../receipt_download_manager.go | 5 +- .../stagedstreamsync/stage_receipts.go | 327 ++++++++++++++++++ api/service/stagedstreamsync/stages.go | 3 +- 4 files changed, 349 insertions(+), 11 deletions(-) create mode 100644 api/service/stagedstreamsync/stage_receipts.go diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go index 4a1e719f22..e1bb8578db 100644 --- a/api/service/stagedstreamsync/default_stages.go +++ b/api/service/stagedstreamsync/default_stages.go @@ -13,9 +13,9 @@ var DefaultForwardOrder = ForwardOrder{ SyncEpoch, ShortRange, BlockBodies, - StateSync, - // Stages below don't use Internet States, + StateSync, + Receipts, LastMile, Finish, } @@ -23,8 +23,9 @@ var DefaultForwardOrder = ForwardOrder{ var DefaultRevertOrder = RevertOrder{ Finish, LastMile, - States, + Receipts, StateSync, + States, BlockBodies, ShortRange, SyncEpoch, @@ -34,8 +35,9 @@ var DefaultRevertOrder = RevertOrder{ var DefaultCleanUpOrder = CleanUpOrder{ Finish, LastMile, - States, + Receipts, StateSync, + States, BlockBodies, ShortRange, SyncEpoch, @@ -49,6 +51,7 @@ func DefaultStages(ctx context.Context, bodiesCfg StageBodiesCfg, stateSyncCfg StageStateSyncCfg, statesCfg StageStatesCfg, + receiptsCfg StageReceiptsCfg, lastMileCfg StageLastMileCfg, finishCfg StageFinishCfg, ) []*Stage { @@ -57,8 +60,9 @@ func DefaultStages(ctx context.Context, handlerStageShortRange := NewStageShortRange(srCfg) handlerStageEpochSync := NewStageEpoch(seCfg) handlerStageBodies := NewStageBodies(bodiesCfg) - handlerStageStateSync := NewStageStateSync(stateSyncCfg) handlerStageStates := NewStageStates(statesCfg) + handlerStageStateSync := NewStageStateSync(stateSyncCfg) + handlerStageReceipts := NewStageReceipts(receiptsCfg) handlerStageLastMile := NewStageLastMile(lastMileCfg) handlerStageFinish := NewStageFinish(finishCfg) @@ -83,15 +87,20 @@ func DefaultStages(ctx context.Context, Description: "Retrieve Block Bodies", Handler: handlerStageBodies, }, + { + ID: States, + Description: "Update Blockchain State", + Handler: handlerStageStates, + }, { ID: StateSync, Description: "Retrieve States", Handler: handlerStageStateSync, }, { - ID: States, - Description: "Update Blockchain State", - Handler: handlerStageStates, + ID: Receipts, + Description: "Retrieve Receipts", + Handler: handlerStageReceipts, }, { ID: LastMile, diff --git a/api/service/stagedstreamsync/receipt_download_manager.go 
b/api/service/stagedstreamsync/receipt_download_manager.go
index ffe20d0beb..2eaa3ca452 100644
--- a/api/service/stagedstreamsync/receipt_download_manager.go
+++ b/api/service/stagedstreamsync/receipt_download_manager.go
@@ -3,6 +3,7 @@ package stagedstreamsync
 import (
 "sync"
 
+ "github.com/harmony-one/harmony/core/types"
 sttypes "github.com/harmony-one/harmony/p2p/stream/types"
 "github.com/ledgerwatch/erigon-lib/kv"
 "github.com/rs/zerolog"
 )
@@ -76,13 +77,13 @@ func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error, s
 }
 
 // HandleRequestResult handles get receipts result
-func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receiptBytes [][]byte, sigBytes [][]byte, loopID int, streamID sttypes.StreamID) error {
+func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receipts []*types.Receipt, loopID int, streamID sttypes.StreamID) error {
 rdm.lock.Lock()
 defer rdm.lock.Unlock()
 
 for i, bn := range bns {
 delete(rdm.requesting, bn)
- if indexExists(receiptBytes, i) && len(receiptBytes[i]) <= 1 {
+ if !indexExists(receipts, i) {
 rdm.retries.push(bn)
 } else {
diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go
new file mode 100644
index 0000000000..a9bffa30ff
--- /dev/null
+++ b/api/service/stagedstreamsync/stage_receipts.go
@@ -0,0 +1,327 @@
+package stagedstreamsync
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/internal/utils"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/pkg/errors"
+)
+
+type StageReceipts struct {
+ configs StageReceiptsCfg
+}
+
+type StageReceiptsCfg struct {
+ bc core.BlockChain
+ db kv.RwDB
+ blockDBs []kv.RwDB
+ concurrency int
+ protocol syncProtocol
+ isBeacon bool
+ logProgress bool
+}
+
+func NewStageReceipts(cfg StageReceiptsCfg) *StageReceipts {
+ return &StageReceipts{
+ configs: cfg,
+ }
+}
+
+func NewStageReceiptsCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, logProgress bool) StageReceiptsCfg {
+ return StageReceiptsCfg{
+ bc: bc,
+ db: db,
+ blockDBs: blockDBs,
+ concurrency: concurrency,
+ protocol: protocol,
+ isBeacon: isBeacon,
+ logProgress: logProgress,
+ }
+}
+
+// Exec progresses Receipts stage in the forward direction
+func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+
+ useInternalTx := tx == nil
+
+ if invalidBlockRevert {
+ return nil
+ }
+
+ // for short range sync, skip this stage
+ if !s.state.initSync {
+ return nil
+ }
+
+ maxHeight := s.state.status.targetBN
+ currentHead := b.configs.bc.CurrentBlock().NumberU64()
+ if currentHead >= maxHeight {
+ return nil
+ }
+ currProgress := uint64(0)
+ targetHeight := s.state.currentCycle.TargetHeight
+
+ if errV := CreateView(ctx, b.configs.db, tx, func(etx kv.Tx) error {
+ if currProgress, err = s.CurrentStageProgress(etx); err != nil {
+ return err
+ }
+ return nil
+ }); errV != nil {
+ return errV
+ }
+
+ if currProgress == 0 {
+ currProgress = currentHead
+ }
+
+ if currProgress >= targetHeight {
+ return nil
+ }
+
+ // size := uint64(0)
+ startTime := time.Now()
+ // startBlock := currProgress
+ if b.configs.logProgress {
+ fmt.Print("\033[s") // save the cursor
position
+	}
+
+	if useInternalTx {
+		var err error
+		tx, err = b.configs.db.BeginRw(ctx)
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+	}
+
+	// Fetch receipts from neighbors
+	s.state.rdm = newReceiptDownloadManager(tx, b.configs.bc, targetHeight, s.state.logger)
+
+	// Setup workers to fetch receipts from remote node
+	var wg sync.WaitGroup
+
+	for i := 0; i != s.state.config.Concurrency; i++ {
+		wg.Add(1)
+		go b.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, startTime)
+	}
+
+	wg.Wait()
+
+	if useInternalTx {
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// runReceiptWorkerLoop creates a work loop for download receipts
+func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time) {
+
+	currentBlock := int(b.configs.bc.CurrentBlock().NumberU64())
+
+	defer wg.Done()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		default:
+		}
+		batch := rdm.GetNextBatch()
+		if len(batch) == 0 {
+			select {
+			case <-ctx.Done():
+				return
+			case <-time.After(100 * time.Millisecond):
+				return
+			}
+		}
+		var hashes []common.Hash
+		for _, bn := range batch {
+			header := b.configs.bc.GetHeaderByNumber(bn)
+			hashes = append(hashes, header.ReceiptHash())
+		}
+		receipts, stid, err := b.downloadReceipts(ctx, hashes)
+		if err != nil {
+			if !errors.Is(err, context.Canceled) {
+				b.configs.protocol.StreamFailed(stid, "downloadReceipts failed")
+			}
+			utils.Logger().Error().
+				Err(err).
+				Str("stream", string(stid)).
+				Interface("block numbers", batch).
+				Msg(WrapStagedSyncMsg("downloadReceipts failed"))
+			err = errors.Wrap(err, "request error")
+			rdm.HandleRequestError(batch, err, stid)
+		} else if receipts == nil || len(receipts) == 0 {
+			utils.Logger().Warn().
+				Str("stream", string(stid)).
+				Interface("block numbers", batch).
+				Msg(WrapStagedSyncMsg("downloadReceipts failed, received empty receipts"))
+			err := errors.New("downloadReceipts received empty receipts")
+			rdm.HandleRequestError(batch, err, stid)
+		} else {
+			rdm.HandleRequestResult(batch, receipts, loopID, stid)
+			if b.configs.logProgress {
+				// calculating block download speed
+				dt := time.Now().Sub(startTime).Seconds()
+				speed := float64(0)
+				if dt > 0 {
+					speed = float64(len(rdm.rdd)) / dt
+				}
+				blockSpeed := fmt.Sprintf("%.2f", speed)
+
+				fmt.Print("\033[u\033[K") // restore the cursor position and clear the line
+				fmt.Println("downloaded blocks:", currentBlock+len(rdm.rdd), "/", int(rdm.targetBN), "(", blockSpeed, "blocks/s", ")")
+			}
+		}
+	}
+}
+
+func (b *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]*types.Receipt, sttypes.StreamID, error) {
+	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	defer cancel()
+
+	receipts, stid, err := b.configs.protocol.GetReceipts(ctx, hs)
+	if err != nil {
+		return nil, stid, err
+	}
+	if err := validateGetReceiptsResult(hs, receipts); err != nil {
+		return nil, stid, err
+	}
+	return receipts, stid, nil
+}
+
+func (b *StageReceipts) downloadRawBlocks(ctx context.Context, bns []uint64) ([][]byte, [][]byte, sttypes.StreamID, error) {
+	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	defer cancel()
+
+	return b.configs.protocol.GetRawBlocksByNumber(ctx, bns)
+}
+
+func validateGetReceiptsResult(requested []common.Hash, result []*types.Receipt) error {
+	// TODO: validate each receipt here
+
+	return nil
+}
+
+func (b *StageReceipts) saveProgress(ctx context.Context, s *StageState, progress uint64, tx kv.RwTx) (err error) {
+	useInternalTx := tx == nil
+	if useInternalTx {
+		var err error
+		tx, err = b.configs.db.BeginRw(ctx)
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+	}
+
+	// save progress
+	if err = s.Update(tx, progress); err != nil {
+		utils.Logger().Error().
+			Err(err).
+			Msgf("[STAGED_SYNC] saving progress for receipts stage failed")
+		return ErrSavingBodiesProgressFail
+	}
+
+	if useInternalTx {
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (b *StageReceipts) cleanBlocksDB(ctx context.Context, loopID int) (err error) {
+	tx, errb := b.configs.blockDBs[loopID].BeginRw(ctx)
+	if errb != nil {
+		return errb
+	}
+	defer tx.Rollback()
+
+	// clean block bodies db
+	if err = tx.ClearBucket(BlocksBucket); err != nil {
+		utils.Logger().Error().
+			Err(err).
+			Msgf("[STAGED_STREAM_SYNC] clear blocks bucket after revert failed")
+		return err
+	}
+	// clean block signatures db
+	if err = tx.ClearBucket(BlockSignaturesBucket); err != nil {
+		utils.Logger().Error().
+			Err(err).
+ Msgf("[STAGED_STREAM_SYNC] clear block signatures bucket after revert failed") + return err + } + + if err = tx.Commit(); err != nil { + return err + } + + return nil +} + +func (b *StageReceipts) cleanAllBlockDBs(ctx context.Context) (err error) { + //clean all blocks DBs + for i := 0; i < b.configs.concurrency; i++ { + if err := b.cleanBlocksDB(ctx, i); err != nil { + return err + } + } + return nil +} + +func (b *StageReceipts) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) { + + //clean all blocks DBs + if err := b.cleanAllBlockDBs(ctx); err != nil { + return err + } + + useInternalTx := tx == nil + if useInternalTx { + tx, err = b.configs.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + } + // save progress + currentHead := b.configs.bc.CurrentBlock().NumberU64() + if err = s.Update(tx, currentHead); err != nil { + utils.Logger().Error(). + Err(err). + Msgf("[STAGED_SYNC] saving progress for block bodies stage after revert failed") + return err + } + + if err = u.Done(tx); err != nil { + return err + } + + if useInternalTx { + if err = tx.Commit(); err != nil { + return err + } + } + return nil +} + +func (b *StageReceipts) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) { + //clean all blocks DBs + if err := b.cleanAllBlockDBs(ctx); err != nil { + return err + } + + return nil +} diff --git a/api/service/stagedstreamsync/stages.go b/api/service/stagedstreamsync/stages.go index cb6efa0cd8..909bb25c08 100644 --- a/api/service/stagedstreamsync/stages.go +++ b/api/service/stagedstreamsync/stages.go @@ -12,8 +12,9 @@ const ( ShortRange SyncStageID = "ShortRange" // short range SyncEpoch SyncStageID = "SyncEpoch" // epoch sync BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified - StateSync SyncStageID = "StateSync" // State sync States SyncStageID = "States" // will construct most recent state from downloaded blocks + StateSync SyncStageID = "StateSync" // State sync + Receipts SyncStageID = "Receipts" // Receipts LastMile SyncStageID = "LastMile" // update blocks after sync and update last mile blocks as well Finish SyncStageID = "Finish" // Nominal stage after all other stages ) From e11b6ef1227c11b028acb4bf7227428c903465d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 29 Jun 2023 18:45:47 +0800 Subject: [PATCH 054/128] goimports staged stream sync --- api/service/stagedstreamsync/const.go | 2 +- .../stagedstreamsync/staged_stream_sync.go | 34 +++++++++---------- api/service/stagedstreamsync/stages.go | 2 +- .../state_download_manager.go | 6 ++-- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go index f82bff5726..a41d2e859d 100644 --- a/api/service/stagedstreamsync/const.go +++ b/api/service/stagedstreamsync/const.go @@ -27,7 +27,7 @@ const ( StatesPerRequest int = 100 // maximum number of blocks for get receipts request - ReceiptsPerRequest int = 10 + ReceiptsPerRequest int = 10 // DefaultConcurrency is the default settings for concurrency DefaultConcurrency int = 4 diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 0a14d0cb3e..fea59b02aa 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go 
@@ -59,23 +59,23 @@ func (ib *InvalidBlock) addBadStream(bsID sttypes.StreamID) { } type StagedStreamSync struct { - bc core.BlockChain - consensus *consensus.Consensus - isBeacon bool - isExplorer bool - db kv.RwDB - protocol syncProtocol - isBeaconNode bool - gbm *blockDownloadManager // initialized when finished get block number - lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus - lastMileMux sync.Mutex - inserted int - config Config - logger zerolog.Logger - status *status //TODO: merge this with currentSyncCycle - initSync bool // if sets to true, node start long range syncing - UseMemDB bool - + bc core.BlockChain + consensus *consensus.Consensus + isBeacon bool + isExplorer bool + db kv.RwDB + protocol syncProtocol + isBeaconNode bool + gbm *blockDownloadManager // initialized when finished get block number + rdm *receiptDownloadManager + lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus + lastMileMux sync.Mutex + inserted int + config Config + logger zerolog.Logger + status *status //TODO: merge this with currentSyncCycle + initSync bool // if sets to true, node start long range syncing + UseMemDB bool revertPoint *uint64 // used to run stages prevRevertPoint *uint64 // used to get value from outside of staged sync after cycle (for example to notify RPCDaemon) invalidBlock InvalidBlock diff --git a/api/service/stagedstreamsync/stages.go b/api/service/stagedstreamsync/stages.go index 909bb25c08..6ad9e4519c 100644 --- a/api/service/stagedstreamsync/stages.go +++ b/api/service/stagedstreamsync/stages.go @@ -13,7 +13,7 @@ const ( SyncEpoch SyncStageID = "SyncEpoch" // epoch sync BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified States SyncStageID = "States" // will construct most recent state from downloaded blocks - StateSync SyncStageID = "StateSync" // State sync + StateSync SyncStageID = "StateSync" // State sync Receipts SyncStageID = "Receipts" // Receipts LastMile SyncStageID = "LastMile" // update blocks after sync and update last mile blocks as well Finish SyncStageID = "Finish" // Nominal stage after all other stages diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go index f06ec9cb67..1cd414757b 100644 --- a/api/service/stagedstreamsync/state_download_manager.go +++ b/api/service/stagedstreamsync/state_download_manager.go @@ -229,13 +229,13 @@ func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, trie // add requested code hashes to retries for _, h := range codeHashes { - s.retries.addCodeTask(h,s.requesting.codeTasks[h]) + s.retries.addCodeTask(h, s.requesting.codeTasks[h]) delete(s.requesting.codeTasks, h) } // add requested trie paths to retries for _, path := range triePaths { - s.retries.addTrieTask(path,s.requesting.trieTasks[path]) + s.retries.addTrieTask(path, s.requesting.trieTasks[path]) delete(s.requesting.trieTasks, path) } } @@ -282,7 +282,7 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri } for _, hash := range codeHashes { - task:= s.requesting.getCodeTask(hash) + task := s.requesting.getCodeTask(hash) // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). 
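
The receipt-stage patches above hinge on one piece of bookkeeping: HandleRequestResult must decide, per requested block number, whether the peer's answer is usable (move it to processing) or missing (push it back onto the retry queue). The stand-alone Go sketch below models just that round trip outside the harmony codebase; downloadManager, handleResult and the *string results are illustrative stand-ins for receiptDownloadManager and []*types.Receipt, not project APIs.

package main

import "fmt"

// downloadManager is a toy model of the requesting/processing/retries
// bookkeeping that the receipt download manager keeps per block number.
type downloadManager struct {
	requesting map[uint64]struct{} // block numbers currently in flight
	processing map[uint64]struct{} // block numbers with a usable result
	retries    []uint64            // block numbers queued for re-request
}

func newDownloadManager() *downloadManager {
	return &downloadManager{
		requesting: make(map[uint64]struct{}),
		processing: make(map[uint64]struct{}),
	}
}

// handleResult mirrors the decision rule of the patched HandleRequestResult:
// a missing entry in the result slice sends the block number back to the
// retry queue, while a present result marks it as processing.
func (dm *downloadManager) handleResult(bns []uint64, results []*string) {
	for i, bn := range bns {
		delete(dm.requesting, bn)
		if i >= len(results) || results[i] == nil { // result missing -> retry
			dm.retries = append(dm.retries, bn)
		} else {
			dm.processing[bn] = struct{}{}
		}
	}
}

func main() {
	dm := newDownloadManager()
	bns := []uint64{10, 11, 12}
	for _, bn := range bns {
		dm.requesting[bn] = struct{}{}
	}
	ok := "receipt"
	dm.handleResult(bns, []*string{&ok, nil, &ok}) // block 11 had no result
	fmt.Println("processing:", len(dm.processing), "retries:", dm.retries)
}

Keeping the three sets disjoint is what lets several worker loops share one manager under a single lock, as the real receiptDownloadManager does with rdm.lock.
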
From 91034682c79f4e92fc9aad434506952342d70eee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:32:33 +0800 Subject: [PATCH 055/128] add block insertion without execution to blockchain implementation --- core/blockchain.go | 18 +++- core/blockchain_impl.go | 147 ++++++++++++++++++++++++++++++++- core/blockchain_stub.go | 4 +- core/epochchain.go | 2 +- hmy/downloader/adapter.go | 2 +- hmy/downloader/beaconhelper.go | 2 +- hmy/downloader/downloader.go | 8 +- hmy/downloader/longrange.go | 2 +- hmy/downloader/shortrange.go | 4 +- node/node_handler_test.go | 2 +- 10 files changed, 171 insertions(+), 20 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 41f72a9a2b..a286af1171 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -100,6 +100,18 @@ type BlockChain interface { // Rollback is designed to remove a chain of links from the database that aren't // certain enough to be valid. Rollback(chain []common.Hash) error + // WriteBlockWithoutState writes only the block and its metadata to the database, + // but does not write any state. This is used to construct competing side forks + // up to the point where they exceed the canonical total difficulty. + WriteBlockWithoutState(block *types.Block) (err error) + // WriteBlockWithState writes the block and all associated state to the database. + WriteBlockWithState( + block *types.Block, receipts []*types.Receipt, + cxReceipts []*types.CXReceipt, + stakeMsgs []types2.StakeMsg, + paid reward.Reader, + state *state.DB, + ) (status WriteStatus, err error) // GetMaxGarbageCollectedBlockNumber .. GetMaxGarbageCollectedBlockNumber() int64 // InsertChain attempts to insert the given batch of blocks in to the canonical @@ -108,9 +120,9 @@ type BlockChain interface { // wrong. // // After insertion is done, all accumulated events will be fired. - InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) - // LeaderRotationMeta returns info about leader rotation. - LeaderRotationMeta() LeaderRotationMeta + InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) + // LeaderRotationMeta returns the number of continuous blocks by the leader. + LeaderRotationMeta() (publicKeyBytes []byte, epoch, count, shifts uint64, err error) // BadBlocks returns a list of the last 'bad blocks' that // the client has seen on the network. 
BadBlocks() []BadBlock diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index cc30315677..8d9de8dbc3 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1194,7 +1194,18 @@ func (bc *BlockChainImpl) Rollback(chain []common.Hash) error { var lastWrite uint64 -func (bc *BlockChainImpl) writeBlockWithState( +func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block) (err error) { + bc.chainmu.Lock() + defer bc.chainmu.Unlock() + + if err := rawdb.WriteBlock(bc.db, block); err != nil { + return err + } + + return nil +} + +func (bc *BlockChainImpl) WriteBlockWithState( block *types.Block, receipts []*types.Receipt, cxReceipts []*types.CXReceipt, stakeMsgs []staking.StakeMsg, @@ -1348,7 +1359,7 @@ func (bc *BlockChainImpl) GetMaxGarbageCollectedBlockNumber() int64 { return bc.maxGarbCollectedBlkNum } -func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) { +func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) { // if in tikv mode, writer node need preempt master or come be a follower if bc.isInitTiKV() && !bc.tikvPreemptMaster(bc.rangeBlock(chain)) { return len(chain), nil @@ -1392,10 +1403,17 @@ func (bc *BlockChainImpl) LeaderRotationMeta() LeaderRotationMeta { return bc.leaderRotationMeta.Clone() } +func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, []interface{}, []*types.Log, error) { + if blockExecution { + return bc.insertChainWithBlockExecution(chain, verifyHeaders) + } + return bc.insertChainWithoutBlockExecution(chain, verifyHeaders) +} + // insertChain will execute the actual chain insertion and event aggregation. The // only reason this method exists as a separate one is to make locking cleaner // with deferred statements. -func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { +func (bc *BlockChainImpl) insertChainWithBlockExecution(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { // Sanity check that we have something meaningful to import if len(chain) == 0 { return 0, nil, nil, ErrEmptyChain @@ -1506,7 +1524,9 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Prune in case non-empty winner chain if len(winner) > 0 { // Import all the pruned blocks to make the state available - _, evs, logs, err := bc.insertChain(winner, true /* verifyHeaders */) + bc.chainmu.Unlock() + _, evs, logs, err := bc.insertChainWithBlockExecution(winner, true /* verifyHeaders */) + bc.chainmu.Lock() events, coalescedLogs = evs, logs if err != nil { @@ -1639,6 +1659,125 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i return 0, events, coalescedLogs, nil } +//receiptChain []types.Receipts, +func (bc *BlockChainImpl) insertChainWithoutBlockExecution(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { + // Sanity check that we have something meaningful to import + if len(chain) == 0 { + return 0, nil, nil, nil + } + // Do a sanity check that the provided chain is actually ordered and linked + for i := 1; i < len(chain); i++ { + if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { + // Chain broke ancestry, log a message (programming error) and skip insertion + utils.Logger().Error(). + Str("number", chain[i].Number().String()). + Str("hash", chain[i].Hash().Hex()). 
+ Str("parent", chain[i].ParentHash().Hex()). + Str("prevnumber", chain[i-1].Number().String()). + Str("prevhash", chain[i-1].Hash().Hex()). + Msg("insertChain: non contiguous block insert") + + return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), + chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) + } + } + + bc.chainmu.Lock() + defer bc.chainmu.Unlock() + + var verifyHeadersResults <-chan error + + // If the block header chain has not been verified, conduct header verification here. + if verifyHeaders { + headers := make([]*block.Header, len(chain)) + seals := make([]bool, len(chain)) + + for i, block := range chain { + headers[i] = block.Header() + seals[i] = true + } + // Note that VerifyHeaders verifies headers in the chain in parallel + abort, results := bc.Engine().VerifyHeaders(bc, headers, seals) + verifyHeadersResults = results + defer close(abort) + } + + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) + //senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) + + // Iterate over the blocks and insert when the verifier permits + for i, block := range chain { + // If the chain is terminating, stop processing blocks + if atomic.LoadInt32(&bc.procInterrupt) == 1 { + utils.Logger().Debug().Msg("Premature abort during blocks processing") + break + } + + var err error + if verifyHeaders { + err = <-verifyHeadersResults + } + if err == nil { + err = bc.Validator().ValidateBody(block) + } + switch { + case err == ErrKnownBlock: + // Block and state both already known. However if the current block is below + // this number we did a rollback and we should reimport it nonetheless. + if bc.CurrentBlock().NumberU64() >= block.NumberU64() { + continue + } + + case err == consensus_engine.ErrFutureBlock: + // Allow up to MaxFuture second in the future blocks. If this limit is exceeded + // the chain is discarded and processed at a later time if given. + max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) + if block.Time().Cmp(max) > 0 { + return i, nil, nil, fmt.Errorf("future block: %v > %v", block.Time(), max) + } + bc.futureBlocks.Add(block.Hash(), block) + continue + + case err == consensus_engine.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): + bc.futureBlocks.Add(block.Hash(), block) + continue + + case err == consensus_engine.ErrPrunedAncestor: + var winner []*types.Block + parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1) + for parent != nil && !bc.HasState(parent.Root()) { + winner = append(winner, parent) + parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) + } + for j := 0; j < len(winner)/2; j++ { + winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j] + } + // Prune in case non-empty winner chain + if len(winner) > 0 { + // Import all the pruned blocks to make the state available + bc.chainmu.Unlock() + _, _, _, err := bc.insertChainWithoutBlockExecution(winner, true /* verifyHeaders */) + bc.chainmu.Lock() + if err != nil { + return i, nil, nil, err + } + } + + case err != nil: + bc.reportBlock(block, nil, err) + return i, nil, nil, err + } + + // Create a new statedb using the parent block and report an + // error if it fails. 
+ if err = bc.WriteBlockWithoutState(block); err != nil { + return i, nil, nil, err + } + } + + return 0, nil, nil, nil +} + // insertStats tracks and reports on block insertion. type insertStats struct { queued, processed, ignored int diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index e9ef10ce94..32a0b1c194 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -120,7 +120,7 @@ func (a Stub) Rollback(chain []common.Hash) error { return errors.Errorf("method Rollback not implemented for %s", a.Name) } -func (a Stub) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) { +func (a Stub) WriteBlockWithoutState(block *types.Block) (err error) { return errors.Errorf("method WriteBlockWithoutState not implemented for %s", a.Name) } @@ -132,7 +132,7 @@ func (a Stub) GetMaxGarbageCollectedBlockNumber() int64 { return 0 } -func (a Stub) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) { +func (a Stub) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) { return 0, errors.Errorf("method InsertChain not implemented for %s", a.Name) } diff --git a/core/epochchain.go b/core/epochchain.go index 2dab284713..3df271b11b 100644 --- a/core/epochchain.go +++ b/core/epochchain.go @@ -114,7 +114,7 @@ func (bc *EpochChain) Stop() { }) } -func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool) (int, error) { +func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool, _ bool) (int, error) { if len(blocks) == 0 { return 0, nil } diff --git a/hmy/downloader/adapter.go b/hmy/downloader/adapter.go index c8758b506d..70e4ca3257 100644 --- a/hmy/downloader/adapter.go +++ b/hmy/downloader/adapter.go @@ -27,6 +27,6 @@ type blockChain interface { engine.ChainReader Engine() engine.Engine - InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) + InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) WriteCommitSig(blockNum uint64, lastCommits []byte) error } diff --git a/hmy/downloader/beaconhelper.go b/hmy/downloader/beaconhelper.go index 96d06ebf8e..2c7f056754 100644 --- a/hmy/downloader/beaconhelper.go +++ b/hmy/downloader/beaconhelper.go @@ -123,7 +123,7 @@ func (bh *beaconHelper) insertLastMileBlocks() (inserted int, bn uint64, err err } // TODO: Instruct the beacon helper to verify signatures. 
This may require some forks // in pub-sub message (add commit sigs in node.block.sync messages) - if _, err = bh.bc.InsertChain(types.Blocks{b}, true); err != nil { + if _, err = bh.bc.InsertChain(types.Blocks{b}, true, true); err != nil { bn-- return } diff --git a/hmy/downloader/downloader.go b/hmy/downloader/downloader.go index 01ec242abb..378b1e6301 100644 --- a/hmy/downloader/downloader.go +++ b/hmy/downloader/downloader.go @@ -280,16 +280,16 @@ func (e *sigVerifyErr) Error() string { return fmt.Sprintf("[VerifyHeaderSignature] %v", e.err.Error()) } -func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks) (int, error) { +func verifyAndInsertBlocks(bc blockChain, blockExecution bool, blocks types.Blocks) (int, error) { for i, block := range blocks { - if err := verifyAndInsertBlock(bc, block, blocks[i+1:]...); err != nil { + if err := verifyAndInsertBlock(bc, block, blockExecution, blocks[i+1:]...); err != nil { return i, err } } return len(blocks), nil } -func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error { +func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool, nextBlocks ...*types.Block) error { var ( sigBytes bls.SerializedSignature bitmap []byte @@ -314,7 +314,7 @@ func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*type if err := bc.Engine().VerifyHeader(bc, block.Header(), true); err != nil { return errors.Wrap(err, "[VerifyHeader]") } - if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil { + if _, err := bc.InsertChain(types.Blocks{block}, false, blockExecution); err != nil { return errors.Wrap(err, "[InsertChain]") } return nil diff --git a/hmy/downloader/longrange.go b/hmy/downloader/longrange.go index 4d4935b8f2..fc4d4962f4 100644 --- a/hmy/downloader/longrange.go +++ b/hmy/downloader/longrange.go @@ -210,7 +210,7 @@ func (lsi *lrSyncIter) processBlocks(results []*blockResult, targetBN uint64) { blocks := blockResultsToBlocks(results) for i, block := range blocks { - if err := verifyAndInsertBlock(lsi.bc, block); err != nil { + if err := verifyAndInsertBlock(lsi.bc, block, true); err != nil { lsi.logger.Warn().Err(err).Uint64("target block", targetBN). Uint64("block number", block.NumberU64()). 
Msg("insert blocks failed in long range") diff --git a/hmy/downloader/shortrange.go b/hmy/downloader/shortrange.go index 8276911d4f..2a705f99a4 100644 --- a/hmy/downloader/shortrange.go +++ b/hmy/downloader/shortrange.go @@ -74,7 +74,7 @@ func (d *Downloader) doShortRangeSync() (int, error) { } d.logger.Info().Int("num blocks", len(blocks)).Msg("getBlockByHashes result") - n, err := verifyAndInsertBlocks(d.bc, blocks) + n, err := verifyAndInsertBlocks(d.bc, true, blocks) numBlocksInsertedShortRangeHistogramVec.With(d.promLabels()).Observe(float64(n)) if err != nil { d.logger.Warn().Err(err).Int("blocks inserted", n).Msg("Insert block failed") @@ -131,7 +131,7 @@ func (d *Downloader) doShortRangeSyncForEpochSync() (int, error) { // short circuit for no sync is needed return 0, nil } - n, err := d.bc.InsertChain(blocks, true) + n, err := d.bc.InsertChain(blocks, true, true) numBlocksInsertedShortRangeHistogramVec.With(d.promLabels()).Observe(float64(n)) if err != nil { sh.removeStreams([]sttypes.StreamID{streamID}) // Data provided by remote nodes is corrupted diff --git a/node/node_handler_test.go b/node/node_handler_test.go index 867a9616dc..23c5498fed 100644 --- a/node/node_handler_test.go +++ b/node/node_handler_test.go @@ -69,7 +69,7 @@ func TestAddNewBlock(t *testing.T) { commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil, ) - _, err = node.Blockchain().InsertChain([]*types.Block{block}, true) + _, err = node.Blockchain().InsertChain([]*types.Block{block}, true, true) if err != nil { t.Errorf("error when adding new block %v", err) } From cfc94bb4e148700019a17bfd0695c38da1c0cbc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 12:33:17 +0800 Subject: [PATCH 056/128] fix tests for new block insertion --- hmy/downloader/adapter_test.go | 6 +++--- node/node_newblock_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hmy/downloader/adapter_test.go b/hmy/downloader/adapter_test.go index 4bc023b5cc..3b76640078 100644 --- a/hmy/downloader/adapter_test.go +++ b/hmy/downloader/adapter_test.go @@ -60,7 +60,7 @@ func (bc *testBlockChain) currentBlockNumber() uint64 { return bc.curBN } -func (bc *testBlockChain) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) { +func (bc *testBlockChain) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) { bc.lock.Lock() defer bc.lock.Unlock() @@ -169,11 +169,11 @@ type testInsertHelper struct { } func (ch *testInsertHelper) verifyAndInsertBlock(block *types.Block) error { - _, err := ch.bc.InsertChain(types.Blocks{block}, true) + _, err := ch.bc.InsertChain(types.Blocks{block}, true, true) return err } func (ch *testInsertHelper) verifyAndInsertBlocks(blocks types.Blocks) (int, error) { - return ch.bc.InsertChain(blocks, true) + return ch.bc.InsertChain(blocks, true, true) } const ( diff --git a/node/node_newblock_test.go b/node/node_newblock_test.go index 5780b7cda0..b8ca6c9e02 100644 --- a/node/node_newblock_test.go +++ b/node/node_newblock_test.go @@ -78,7 +78,7 @@ func TestFinalizeNewBlockAsync(t *testing.T) { t.Error("New block is not verified successfully:", err) } - node.Blockchain().InsertChain(types.Blocks{block}, false) + node.Blockchain().InsertChain(types.Blocks{block}, false, true) node.Worker.UpdateCurrent() From 9954a904aeb00684812016e26e20c48e61d4558a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= 
<36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 15:16:27 +0800 Subject: [PATCH 057/128] refactor staged stream sync to process fast sync and new block insertion --- api/service/stagedstreamsync/adapter.go | 2 +- api/service/stagedstreamsync/const.go | 13 +++++++ api/service/stagedstreamsync/sig_verify.go | 6 ++-- .../stagedstreamsync/stage_short_range.go | 4 ++- api/service/stagedstreamsync/stage_state.go | 35 ++++++++++++------- .../stagedstreamsync/staged_stream_sync.go | 7 ++-- api/service/stagedstreamsync/syncing.go | 8 +++-- 7 files changed, 52 insertions(+), 23 deletions(-) diff --git a/api/service/stagedstreamsync/adapter.go b/api/service/stagedstreamsync/adapter.go index ca9c6a6787..9988ccc51a 100644 --- a/api/service/stagedstreamsync/adapter.go +++ b/api/service/stagedstreamsync/adapter.go @@ -31,6 +31,6 @@ type blockChain interface { engine.ChainReader Engine() engine.Engine - InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) + InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) WriteCommitSig(blockNum uint64, lastCommits []byte) error } diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go index a41d2e859d..c87932bf73 100644 --- a/api/service/stagedstreamsync/const.go +++ b/api/service/stagedstreamsync/const.go @@ -40,6 +40,16 @@ const ( ShortRangeTimeout time.Duration = 1 * time.Minute ) +// SyncMode represents the synchronization mode of the downloader. +// It is a uint32 as it is used with atomic operations. +type SyncMode uint32 + +const ( + FullSync SyncMode = iota // Synchronize the entire blockchain history from full blocks + FastSync // Download all blocks and states + SnapSync // Download the chain and the state via compact snapshots +) + type ( // Config is the downloader config Config struct { @@ -47,6 +57,9 @@ type ( // TODO: remove this when stream sync is fully up. 
ServerOnly bool + // Synchronization mode of the downloader + SyncMode SyncMode + // parameters Network nodeconfig.NetworkType Concurrency int // Number of concurrent sync requests diff --git a/api/service/stagedstreamsync/sig_verify.go b/api/service/stagedstreamsync/sig_verify.go index 8de71effca..dc0b4cf4c5 100644 --- a/api/service/stagedstreamsync/sig_verify.go +++ b/api/service/stagedstreamsync/sig_verify.go @@ -20,16 +20,16 @@ func (e *sigVerifyErr) Error() string { return fmt.Sprintf("[VerifyHeaderSignature] %v", e.err.Error()) } -func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks) (int, error) { +func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks, blockExecution bool) (int, error) { for i, block := range blocks { - if err := verifyAndInsertBlock(bc, block, blocks[i+1:]...); err != nil { + if err := verifyAndInsertBlock(bc, block, blockExecution, blocks[i+1:]...); err != nil { return i, err } } return len(blocks), nil } -func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error { +func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool, nextBlocks ...*types.Block) error { var ( sigBytes bls.SerializedSignature bitmap []byte diff --git a/api/service/stagedstreamsync/stage_short_range.go b/api/service/stagedstreamsync/stage_short_range.go index ce6cdf36bc..a651490eb0 100644 --- a/api/service/stagedstreamsync/stage_short_range.go +++ b/api/service/stagedstreamsync/stage_short_range.go @@ -136,7 +136,9 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState) sh.streamsFailed(whitelist, "remote nodes cannot provide blocks with target hashes") } - n, err := verifyAndInsertBlocks(sr.configs.bc, blocks) + utils.Logger().Info().Int("num blocks", len(blocks)).Msg("getBlockByHashes result") + + n, err := verifyAndInsertBlocks(sr.configs.bc, blocks, true) numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n)) if err != nil { utils.Logger().Warn().Err(err).Int("blocks inserted", n).Msg("Insert block failed") diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index b8dfb18288..ea4775d1f1 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -19,12 +19,13 @@ type StageStates struct { configs StageStatesCfg } type StageStatesCfg struct { - bc core.BlockChain - db kv.RwDB - blockDBs []kv.RwDB - concurrency int - logger zerolog.Logger - logProgress bool + bc core.BlockChain + db kv.RwDB + blockDBs []kv.RwDB + concurrency int + blockExecution bool + logger zerolog.Logger + logProgress bool } func NewStageStates(cfg StageStatesCfg) *StageStates { @@ -38,16 +39,18 @@ func NewStageStatesCfg( db kv.RwDB, blockDBs []kv.RwDB, concurrency int, + blockExecution bool, logger zerolog.Logger, logProgress bool) StageStatesCfg { return StageStatesCfg{ - bc: bc, - db: db, - blockDBs: blockDBs, - concurrency: concurrency, - logger: logger, - logProgress: logProgress, + bc: bc, + db: db, + blockDBs: blockDBs, + concurrency: concurrency, + blockExecution: blockExecution, + logger: logger, + logProgress: logProgress, } } @@ -108,6 +111,8 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR fmt.Print("\033[s") // save the cursor position } + s.state.currentCycle.ReceiptHashes = make(map[uint64]common.Hash) + for i := currProgress + 1; i <= targetHeight; i++ { blkKey := marshalData(i) loopID, streamID := gbm.GetDownloadDetails(i) @@ -157,7 +162,7 
@@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR return ErrInvalidBlockNumber } - if err := verifyAndInsertBlock(stg.configs.bc, block); err != nil { + if err := verifyAndInsertBlock(stg.configs.bc, block, stg.configs.blockExecution); err != nil { stg.configs.logger.Warn().Err(err).Uint64("cycle target block", targetHeight). Uint64("block number", block.NumberU64()). Msg(WrapStagedSyncMsg("insert blocks failed in long range")) @@ -169,6 +174,10 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR return err } + // TODO: only for fast sync + // add receipt hash for next stage + s.state.currentCycle.ReceiptHashes[block.NumberU64()]=block.Header().ReceiptHash() + if invalidBlockRevert { if s.state.invalidBlock.Number == i { s.state.invalidBlock.resolve() diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index fea59b02aa..c5a201ef71 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -102,9 +102,10 @@ type Timing struct { } type SyncCycle struct { - Number uint64 - TargetHeight uint64 - lock sync.RWMutex + Number uint64 + TargetHeight uint64 + ReceiptHashes map[uint64]common.Hash + lock sync.RWMutex } func (s *StagedStreamSync) Len() int { return len(s.stages) } diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 6e0d01a4b7..c741151cd3 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -81,14 +81,17 @@ func CreateStagedSync(ctx context.Context, return nil, errInitDB } + fastSync := config.SyncMode == FastSync + stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) - stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) + stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, !fastSync, logger, config.LogProgress) stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress) - lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) + stageReceiptsCfg := NewStageReceiptsCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) + lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) stageFinishCfg := NewStageFinishCfg(mainDB) stages := DefaultStages(ctx, @@ -98,6 +101,7 @@ func CreateStagedSync(ctx context.Context, stageBodiesCfg, stageStateSyncCfg, stageStatesCfg, + stageReceiptsCfg, lastMileCfg, stageFinishCfg, ) From 352212744de65cd94cad254f27a7ebffc1870f67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 15:16:56 +0800 Subject: [PATCH 058/128] refactor stage receipts --- api/service/stagedstreamsync/stage_receipts.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index a9bffa30ff..71a46e0d27 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -110,7 +110,7 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR for i := 0; i != 
s.state.config.Concurrency; i++ { wg.Add(1) - go b.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, startTime) + go b.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, s, startTime) } wg.Wait() @@ -125,7 +125,7 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR } // runReceiptWorkerLoop creates a work loop for download receipts -func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time) { +func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, startTime time.Time) { currentBlock := int(b.configs.bc.CurrentBlock().NumberU64()) @@ -148,8 +148,13 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo } var hashes []common.Hash for _, bn := range batch { + /* + // TODO: check if we can directly use bc rather than receipt hashes map header := b.configs.bc.GetHeaderByNumber(bn) hashes = append(hashes, header.ReceiptHash()) + */ + receiptHash := s.state.currentCycle.ReceiptHashes[bn] + hashes = append(hashes, receiptHash) } receipts, stid, err := b.downloadReceipts(ctx, hashes) if err != nil { From 591f223e84d40f3e0aa2be43d8f70b5caa6552bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 15:18:12 +0800 Subject: [PATCH 059/128] fix block insertion in main.go --- test/chain/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/chain/main.go b/test/chain/main.go index 4b935292f0..d3f518dc20 100644 --- a/test/chain/main.go +++ b/test/chain/main.go @@ -134,7 +134,7 @@ func fundFaucetContract(chain core.BlockChain) { }() block, _ := contractworker. 
FinalizeNewBlock(commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil) - _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */) + _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */, true) if err != nil { fmt.Println(err) } @@ -184,7 +184,7 @@ func callFaucetContractToFundAnAddress(chain core.BlockChain) { block, _ := contractworker.FinalizeNewBlock( commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil, ) - _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */) + _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */, true) if err != nil { fmt.Println(err) } @@ -227,7 +227,7 @@ func main() { gen.SetShardID(0) gen.AddTx(pendingTxs[i].(*types.Transaction)) }) - if _, err := chain.InsertChain(blocks, true /* verifyHeaders */); err != nil { + if _, err := chain.InsertChain(blocks, true /* verifyHeaders */, true); err != nil { log.Fatal(err) } } From 7006e1568dca7128954cda88d4fc526f577b7704 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 15:22:23 +0800 Subject: [PATCH 060/128] goimports staged sync files --- api/service/stagedstreamsync/stage_receipts.go | 6 +++--- api/service/stagedstreamsync/stage_state.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index 71a46e0d27..692222c6cf 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -149,9 +149,9 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo var hashes []common.Hash for _, bn := range batch { /* - // TODO: check if we can directly use bc rather than receipt hashes map - header := b.configs.bc.GetHeaderByNumber(bn) - hashes = append(hashes, header.ReceiptHash()) + // TODO: check if we can directly use bc rather than receipt hashes map + header := b.configs.bc.GetHeaderByNumber(bn) + hashes = append(hashes, header.ReceiptHash()) */ receiptHash := s.state.currentCycle.ReceiptHashes[bn] hashes = append(hashes, receiptHash) diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index ea4775d1f1..e5cab702f2 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -176,7 +176,7 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR // TODO: only for fast sync // add receipt hash for next stage - s.state.currentCycle.ReceiptHashes[block.NumberU64()]=block.Header().ReceiptHash() + s.state.currentCycle.ReceiptHashes[block.NumberU64()] = block.Header().ReceiptHash() if invalidBlockRevert { if s.state.invalidBlock.Number == i { From 30de7c27e349595a74a9478dbd9c28f8b454d66a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 15:50:36 +0800 Subject: [PATCH 061/128] refactor stages list initialization based on the sync mode --- .../stagedstreamsync/default_stages.go | 111 +++++++++++++----- .../stagedstreamsync/staged_stream_sync.go | 18 ++- 2 files changed, 97 insertions(+), 32 deletions(-) diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go index e1bb8578db..3bebe8a00d 100644 --- a/api/service/stagedstreamsync/default_stages.go +++ 
b/api/service/stagedstreamsync/default_stages.go @@ -8,40 +8,91 @@ type ForwardOrder []SyncStageID type RevertOrder []SyncStageID type CleanUpOrder []SyncStageID -var DefaultForwardOrder = ForwardOrder{ - Heads, - SyncEpoch, - ShortRange, - BlockBodies, - States, - StateSync, - Receipts, - LastMile, - Finish, +var ( + StagesForwardOrder ForwardOrder + StagesRevertOrder RevertOrder + StagesCleanUpOrder CleanUpOrder +) + +func initStagesOrder(syncMode SyncMode) { + switch syncMode { + case FullSync: + initFullSyncStagesOrder() + case FastSync: + initFastSyncStagesOrder() + default: + panic("not supported sync mode") + } } -var DefaultRevertOrder = RevertOrder{ - Finish, - LastMile, - Receipts, - StateSync, - States, - BlockBodies, - ShortRange, - SyncEpoch, - Heads, +func initFullSyncStagesOrder() { + StagesForwardOrder = ForwardOrder{ + Heads, + SyncEpoch, + ShortRange, + BlockBodies, + States, + LastMile, + Finish, + } + + StagesRevertOrder = RevertOrder{ + Finish, + LastMile, + States, + BlockBodies, + ShortRange, + SyncEpoch, + Heads, + } + + StagesCleanUpOrder = CleanUpOrder{ + Finish, + LastMile, + States, + BlockBodies, + ShortRange, + SyncEpoch, + Heads, + } } -var DefaultCleanUpOrder = CleanUpOrder{ - Finish, - LastMile, - Receipts, - StateSync, - States, - BlockBodies, - ShortRange, - SyncEpoch, - Heads, +func initFastSyncStagesOrder() { + StagesForwardOrder = ForwardOrder{ + Heads, + SyncEpoch, + ShortRange, + BlockBodies, + States, + StateSync, + Receipts, + LastMile, + Finish, + } + + StagesRevertOrder = RevertOrder{ + Finish, + LastMile, + Receipts, + StateSync, + States, + BlockBodies, + ShortRange, + SyncEpoch, + Heads, + } + + StagesCleanUpOrder = CleanUpOrder{ + Finish, + LastMile, + Receipts, + StateSync, + States, + BlockBodies, + ShortRange, + SyncEpoch, + Heads, + } } func DefaultStages(ctx context.Context, diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index c5a201ef71..48a47f28d3 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -268,8 +268,21 @@ func New( logger zerolog.Logger, ) *StagedStreamSync { + // init stages order based on sync mode + initStagesOrder(config.SyncMode) + + forwardStages := make([]*Stage, len(stagesList)) + for i, stageIndex := range StagesForwardOrder { + for _, s := range stagesList { + if s.ID == stageIndex { + forwardStages[i] = s + break + } + } + } + revertStages := make([]*Stage, len(stagesList)) - for i, stageIndex := range DefaultRevertOrder { + for i, stageIndex := range StagesRevertOrder { for _, s := range stagesList { if s.ID == stageIndex { revertStages[i] = s @@ -277,8 +290,9 @@ func New( } } } + pruneStages := make([]*Stage, len(stagesList)) - for i, stageIndex := range DefaultCleanUpOrder { + for i, stageIndex := range StagesCleanUpOrder { for _, s := range stagesList { if s.ID == stageIndex { pruneStages[i] = s From f10dd1eec2dde9133b6e427ea35d48a42f94b1a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 3 Jul 2023 20:08:42 +0800 Subject: [PATCH 062/128] add SyncMode to configs --- cmd/harmony/default.go | 5 +++++ cmd/harmony/main.go | 1 + internal/configs/harmony/harmony.go | 1 + 3 files changed, 7 insertions(+) diff --git a/cmd/harmony/default.go b/cmd/harmony/default.go index 986a2f7f66..86ed4226a5 100644 --- a/cmd/harmony/default.go +++ b/cmd/harmony/default.go @@ -192,6 +192,7 @@ var 
defaultStagedSyncConfig = harmonyconfig.StagedSyncConfig{ var ( defaultMainnetSyncConfig = harmonyconfig.SyncConfig{ Enabled: false, + SyncMode: 0, Downloader: false, StagedSync: false, StagedSyncCfg: defaultStagedSyncConfig, @@ -207,6 +208,7 @@ var ( defaultTestNetSyncConfig = harmonyconfig.SyncConfig{ Enabled: true, + SyncMode: 0, Downloader: false, StagedSync: false, StagedSyncCfg: defaultStagedSyncConfig, @@ -222,6 +224,7 @@ var ( defaultLocalNetSyncConfig = harmonyconfig.SyncConfig{ Enabled: true, + SyncMode: 0, Downloader: true, StagedSync: true, StagedSyncCfg: defaultStagedSyncConfig, @@ -237,6 +240,7 @@ var ( defaultPartnerSyncConfig = harmonyconfig.SyncConfig{ Enabled: true, + SyncMode: 0, Downloader: true, StagedSync: false, StagedSyncCfg: defaultStagedSyncConfig, @@ -252,6 +256,7 @@ var ( defaultElseSyncConfig = harmonyconfig.SyncConfig{ Enabled: true, + SyncMode: 0, Downloader: true, StagedSync: false, StagedSyncCfg: defaultStagedSyncConfig, diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index 549237d1ce..a29698f407 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -1005,6 +1005,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har sConfig := stagedstreamsync.Config{ ServerOnly: !hc.Sync.Downloader, + SyncMode: stagedstreamsync.SyncMode(hc.Sync.SyncMode), Network: nodeconfig.NetworkType(hc.Network.NetworkType), Concurrency: hc.Sync.Concurrency, MinStreams: hc.Sync.MinPeers, diff --git a/internal/configs/harmony/harmony.go b/internal/configs/harmony/harmony.go index 2fcb200c42..7ff2501481 100644 --- a/internal/configs/harmony/harmony.go +++ b/internal/configs/harmony/harmony.go @@ -329,6 +329,7 @@ type PrometheusConfig struct { type SyncConfig struct { // TODO: Remove this bool after stream sync is fully up. Enabled bool // enable the stream sync protocol + SyncMode uint32 // sync mode (default:Full sync, 1: Fast Sync, 2: Snap Sync(not implemented yet)) Downloader bool // start the sync downloader client StagedSync bool // use staged sync StagedSyncCfg StagedSyncConfig // staged sync configurations From 498bcc0416ace1113486217f4fc85d6b124ac26e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 4 Jul 2023 15:01:32 +0800 Subject: [PATCH 063/128] fix state download manager failure message --- api/service/stagedstreamsync/state_download_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go index 1cd414757b..80a7583881 100644 --- a/api/service/stagedstreamsync/state_download_manager.go +++ b/api/service/stagedstreamsync/state_download_manager.go @@ -274,7 +274,7 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri } else if task.attempts[streamID] >= MaxTriesToFetchNodeData { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. - return fmt.Errorf("trie node %s failed with peer %s", task.hash.TerminalString(), task.attempts[streamID]) + return fmt.Errorf("trie node %s failed with peer %s (%d tries)", task.hash.TerminalString(), streamID, task.attempts[streamID]) } // Missing item, place into the retry queue. 
s.retries.addTrieTask(path, task)
@@ -291,7 +291,7 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri
 		} else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
 			// If we've requested the node too many times already, it may be a malicious
 			// sync where nobody has the right data. Abort.
-			return fmt.Errorf("byte code %s failed with peer %s", hash.TerminalString(), task.attempts[streamID])
+			return fmt.Errorf("byte code %s failed with peer %s (%d tries)", hash.TerminalString(), streamID, task.attempts[streamID])
 		}
 		// Missing item, place into the retry queue.
 		s.retries.addCodeTask(hash, task)
From 1f26944a33be0280ade1817c67396a310b0480f3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?=
 <36589218+GheisMohammadi@users.noreply.github.com>
Date: Wed, 5 Jul 2023 19:12:47 +0800
Subject: [PATCH 064/128] split verifyAndInsertBlock function to be able to
 reuse verification part

---
 api/service/stagedstreamsync/sig_verify.go | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/api/service/stagedstreamsync/sig_verify.go b/api/service/stagedstreamsync/sig_verify.go
index dc0b4cf4c5..bc204fb1a3 100644
--- a/api/service/stagedstreamsync/sig_verify.go
+++ b/api/service/stagedstreamsync/sig_verify.go
@@ -29,7 +29,7 @@ func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks, blockExecution bo
 	return len(blocks), nil
 }
 
-func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool, nextBlocks ...*types.Block) error {
+func verifyBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error {
 	var (
 		sigBytes bls.SerializedSignature
 		bitmap   []byte
@@ -61,7 +61,18 @@ func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool
 	case err != nil:
 		return errors.Wrap(err, "[InsertChain]")
 	default:
+	}
+	return nil
+}
 
+func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool, nextBlocks ...*types.Block) error {
+	// verify block
+	if err := verifyBlock(bc, block, nextBlocks...); err != nil {
+		return err
+	}
+	// insert block
+	if _, err := bc.InsertChain(types.Blocks{block}, false, blockExecution); err != nil {
+		return errors.Wrap(err, "[InsertChain]")
+	}
 	return nil
 }
From 7c3807a525882354b7f777b9497f182a6ccc0c09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?=
 <36589218+GheisMohammadi@users.noreply.github.com>
Date: Wed, 5 Jul 2023 19:14:13 +0800
Subject: [PATCH 065/128] refactor stage bodies to extract receipt hashes in
 this stage rather than stage state

---
 .../stagedstreamsync/default_stages.go        |  9 ++--
 api/service/stagedstreamsync/stage_bodies.go  | 46 +++++++++++++++++--
 api/service/stagedstreamsync/syncing.go       |  8 ++--
 3 files changed, 50 insertions(+), 13 deletions(-)

diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go
index 3bebe8a00d..60e9f49620 100644
--- a/api/service/stagedstreamsync/default_stages.go
+++ b/api/service/stagedstreamsync/default_stages.go
@@ -63,9 +63,8 @@ func initFastSyncStagesOrder() {
 		SyncEpoch,
 		ShortRange,
 		BlockBodies,
-		States,
-		StateSync,
 		Receipts,
+		StateSync,
 		LastMile,
 		Finish,
 	}
@@ -73,9 +72,8 @@ func initFastSyncStagesOrder() {
 	StagesRevertOrder = RevertOrder{
 		Finish,
 		LastMile,
-		Receipts,
 		StateSync,
-		States,
+		Receipts,
 		BlockBodies,
 		ShortRange,
 		SyncEpoch,
@@ -85,9 +83,8 @@ func initFastSyncStagesOrder() {
 	StagesCleanUpOrder = CleanUpOrder{
 		Finish,
 		LastMile,
-		Receipts,
 		StateSync,
-		States,
+		Receipts,
 		BlockBodies,
 		ShortRange,
SyncEpoch, diff --git a/api/service/stagedstreamsync/stage_bodies.go b/api/service/stagedstreamsync/stage_bodies.go index b5d92e3a1a..401a8bc6c7 100644 --- a/api/service/stagedstreamsync/stage_bodies.go +++ b/api/service/stagedstreamsync/stage_bodies.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/rlp" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" @@ -26,6 +27,7 @@ type StageBodiesCfg struct { concurrency int protocol syncProtocol isBeacon bool + extractReceiptHashes bool logProgress bool } @@ -35,7 +37,7 @@ func NewStageBodies(cfg StageBodiesCfg) *StageBodies { } } -func NewStageBodiesCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, logProgress bool) StageBodiesCfg { +func NewStageBodiesCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, extractReceiptHashes bool, logProgress bool) StageBodiesCfg { return StageBodiesCfg{ bc: bc, db: db, @@ -43,6 +45,7 @@ func NewStageBodiesCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concu concurrency: concurrency, protocol: protocol, isBeacon: isBeacon, + extractReceiptHashes: extractReceiptHashes, logProgress: logProgress, } } @@ -118,7 +121,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev for i := 0; i != s.state.config.Concurrency; i++ { wg.Add(1) - go b.runBlockWorkerLoop(ctx, s.state.gbm, &wg, i, startTime) + go b.runBlockWorkerLoop(ctx, s.state.gbm, &wg, i, s, startTime) } wg.Wait() @@ -133,7 +136,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev } // runBlockWorkerLoop creates a work loop for download blocks -func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time) { +func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, startTime time.Time) { currentBlock := int(b.configs.bc.CurrentBlock().NumberU64()) @@ -184,6 +187,12 @@ func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownload gbm.HandleRequestError(batch, err, stid) b.configs.protocol.RemoveStream(stid) } else { + if b.configs.extractReceiptHashes { + if err = b.verifyBlockAndExtractReceiptsData(blockBytes, sigBytes, s); err != nil { + gbm.HandleRequestError(batch, err, stid) + continue + } + } if err = b.saveBlocks(ctx, gbm.tx, batch, blockBytes, sigBytes, loopID, stid); err != nil { panic(ErrSaveBlocksToDbFailed) } @@ -204,6 +213,37 @@ func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownload } } +func (b *StageBodies) verifyBlockAndExtractReceiptsData(batchBlockBytes [][]byte, batchSigBytes [][]byte, s *StageState) error { + var block *types.Block + for i := uint64(0); i < uint64(len(batchBlockBytes)); i++ { + blockBytes := batchBlockBytes[i] + sigBytes := batchSigBytes[i] + if blockBytes == nil { + continue + } + if err := rlp.DecodeBytes(blockBytes, &block); err != nil { + utils.Logger().Error(). + Uint64("block number", i). 
+ Msg("block size invalid") + return ErrInvalidBlockBytes + } + if sigBytes != nil { + block.SetCurrentCommitSig(sigBytes) + } + + if block.NumberU64() != i { + return ErrInvalidBlockNumber + } + + if err := verifyBlock(b.configs.bc, block); err != nil { + return err + } + // add receipt hash for next stage + s.state.currentCycle.ReceiptHashes[block.NumberU64()] = block.Header().ReceiptHash() + } + return nil +} + // redownloadBadBlock tries to redownload the bad block from other streams func (b *StageBodies) redownloadBadBlock(ctx context.Context, s *StageState) error { diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index c741151cd3..7abf00e7a5 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -81,14 +81,14 @@ func CreateStagedSync(ctx context.Context, return nil, errInitDB } - fastSync := config.SyncMode == FastSync + blockExecution := config.SyncMode == FullSync + extractReceiptHashes := config.SyncMode == FastSync || config.SyncMode == SnapSync stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) - - stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) - stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, !fastSync, logger, config.LogProgress) + stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, extractReceiptHashes, config.LogProgress) + stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, blockExecution, logger, config.LogProgress) stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress) stageReceiptsCfg := NewStageReceiptsCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) From d4c85772e268445fd107e9e64509d89e12c8023b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 5 Jul 2023 19:18:35 +0800 Subject: [PATCH 066/128] goimports --- api/service/stagedstreamsync/stage_bodies.go | 28 ++++++++++---------- core/blockchain_impl.go | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/api/service/stagedstreamsync/stage_bodies.go b/api/service/stagedstreamsync/stage_bodies.go index 401a8bc6c7..6b2fb65edf 100644 --- a/api/service/stagedstreamsync/stage_bodies.go +++ b/api/service/stagedstreamsync/stage_bodies.go @@ -21,14 +21,14 @@ type StageBodies struct { } type StageBodiesCfg struct { - bc core.BlockChain - db kv.RwDB - blockDBs []kv.RwDB - concurrency int - protocol syncProtocol - isBeacon bool + bc core.BlockChain + db kv.RwDB + blockDBs []kv.RwDB + concurrency int + protocol syncProtocol + isBeacon bool extractReceiptHashes bool - logProgress bool + logProgress bool } func NewStageBodies(cfg StageBodiesCfg) *StageBodies { @@ -39,14 +39,14 @@ func NewStageBodies(cfg StageBodiesCfg) *StageBodies { func NewStageBodiesCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, extractReceiptHashes bool, logProgress bool) StageBodiesCfg { return StageBodiesCfg{ - bc: bc, - db: db, - blockDBs: blockDBs, - concurrency: concurrency, - protocol: protocol, - isBeacon: isBeacon, + bc: bc, + db: db, + blockDBs: blockDBs, + concurrency: concurrency, + protocol: 
protocol, + isBeacon: isBeacon, extractReceiptHashes: extractReceiptHashes, - logProgress: logProgress, + logProgress: logProgress, } } diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 8d9de8dbc3..efb26df072 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1659,7 +1659,7 @@ func (bc *BlockChainImpl) insertChainWithBlockExecution(chain types.Blocks, veri return 0, events, coalescedLogs, nil } -//receiptChain []types.Receipts, +// insertChainWithoutBlockExecution adds a set of blocks to blockchain without adding states func (bc *BlockChainImpl) insertChainWithoutBlockExecution(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { // Sanity check that we have something meaningful to import if len(chain) == 0 { From 8f818100a77be15ba97039836d5e9b881a9da503 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 5 Jul 2023 19:31:25 +0800 Subject: [PATCH 067/128] add InsertReceiptChain to blockchain interface --- core/blockchain.go | 3 + core/blockchain_impl.go | 165 ++++++++++++++++++++++++++++++++++++++++ core/blockchain_stub.go | 4 + 3 files changed, 172 insertions(+) diff --git a/core/blockchain.go b/core/blockchain.go index a286af1171..c66f26ed38 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -121,6 +121,9 @@ type BlockChain interface { // // After insertion is done, all accumulated events will be fired. InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) + // InsertReceiptChain attempts to complete an already existing header chain with + // transaction and receipt data. + InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) // LeaderRotationMeta returns the number of continuous blocks by the leader. 
LeaderRotationMeta() (publicKeyBytes []byte, epoch, count, shifts uint64, err error)
 
 	// BadBlocks returns a list of the last 'bad blocks' that
diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go
index efb26df072..c91c322b83 100644
--- a/core/blockchain_impl.go
+++ b/core/blockchain_impl.go
@@ -1192,6 +1192,171 @@ func (bc *BlockChainImpl) Rollback(chain []common.Hash) error {
 	return bc.removeInValidatorList(valsToRemove)
 }
 
+// SetReceiptsData computes all the non-consensus fields of the receipts
+func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
+	signer := types.MakeSigner(config, block.Epoch())
+	ethSigner := types.NewEIP155Signer(config.EthCompatibleChainID)
+
+	transactions, stakingTransactions, logIndex := block.Transactions(), block.StakingTransactions(), uint(0)
+	if len(transactions)+len(stakingTransactions) != len(receipts) {
+		return errors.New("transaction+stakingTransactions and receipt count mismatch")
+	}
+
+	// The used gas can be calculated based on previous receipts
+	if len(receipts) > 0 && len(transactions) > 0 {
+		receipts[0].GasUsed = receipts[0].CumulativeGasUsed
+	}
+	for j := 1; j < len(transactions); j++ {
+		// The transaction hash can be retrieved from the transaction itself
+		receipts[j].TxHash = transactions[j].Hash()
+		receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
+		// The contract address can be derived from the transaction itself
+		if transactions[j].To() == nil {
+			// Deriving the signer is expensive, only do if it's actually needed
+			var from common.Address
+			if transactions[j].IsEthCompatible() {
+				from, _ = types.Sender(ethSigner, transactions[j])
+			} else {
+				from, _ = types.Sender(signer, transactions[j])
+			}
+			receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
+		}
+		// The derived log fields can simply be set from the block and transaction
+		for k := 0; k < len(receipts[j].Logs); k++ {
+			receipts[j].Logs[k].BlockNumber = block.NumberU64()
+			receipts[j].Logs[k].BlockHash = block.Hash()
+			receipts[j].Logs[k].TxHash = receipts[j].TxHash
+			receipts[j].Logs[k].TxIndex = uint(j)
+			receipts[j].Logs[k].Index = logIndex
+			logIndex++
+		}
+	}
+
+	// The used gas can be calculated based on previous receipts
+	if len(receipts) > len(transactions) && len(stakingTransactions) > 0 {
+		receipts[len(transactions)].GasUsed = receipts[len(transactions)].CumulativeGasUsed
+	}
+	// in a block, txns are processed before staking txns
+	for j := len(transactions) + 1; j < len(transactions)+len(stakingTransactions); j++ {
+		// The transaction hash can be retrieved from the staking transaction itself;
+		// j indexes the receipts, so offset it by the plain transaction count
+		receipts[j].TxHash = stakingTransactions[j-len(transactions)].Hash()
+		receipts[j].GasUsed = receipts[j].CumulativeGasUsed - receipts[j-1].CumulativeGasUsed
+		// The derived log fields can simply be set from the block and transaction
+		for k := 0; k < len(receipts[j].Logs); k++ {
+			receipts[j].Logs[k].BlockNumber = block.NumberU64()
+			receipts[j].Logs[k].BlockHash = block.Hash()
+			receipts[j].Logs[k].TxHash = receipts[j].TxHash
+			receipts[j].Logs[k].TxIndex = uint(j)
+			receipts[j].Logs[k].Index = logIndex
+			logIndex++
+		}
+	}
+	return nil
+}
+
+// InsertReceiptChain attempts to complete an already existing header chain with
+// transaction and receipt data.
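+//
+// Minimal usage sketch (illustrative only; blocks and their matching
+// receipts are assumed to come from the receipts download stage):
+//
+//	if _, err := bc.InsertReceiptChain(blocks, receipts); err != nil {
+//		return err
+//	}
+//
+// Note that no state transition is executed here; only bodies, receipts
+// and transaction lookup entries are written.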
+func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { + // Do a sanity check that the provided chain is actually ordered and linked + for i := 1; i < len(blockChain); i++ { + if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { + utils.Logger().Error(). + Str("number", blockChain[i].Number().String()). + Str("hash", blockChain[i].Hash().Hex()). + Str("parent", blockChain[i].ParentHash().Hex()). + Str("prevnumber", blockChain[i-1].Number().String()). + Str("prevhash", blockChain[i-1].Hash().Hex()). + Msg("Non contiguous receipt insert") + return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), + blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) + } + } + + bc.chainmu.Lock() + defer bc.chainmu.Unlock() + + var ( + stats = struct{ processed, ignored int32 }{} + start = time.Now() + bytes = 0 + batch = bc.db.NewBatch() + ) + for i, block := range blockChain { + receipts := receiptChain[i] + // Short circuit insertion if shutting down or processing failed + if atomic.LoadInt32(&bc.procInterrupt) == 1 { + return 0, nil + } + // Short circuit if the owner header is unknown + if !bc.HasHeader(block.Hash(), block.NumberU64()) { + return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) + } + // Skip if the entire data is already known + if bc.HasBlock(block.Hash(), block.NumberU64()) { + stats.ignored++ + continue + } + // Compute all the non-consensus fields of the receipts + if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil { + return 0, fmt.Errorf("failed to set receipts data: %v", err) + } + // Write all the data out into the database + if err := rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil { + return 0, err + } + if err := rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts); err != nil { + return 0, err + } + if err := rawdb.WriteBlockTxLookUpEntries(batch, block); err != nil { + return 0, err + } + if err := rawdb.WriteBlockStxLookUpEntries(batch, block); err != nil { + return 0, err + } + + stats.processed++ + + if batch.ValueSize() >= ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return 0, err + } + bytes += batch.ValueSize() + batch.Reset() + } + } + if batch.ValueSize() > 0 { + bytes += batch.ValueSize() + if err := batch.Write(); err != nil { + return 0, err + } + } + + // Update the head fast sync block if better + bc.mu.Lock() + head := blockChain[len(blockChain)-1] + if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case + currentFastBlock := bc.CurrentFastBlock() + if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { + rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) + bc.currentFastBlock.Store(head) + headFastBlockGauge.Update(int64(head.NumberU64())) + } + } + bc.mu.Unlock() + + utils.Logger().Info(). + Int32("count", stats.processed). + Str("elapsed", common.PrettyDuration(time.Since(start)).String()). + Str("age", common.PrettyAge(time.Unix(head.Time().Int64(), 0)).String()). + Str("head", head.Number().String()). + Str("hash", head.Hash().Hex()). + Str("size", common.StorageSize(bytes).String()). + Int32("ignored", stats.ignored). 
+ Msg("Imported new block receipts") + + return 0, nil +} + var lastWrite uint64 func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block) (err error) { diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index 32a0b1c194..a1eb92a05c 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -136,6 +136,10 @@ func (a Stub) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution return 0, errors.Errorf("method InsertChain not implemented for %s", a.Name) } +func (a Stub) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) { + return 0, errors.Errorf("method InsertReceiptChain not implemented for %s", a.Name) +} + func (a Stub) BadBlocks() []BadBlock { return nil } From 57a77ab0f1c4c3cb0d0bd86b5167f4b4eaff3f1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 6 Jul 2023 23:17:15 +0800 Subject: [PATCH 068/128] refactor get receipts stage to use insertReceiptsChain --- .../receipt_download_manager.go | 2 +- .../stagedstreamsync/stage_receipts.go | 85 +++++++++++++++---- api/service/stagedstreamsync/stage_state.go | 4 - 3 files changed, 68 insertions(+), 23 deletions(-) diff --git a/api/service/stagedstreamsync/receipt_download_manager.go b/api/service/stagedstreamsync/receipt_download_manager.go index 2eaa3ca452..2bab10ade9 100644 --- a/api/service/stagedstreamsync/receipt_download_manager.go +++ b/api/service/stagedstreamsync/receipt_download_manager.go @@ -77,7 +77,7 @@ func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error, s } // HandleRequestResult handles get receipts result -func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receipts []*types.Receipt, loopID int, streamID sttypes.StreamID) error { +func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receipts []types.Receipts, loopID int, streamID sttypes.StreamID) error { rdm.lock.Lock() defer rdm.lock.Unlock() diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index 692222c6cf..5faa553f84 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" @@ -89,6 +90,22 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR // size := uint64(0) startTime := time.Now() // startBlock := currProgress + + // prepare db transactions + txs := make([]kv.RwTx, b.configs.concurrency) + for i := 0; i < b.configs.concurrency; i++ { + txs[i], err = b.configs.blockDBs[i].BeginRw(ctx) + if err != nil { + return err + } + } + + defer func() { + for i := 0; i < b.configs.concurrency; i++ { + txs[i].Rollback() + } + }() + if b.configs.logProgress { fmt.Print("\033[s") // save the cursor position } @@ -110,7 +127,7 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR for i := 0; i != s.state.config.Concurrency; i++ { wg.Add(1) - go b.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, s, startTime) + go b.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, s, txs, startTime) } wg.Wait() @@ -125,9 +142,10 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR } // runReceiptWorkerLoop creates a work loop for 
download receipts -func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, startTime time.Time) { +func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, txs []kv.RwTx, startTime time.Time) { currentBlock := int(b.configs.bc.CurrentBlock().NumberU64()) + gbm := s.state.gbm defer wg.Done() @@ -137,6 +155,7 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo return default: } + // get next batch of block numbers batch := rdm.GetNextBatch() if len(batch) == 0 { select { @@ -146,16 +165,43 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo return } } + // retrieve corresponding blocks from cache db var hashes []common.Hash + var blocks []*types.Block for _, bn := range batch { - /* - // TODO: check if we can directly use bc rather than receipt hashes map - header := b.configs.bc.GetHeaderByNumber(bn) - hashes = append(hashes, header.ReceiptHash()) - */ - receiptHash := s.state.currentCycle.ReceiptHashes[bn] - hashes = append(hashes, receiptHash) + blkKey := marshalData(bn) + loopID, _ := gbm.GetDownloadDetails(bn) + blockBytes, err := txs[loopID].GetOne(BlocksBucket, blkKey) + if err != nil { + return + } + sigBytes, err := txs[loopID].GetOne(BlockSignaturesBucket, blkKey) + if err != nil { + return + } + sz := len(blockBytes) + if sz <= 1 { + return + } + var block *types.Block + if err := rlp.DecodeBytes(blockBytes, &block); err != nil { + return + } + if sigBytes != nil { + block.SetCurrentCommitSig(sigBytes) + } + if block.NumberU64() != bn { + return + } + if block.Header().ReceiptHash() == emptyHash { + return + } + // receiptHash := s.state.currentCycle.ReceiptHashes[bn] + hashes = append(hashes, block.Header().ReceiptHash()) + blocks = append(blocks, block) } + + // download receipts receipts, stid, err := b.downloadReceipts(ctx, hashes) if err != nil { if !errors.Is(err, context.Canceled) { @@ -176,7 +222,17 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo err := errors.New("downloadRawBlocks received empty reciptBytes") rdm.HandleRequestError(batch, err, stid) } else { + // insert block and receipts to chain + if inserted, err := b.configs.bc.InsertReceiptChain(blocks, receipts); err != nil { + + } else { + if inserted != len(blocks) { + + } + } + rdm.HandleRequestResult(batch, receipts, loopID, stid) + if b.configs.logProgress { //calculating block download speed dt := time.Now().Sub(startTime).Seconds() @@ -193,7 +249,7 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo } } -func (b *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]*types.Receipt, sttypes.StreamID, error) { +func (b *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]types.Receipts, sttypes.StreamID, error) { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -207,14 +263,7 @@ func (b *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) return receipts, stid, nil } -func (b *StageReceipts) downloadRawBlocks(ctx context.Context, bns []uint64) ([][]byte, [][]byte, sttypes.StreamID, error) { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - return b.configs.protocol.GetRawBlocksByNumber(ctx, bns) -} - -func validateGetReceiptsResult(requested []common.Hash, result []*types.Receipt) error { +func 
validateGetReceiptsResult(requested []common.Hash, result []types.Receipts) error { // TODO: validate each receipt here return nil diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index e5cab702f2..a5297b9031 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -174,10 +174,6 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR return err } - // TODO: only for fast sync - // add receipt hash for next stage - s.state.currentCycle.ReceiptHashes[block.NumberU64()] = block.Header().ReceiptHash() - if invalidBlockRevert { if s.state.invalidBlock.Number == i { s.state.invalidBlock.resolve() From bcf1b770a1ede44e3e66212c7bacefa0e592d67c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Fri, 7 Jul 2023 00:24:33 +0800 Subject: [PATCH 069/128] remove using currentCycle, cleanup stage receipts --- api/service/stagedstreamsync/stage_bodies.go | 2 - .../stagedstreamsync/stage_receipts.go | 137 +++++++----------- api/service/stagedstreamsync/stage_state.go | 2 - .../stagedstreamsync/staged_stream_sync.go | 1 - 4 files changed, 51 insertions(+), 91 deletions(-) diff --git a/api/service/stagedstreamsync/stage_bodies.go b/api/service/stagedstreamsync/stage_bodies.go index 6b2fb65edf..d2ed95c9d1 100644 --- a/api/service/stagedstreamsync/stage_bodies.go +++ b/api/service/stagedstreamsync/stage_bodies.go @@ -238,8 +238,6 @@ func (b *StageBodies) verifyBlockAndExtractReceiptsData(batchBlockBytes [][]byte if err := verifyBlock(b.configs.bc, block); err != nil { return err } - // add receipt hash for next stage - s.state.currentCycle.ReceiptHashes[block.NumberU64()] = block.Header().ReceiptHash() } return nil } diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index 5faa553f84..fb0af99bc0 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -48,8 +48,8 @@ func NewStageReceiptsCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, con } } -// Exec progresses Bodies stage in the forward direction -func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { +// Exec progresses receipts stage in the forward direction +func (r *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { useInternalTx := tx == nil @@ -63,14 +63,14 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR } maxHeight := s.state.status.targetBN - currentHead := b.configs.bc.CurrentBlock().NumberU64() + currentHead := r.configs.bc.CurrentBlock().NumberU64() if currentHead >= maxHeight { return nil } currProgress := uint64(0) targetHeight := s.state.currentCycle.TargetHeight - if errV := CreateView(ctx, b.configs.db, tx, func(etx kv.Tx) error { + if errV := CreateView(ctx, r.configs.db, tx, func(etx kv.Tx) error { if currProgress, err = s.CurrentStageProgress(etx); err != nil { return err } @@ -92,27 +92,27 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR // startBlock := currProgress // prepare db transactions - txs := make([]kv.RwTx, b.configs.concurrency) - for i := 0; i < b.configs.concurrency; i++ { - txs[i], err = b.configs.blockDBs[i].BeginRw(ctx) + txs := 
make([]kv.RwTx, r.configs.concurrency) + for i := 0; i < r.configs.concurrency; i++ { + txs[i], err = r.configs.blockDBs[i].BeginRw(ctx) if err != nil { return err } } defer func() { - for i := 0; i < b.configs.concurrency; i++ { + for i := 0; i < r.configs.concurrency; i++ { txs[i].Rollback() } }() - if b.configs.logProgress { + if r.configs.logProgress { fmt.Print("\033[s") // save the cursor position } if useInternalTx { var err error - tx, err = b.configs.db.BeginRw(ctx) + tx, err = r.configs.db.BeginRw(ctx) if err != nil { return err } @@ -120,14 +120,14 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR } // Fetch blocks from neighbors - s.state.rdm = newReceiptDownloadManager(tx, b.configs.bc, targetHeight, s.state.logger) + s.state.rdm = newReceiptDownloadManager(tx, r.configs.bc, targetHeight, s.state.logger) // Setup workers to fetch blocks from remote node var wg sync.WaitGroup for i := 0; i != s.state.config.Concurrency; i++ { wg.Add(1) - go b.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, s, txs, startTime) + go r.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, s, txs, startTime) } wg.Wait() @@ -142,9 +142,9 @@ func (b *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR } // runReceiptWorkerLoop creates a work loop for download receipts -func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, txs []kv.RwTx, startTime time.Time) { +func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, txs []kv.RwTx, startTime time.Time) { - currentBlock := int(b.configs.bc.CurrentBlock().NumberU64()) + currentBlock := int(r.configs.bc.CurrentBlock().NumberU64()) gbm := s.state.gbm defer wg.Done() @@ -202,10 +202,10 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo } // download receipts - receipts, stid, err := b.downloadReceipts(ctx, hashes) + receipts, stid, err := r.downloadReceipts(ctx, hashes) if err != nil { if !errors.Is(err, context.Canceled) { - b.configs.protocol.StreamFailed(stid, "downloadRawBlocks failed") + r.configs.protocol.StreamFailed(stid, "downloadRawBlocks failed") } utils.Logger().Error(). Err(err). @@ -223,37 +223,46 @@ func (b *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo rdm.HandleRequestError(batch, err, stid) } else { // insert block and receipts to chain - if inserted, err := b.configs.bc.InsertReceiptChain(blocks, receipts); err != nil { - + if inserted, err := r.configs.bc.InsertReceiptChain(blocks, receipts); err != nil { + utils.Logger().Err(err). + Str("stream", string(stid)). + Interface("block numbers", batch). + Msg(WrapStagedSyncMsg("InsertReceiptChain failed")) + err := errors.New("InsertReceiptChain failed") + rdm.HandleRequestError(batch, err, stid) } else { if inserted != len(blocks) { - + utils.Logger().Warn(). + Interface("block numbers", batch). + Int("inserted", inserted). + Int("blocks to insert", len(blocks)). 
+ Msg(WrapStagedSyncMsg("InsertReceiptChain couldn't insert all downloaded blocks/receipts")) } } - + // handle request result rdm.HandleRequestResult(batch, receipts, loopID, stid) - - if b.configs.logProgress { + // log progress + if r.configs.logProgress { //calculating block download speed dt := time.Now().Sub(startTime).Seconds() speed := float64(0) if dt > 0 { speed = float64(len(rdm.rdd)) / dt } - blockSpeed := fmt.Sprintf("%.2f", speed) + blockReceiptSpeed := fmt.Sprintf("%.2f", speed) fmt.Print("\033[u\033[K") // restore the cursor position and clear the line - fmt.Println("downloaded blocks:", currentBlock+len(rdm.rdd), "/", int(rdm.targetBN), "(", blockSpeed, "blocks/s", ")") + fmt.Println("downloaded blocks and receipts:", currentBlock+len(rdm.rdd), "/", int(rdm.targetBN), "(", blockReceiptSpeed, "BlocksAndReceipts/s", ")") } } } } -func (b *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]types.Receipts, sttypes.StreamID, error) { +func (r *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]types.Receipts, sttypes.StreamID, error) { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - receipts, stid, err := b.configs.protocol.GetReceipts(ctx, hs) + receipts, stid, err := r.configs.protocol.GetReceipts(ctx, hs) if err != nil { return nil, stid, err } @@ -269,11 +278,11 @@ func validateGetReceiptsResult(requested []common.Hash, result []types.Receipts) return nil } -func (b *StageReceipts) saveProgress(ctx context.Context, s *StageState, progress uint64, tx kv.RwTx) (err error) { +func (r *StageReceipts) saveProgress(ctx context.Context, s *StageState, progress uint64, tx kv.RwTx) (err error) { useInternalTx := tx == nil if useInternalTx { var err error - tx, err = b.configs.db.BeginRw(ctx) + tx, err = r.configs.db.BeginRw(ctx) if err != nil { return err } @@ -284,7 +293,7 @@ func (b *StageReceipts) saveProgress(ctx context.Context, s *StageState, progres if err = s.Update(tx, progress); err != nil { utils.Logger().Error(). Err(err). - Msgf("[STAGED_SYNC] saving progress for block bodies stage failed") + Msgf("[STAGED_SYNC] saving progress for receipt stage failed") return ErrSavingBodiesProgressFail } @@ -296,72 +305,37 @@ func (b *StageReceipts) saveProgress(ctx context.Context, s *StageState, progres return nil } -func (b *StageReceipts) cleanBlocksDB(ctx context.Context, loopID int) (err error) { - tx, errb := b.configs.blockDBs[loopID].BeginRw(ctx) - if errb != nil { - return errb - } - defer tx.Rollback() - - // clean block bodies db - if err = tx.ClearBucket(BlocksBucket); err != nil { - utils.Logger().Error(). - Err(err). - Msgf("[STAGED_STREAM_SYNC] clear blocks bucket after revert failed") - return err - } - // clean block signatures db - if err = tx.ClearBucket(BlockSignaturesBucket); err != nil { - utils.Logger().Error(). - Err(err). 
- Msgf("[STAGED_STREAM_SYNC] clear block signatures bucket after revert failed") - return err +func (r *StageReceipts) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) { + useInternalTx := tx == nil + if useInternalTx { + tx, err = r.configs.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() } - if err = tx.Commit(); err != nil { + if err = u.Done(tx); err != nil { return err } - return nil -} - -func (b *StageReceipts) cleanAllBlockDBs(ctx context.Context) (err error) { - //clean all blocks DBs - for i := 0; i < b.configs.concurrency; i++ { - if err := b.cleanBlocksDB(ctx, i); err != nil { + if useInternalTx { + if err = tx.Commit(); err != nil { return err } } return nil } -func (b *StageReceipts) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) { - - //clean all blocks DBs - if err := b.cleanAllBlockDBs(ctx); err != nil { - return err - } - +func (r *StageReceipts) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) { useInternalTx := tx == nil if useInternalTx { - tx, err = b.configs.db.BeginRw(ctx) + tx, err = r.configs.db.BeginRw(ctx) if err != nil { return err } defer tx.Rollback() } - // save progress - currentHead := b.configs.bc.CurrentBlock().NumberU64() - if err = s.Update(tx, currentHead); err != nil { - utils.Logger().Error(). - Err(err). - Msgf("[STAGED_SYNC] saving progress for block bodies stage after revert failed") - return err - } - - if err = u.Done(tx); err != nil { - return err - } if useInternalTx { if err = tx.Commit(); err != nil { @@ -370,12 +344,3 @@ func (b *StageReceipts) Revert(ctx context.Context, firstCycle bool, u *RevertSt } return nil } - -func (b *StageReceipts) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) { - //clean all blocks DBs - if err := b.cleanAllBlockDBs(ctx); err != nil { - return err - } - - return nil -} diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index a5297b9031..f579019f6c 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -111,8 +111,6 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR fmt.Print("\033[s") // save the cursor position } - s.state.currentCycle.ReceiptHashes = make(map[uint64]common.Hash) - for i := currProgress + 1; i <= targetHeight; i++ { blkKey := marshalData(i) loopID, streamID := gbm.GetDownloadDetails(i) diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 48a47f28d3..786f597792 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -104,7 +104,6 @@ type Timing struct { type SyncCycle struct { Number uint64 TargetHeight uint64 - ReceiptHashes map[uint64]common.Hash lock sync.RWMutex } From 6f9a1ecda1c0003a2095ceb95c1258b661e24586 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Fri, 7 Jul 2023 00:25:12 +0800 Subject: [PATCH 070/128] goimports --- api/service/stagedstreamsync/staged_stream_sync.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 786f597792..5abe45b54e 100644 --- 
a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -102,9 +102,9 @@ type Timing struct { } type SyncCycle struct { - Number uint64 - TargetHeight uint64 - lock sync.RWMutex + Number uint64 + TargetHeight uint64 + lock sync.RWMutex } func (s *StagedStreamSync) Len() int { return len(s.stages) } From 12235f5c5c81c89b7e74216c1c5ff9159a9de1ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 10 Jul 2023 17:38:14 +0800 Subject: [PATCH 071/128] fix stages forward order for staged stream sync --- .../stagedstreamsync/staged_stream_sync.go | 3 --- api/service/stagedstreamsync/syncing.go | 18 ++++++++++++++++-- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 5abe45b54e..29832ef18c 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -267,9 +267,6 @@ func New( logger zerolog.Logger, ) *StagedStreamSync { - // init stages order based on sync mode - initStagesOrder(config.SyncMode) - forwardStages := make([]*Stage, len(stagesList)) for i, stageIndex := range StagesForwardOrder { for _, s := range stagesList { diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 7abf00e7a5..b2c1aacdfa 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -91,10 +91,13 @@ func CreateStagedSync(ctx context.Context, stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, blockExecution, logger, config.LogProgress) stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress) stageReceiptsCfg := NewStageReceiptsCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) - lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) + lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) stageFinishCfg := NewStageFinishCfg(mainDB) - stages := DefaultStages(ctx, + // init stages order based on sync mode + initStagesOrder(config.SyncMode) + + defaultStages := DefaultStages(ctx, stageHeadsCfg, stageSyncEpochCfg, stageShortRangeCfg, @@ -115,6 +118,17 @@ func CreateStagedSync(ctx context.Context, Int("minStreams", config.MinStreams). 
Msg(WrapStagedSyncMsg("staged sync created successfully")) + var stages []*Stage + // if any of the default stages doesn't exist in forward order, delete it from the list of stages + for _, stg := range defaultStages { + for _, stageID := range StagesForwardOrder { + if stg.ID == stageID { + stages = append(stages, stg) + break + } + } + } + return New( bc, consensus, From f6b89516568fba4064f60756acac98a99c8e1894 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 13 Jul 2023 19:31:58 +0800 Subject: [PATCH 072/128] add SyncMode to flags --- cmd/harmony/flags.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cmd/harmony/flags.go b/cmd/harmony/flags.go index 46a1decb06..2af21cb24c 100644 --- a/cmd/harmony/flags.go +++ b/cmd/harmony/flags.go @@ -238,6 +238,7 @@ var ( syncFlags = []cli.Flag{ syncStreamEnabledFlag, + syncModeFlag, syncDownloaderFlag, syncStagedSyncFlag, syncConcurrencyFlag, @@ -1876,6 +1877,13 @@ var ( Usage: "Enable the stream sync protocol (experimental feature)", DefValue: false, } + + syncModeFlag = cli.IntFlag{ + Name: "sync.mode", + Usage: "synchronization mode of the downloader (0=FullSync, 1=FastSync, 2=SnapSync)", + DefValue: 0, + } + // TODO: Deprecate this flag, and always set to true after stream sync is fully up. syncDownloaderFlag = cli.BoolFlag{ Name: "sync.downloader", @@ -1937,6 +1945,10 @@ func applySyncFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) { config.Sync.Enabled = cli.GetBoolFlagValue(cmd, syncStreamEnabledFlag) } + if cli.IsFlagChanged(cmd, syncModeFlag) { + config.Sync.SyncMode = uint32(cli.GetIntFlagValue(cmd, syncModeFlag)) + } + if cli.IsFlagChanged(cmd, syncDownloaderFlag) { config.Sync.Downloader = cli.GetBoolFlagValue(cmd, syncDownloaderFlag) } From cd7ccbe827eb86e70cb972a6e82b68fbe29d28ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 16 Aug 2023 18:11:51 +0800 Subject: [PATCH 073/128] fix stages and replace with forward stages --- api/service/stagedstreamsync/staged_stream_sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 29832ef18c..7d94126b0c 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -317,7 +317,7 @@ func New( inserted: 0, config: config, logger: logger, - stages: stagesList, + stages: forwardStages, currentStage: 0, revertOrder: revertStages, pruningOrder: pruneStages, From 772d8658e1ed1b3c6b2a8132806e4eef34b687ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 6 Sep 2023 23:14:56 +0800 Subject: [PATCH 074/128] fix block validation in stage bodies --- api/service/stagedstreamsync/stage_bodies.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/api/service/stagedstreamsync/stage_bodies.go b/api/service/stagedstreamsync/stage_bodies.go index d2ed95c9d1..8cce5207ca 100644 --- a/api/service/stagedstreamsync/stage_bodies.go +++ b/api/service/stagedstreamsync/stage_bodies.go @@ -187,12 +187,6 @@ func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownload gbm.HandleRequestError(batch, err, stid) b.configs.protocol.RemoveStream(stid) } else { - if b.configs.extractReceiptHashes { - if err 
= b.verifyBlockAndExtractReceiptsData(blockBytes, sigBytes, s); err != nil {
-					gbm.HandleRequestError(batch, err, stid)
-					continue
-				}
-			}
 			if err = b.saveBlocks(ctx, gbm.tx, batch, blockBytes, sigBytes, loopID, stid); err != nil {
 				panic(ErrSaveBlocksToDbFailed)
 			}
@@ -231,10 +225,9 @@ func (b *StageBodies) verifyBlockAndExtractReceiptsData(batchBlockBytes [][]byte
 			block.SetCurrentCommitSig(sigBytes)
 		}
 
-		if block.NumberU64() != i {
-			return ErrInvalidBlockNumber
-		}
-
+		// if block.NumberU64() != i {
+		// 	return ErrInvalidBlockNumber
+		// }
 		if err := verifyBlock(b.configs.bc, block); err != nil {
 			return err
 		}

From c1d352b8955a9250cf43fc9a3a5f7b1052fdbd46 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?=
 <36589218+GheisMohammadi@users.noreply.github.com>
Date: Wed, 27 Sep 2023 00:35:40 +0800
Subject: [PATCH 075/128] add pivot to chain accessor, add CurrentFastBlock to
 blockchain_impl, fix receipt insertion

---
 core/blockchain.go                 |  5 +++++
 core/blockchain_impl.go            | 23 +++++++++--------------
 core/blockchain_stub.go            |  4 ++++
 core/rawdb/accessors_chain.go      |  5 ++++-
 p2p/stream/protocols/sync/chain.go |  5 ++++-
 5 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go
index c66f26ed38..766bfb668b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -52,6 +52,11 @@ type BlockChain interface {
 	// CurrentBlock retrieves the current head block of the canonical chain. The
 	// block is retrieved from the blockchain's internal cache.
 	CurrentBlock() *types.Block
+	// CurrentFastBlock retrieves the current fast-sync head block of the canonical
+	// chain. The block is retrieved from the blockchain's internal cache.
+	CurrentFastBlock() *types.Block
+	// Validator returns the current validator.
+	Validator() Validator
 	// Processor returns the current processor.
 	Processor() Processor
 	// State returns a new mutable state based on the current HEAD block.
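Taken together, these additions give fast sync two heads that move independently: CurrentFastBlock tracks downloaded bodies and receipts, while CurrentBlock tracks executed state. A minimal sketch of the intended call pattern follows; it is illustrative only (the helper name advanceFastHead is invented here, bc is assumed to implement core.BlockChain, blocks and their matching receipts are assumed to come from the receipts stage, and the harmony core, core/types and internal/utils packages are assumed imported):

	func advanceFastHead(bc core.BlockChain, blocks types.Blocks, receipts []types.Receipts) error {
		// Persist bodies and receipts without executing transactions,
		// so only the fast-sync head advances here.
		if _, err := bc.InsertReceiptChain(blocks, receipts); err != nil {
			return err
		}
		// The executed head lags behind until the pivot state is downloaded.
		utils.Logger().Info().
			Uint64("fastHead", bc.CurrentFastBlock().NumberU64()).
			Uint64("executedHead", bc.CurrentBlock().NumberU64()).
			Msg("fast sync advanced")
		return nil
	}
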
diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index c91c322b83..e9360dc15f 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1285,11 +1285,14 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha receipts := receiptChain[i] // Short circuit insertion if shutting down or processing failed if atomic.LoadInt32(&bc.procInterrupt) == 1 { - return 0, nil + return 0, fmt.Errorf("Premature abort during blocks processing") } - // Short circuit if the owner header is unknown + // Add header if the owner header is unknown if !bc.HasHeader(block.Hash(), block.NumberU64()) { - return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) + if err := rawdb.WriteHeader(batch, block.Header()); err != nil { + return 0, err + } + // return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4]) } // Skip if the entire data is already known if bc.HasBlock(block.Hash(), block.NumberU64()) { @@ -1332,17 +1335,9 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha } // Update the head fast sync block if better - bc.mu.Lock() head := blockChain[len(blockChain)-1] - if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case - currentFastBlock := bc.CurrentFastBlock() - if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { - rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) - bc.currentFastBlock.Store(head) - headFastBlockGauge.Update(int64(head.NumberU64())) - } - } - bc.mu.Unlock() + rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) + bc.currentFastBlock.Store(head) utils.Logger().Info(). Int32("count", stats.processed). @@ -1354,7 +1349,7 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha Int32("ignored", stats.ignored). Msg("Imported new block receipts") - return 0, nil + return int(stats.processed), nil } var lastWrite uint64 diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index a1eb92a05c..3b9713804c 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -49,6 +49,10 @@ func (a Stub) CurrentBlock() *types.Block { return nil } +func (a Stub) CurrentFastBlock() *types.Block { + return nil +} + func (a Stub) Validator() Validator { return nil } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 72ce358e29..b01dc09655 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -597,14 +597,17 @@ func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 { } // WriteLastPivotNumber stores the number of the last pivot block. 
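+// A round-trip sketch (illustrative only; db is assumed to be any
+// ethdb.KeyValueStore, and pivot the block number chosen as the
+// state-sync pivot):
+//
+//	if err := rawdb.WriteLastPivotNumber(db, pivot); err != nil {
+//		return err
+//	}
+//	if p := rawdb.ReadLastPivotNumber(db); p != nil {
+//		// resume state sync from *p after a restart
+//	}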
-func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) { +func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) error { enc, err := rlp.EncodeToBytes(pivot) if err != nil { utils.Logger().Error().Err(err).Msg("Failed to encode pivot block number") + return err } if err := db.Put(lastPivotKey, enc); err != nil { utils.Logger().Error().Err(err).Msg("Failed to store pivot block number") + return err } + return nil } // ReadTxIndexTail retrieves the number of oldest indexed block diff --git a/p2p/stream/protocols/sync/chain.go b/p2p/stream/protocols/sync/chain.go index efabd9307c..aa4dced3f5 100644 --- a/p2p/stream/protocols/sync/chain.go +++ b/p2p/stream/protocols/sync/chain.go @@ -171,7 +171,10 @@ func (ch *chainHelperImpl) getNodeData(hs []common.Hash) ([][]byte, error) { entry, err = ch.chain.ValidatorCode(hash) } } - if err == nil && len(entry) > 0 { + if err != nil { + return nil, err + } + if len(entry) > 0 { nodes = append(nodes, entry) bytes += len(entry) } From 8d66bdf078d51e5b00b56156033b359bb84e4198 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 28 Sep 2023 00:24:16 +0800 Subject: [PATCH 076/128] add getBlockByMaxVote to sync helper --- api/service/stagedstreamsync/helpers.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/api/service/stagedstreamsync/helpers.go b/api/service/stagedstreamsync/helpers.go index 75e504214f..96c1c22b07 100644 --- a/api/service/stagedstreamsync/helpers.go +++ b/api/service/stagedstreamsync/helpers.go @@ -73,6 +73,27 @@ func checkGetBlockByHashesResult(blocks []*types.Block, hashes []common.Hash) er return nil } +func getBlockByMaxVote(blocks []*types.Block) (*types.Block, error) { + hashesVote := make(map[common.Hash]int) + maxVote := int(-1) + maxVotedBlockIndex := int(0) + + for i, block := range blocks { + if block == nil { + continue + } + hashesVote[block.Header().Hash()]++ + if hashesVote[block.Header().Hash()] > maxVote { + maxVote = hashesVote[block.Header().Hash()] + maxVotedBlockIndex = i + } + } + if maxVote < 0 { + return nil, ErrInvalidBlockBytes + } + return blocks[maxVotedBlockIndex], nil +} + func countHashMaxVote(m map[sttypes.StreamID]common.Hash, whitelist map[sttypes.StreamID]struct{}) (common.Hash, map[sttypes.StreamID]struct{}) { var ( voteM = make(map[common.Hash]int) From 917a3019e244b3d3705c87152316b27d3ce9ef5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 3 Oct 2023 14:35:40 +0800 Subject: [PATCH 077/128] add tests for node data request --- p2p/stream/protocols/sync/stream_test.go | 25 ++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/p2p/stream/protocols/sync/stream_test.go b/p2p/stream/protocols/sync/stream_test.go index 3b538c14b8..cd3a3f061b 100644 --- a/p2p/stream/protocols/sync/stream_test.go +++ b/p2p/stream/protocols/sync/stream_test.go @@ -51,14 +51,14 @@ var ( testGetReceiptsRequest = syncpb.MakeGetReceiptsRequest(testGetReceipts) testGetReceiptsRequestMsg = syncpb.MakeMessageFromRequest(testGetReceiptsRequest) - testGetNodeData = []common.Hash{ + testGetNodes = []common.Hash{ numberToHash(1), numberToHash(2), numberToHash(3), numberToHash(4), numberToHash(5), } - testGetNodeDataRequest = syncpb.MakeGetNodeDataRequest(testGetNodeData) + testGetNodeDataRequest = syncpb.MakeGetNodeDataRequest(testGetNodes) testGetNodeDataRequestMsg = 
syncpb.MakeMessageFromRequest(testGetNodeDataRequest) maxBytes = uint64(500) @@ -296,6 +296,27 @@ func TestSyncStream_HandleGetTrieNodes(t *testing.T) { } } +func TestSyncStream_HandleGetNodeData(t *testing.T) { + st, remoteSt := makeTestSyncStream() + + go st.run() + defer close(st.closeC) + + req := testGetNodeDataRequestMsg + b, _ := protobuf.Marshal(req) + err := remoteSt.WriteBytes(b) + if err != nil { + t.Fatal(err) + } + + time.Sleep(200 * time.Millisecond) + receivedBytes, _ := remoteSt.ReadBytes() + + if err := checkGetNodeDataResult(receivedBytes, testGetBlockByHashes); err != nil { + t.Fatal(err) + } +} + func makeTestSyncStream() (*syncStream, *testRemoteBaseStream) { localRaw, remoteRaw := makePairP2PStreams() remote := newTestRemoteBaseStream(remoteRaw) From 7c21eef5ef8b50d482f7c42fa6b840aad4c6baa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 3 Oct 2023 17:20:45 +0800 Subject: [PATCH 078/128] fix stream tests --- p2p/stream/protocols/sync/stream_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/p2p/stream/protocols/sync/stream_test.go b/p2p/stream/protocols/sync/stream_test.go index cd3a3f061b..9511de2ce1 100644 --- a/p2p/stream/protocols/sync/stream_test.go +++ b/p2p/stream/protocols/sync/stream_test.go @@ -51,14 +51,14 @@ var ( testGetReceiptsRequest = syncpb.MakeGetReceiptsRequest(testGetReceipts) testGetReceiptsRequestMsg = syncpb.MakeMessageFromRequest(testGetReceiptsRequest) - testGetNodes = []common.Hash{ + testGetNodeData = []common.Hash{ numberToHash(1), numberToHash(2), numberToHash(3), numberToHash(4), numberToHash(5), } - testGetNodeDataRequest = syncpb.MakeGetNodeDataRequest(testGetNodes) + testGetNodeDataRequest = syncpb.MakeGetNodeDataRequest(testGetNodeData) testGetNodeDataRequestMsg = syncpb.MakeMessageFromRequest(testGetNodeDataRequest) maxBytes = uint64(500) @@ -312,7 +312,7 @@ func TestSyncStream_HandleGetNodeData(t *testing.T) { time.Sleep(200 * time.Millisecond) receivedBytes, _ := remoteSt.ReadBytes() - if err := checkGetNodeDataResult(receivedBytes, testGetBlockByHashes); err != nil { + if err := checkGetNodeDataResult(receivedBytes, testGetNodeData); err != nil { t.Fatal(err) } } From d534fea261fa9b9ba7403b478592b05260cc7aff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 3 Oct 2023 17:22:18 +0800 Subject: [PATCH 079/128] add Validator method to blockchain to fix the interface --- core/blockchain_impl.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index e9360dc15f..2fbdb92d7c 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -685,6 +685,17 @@ func (bc *BlockChainImpl) CurrentBlock() *types.Block { return bc.currentBlock.Load().(*types.Block) } +// CurrentFastBlock retrieves the current fast-sync head block of the canonical +// chain. The block is retrieved from the blockchain's internal cache. +func (bc *BlockChainImpl) CurrentFastBlock() *types.Block { + return bc.currentFastBlock.Load().(*types.Block) +} + +// Validator returns the current validator. 
+func (bc *BlockChainImpl) Validator() Validator { + return bc.validator +} + func (bc *BlockChainImpl) Processor() Processor { return bc.processor } From e96855b6ff7523d04a37f44cdbf82065abf609df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 3 Oct 2023 17:22:46 +0800 Subject: [PATCH 080/128] fix shard chain test --- core_test/shardchain_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core_test/shardchain_test.go b/core_test/shardchain_test.go index a6a9238bab..ad091482b7 100644 --- a/core_test/shardchain_test.go +++ b/core_test/shardchain_test.go @@ -72,7 +72,7 @@ func TestAddNewBlock(t *testing.T) { nn := node.Blockchain().CurrentBlock() t.Log("[*]", nn.NumberU64(), nn.Hash().Hex(), nn.ParentHash()) - _, err = blockchain.InsertChain([]*types.Block{block}, false) + _, err = blockchain.InsertChain([]*types.Block{block}, false, true) require.NoError(t, err, "error when adding new block") meta := blockchain.LeaderRotationMeta() From ebd689f1a0b6297e9b906487ced785a5fc4eaccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 4 Oct 2023 00:51:58 +0800 Subject: [PATCH 081/128] remove blockExecution option from insertChain --- cmd/harmony/main.go | 2 +- core/blockchain.go | 2 +- core/blockchain_impl.go | 13 +++---------- core/blockchain_stub.go | 2 +- core/epochchain.go | 2 +- core_test/shardchain_test.go | 2 +- hmy/downloader/adapter.go | 2 +- hmy/downloader/adapter_test.go | 6 +++--- hmy/downloader/beaconhelper.go | 2 +- hmy/downloader/downloader.go | 2 +- hmy/downloader/shortrange.go | 2 +- node/node_handler_test.go | 2 +- node/node_newblock_test.go | 2 +- test/chain/main.go | 6 +++--- 14 files changed, 20 insertions(+), 27 deletions(-) diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index a29698f407..ec05e2419f 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -1017,7 +1017,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har SmDiscBatch: hc.Sync.DiscBatch, UseMemDB: hc.Sync.StagedSyncCfg.UseMemDB, LogProgress: hc.Sync.StagedSyncCfg.LogProgress, - DebugMode: hc.Sync.StagedSyncCfg.DebugMode, + DebugMode: true, // hc.Sync.StagedSyncCfg.DebugMode, } // If we are running side chain, we will need to do some extra works for beacon diff --git a/core/blockchain.go b/core/blockchain.go index 766bfb668b..f6f50e71f4 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -125,7 +125,7 @@ type BlockChain interface { // wrong. // // After insertion is done, all accumulated events will be fired. - InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) + InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) // InsertReceiptChain attempts to complete an already existing header chain with // transaction and receipt data. 
InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 2fbdb92d7c..97660544da 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -1530,7 +1530,7 @@ func (bc *BlockChainImpl) GetMaxGarbageCollectedBlockNumber() int64 { return bc.maxGarbCollectedBlkNum } -func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) { +func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) { // if in tikv mode, writer node need preempt master or come be a follower if bc.isInitTiKV() && !bc.tikvPreemptMaster(bc.rangeBlock(chain)) { return len(chain), nil @@ -1574,17 +1574,10 @@ func (bc *BlockChainImpl) LeaderRotationMeta() LeaderRotationMeta { return bc.leaderRotationMeta.Clone() } -func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, []interface{}, []*types.Log, error) { - if blockExecution { - return bc.insertChainWithBlockExecution(chain, verifyHeaders) - } - return bc.insertChainWithoutBlockExecution(chain, verifyHeaders) -} - // insertChain will execute the actual chain insertion and event aggregation. The // only reason this method exists as a separate one is to make locking cleaner // with deferred statements. -func (bc *BlockChainImpl) insertChainWithBlockExecution(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { +func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { // Sanity check that we have something meaningful to import if len(chain) == 0 { return 0, nil, nil, ErrEmptyChain @@ -1696,7 +1689,7 @@ func (bc *BlockChainImpl) insertChainWithBlockExecution(chain types.Blocks, veri if len(winner) > 0 { // Import all the pruned blocks to make the state available bc.chainmu.Unlock() - _, evs, logs, err := bc.insertChainWithBlockExecution(winner, true /* verifyHeaders */) + _, evs, logs, err := bc.insertChain(winner, true /* verifyHeaders */) bc.chainmu.Lock() events, coalescedLogs = evs, logs diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go index 3b9713804c..437bc32e77 100644 --- a/core/blockchain_stub.go +++ b/core/blockchain_stub.go @@ -136,7 +136,7 @@ func (a Stub) GetMaxGarbageCollectedBlockNumber() int64 { return 0 } -func (a Stub) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) { +func (a Stub) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) { return 0, errors.Errorf("method InsertChain not implemented for %s", a.Name) } diff --git a/core/epochchain.go b/core/epochchain.go index 3df271b11b..2dab284713 100644 --- a/core/epochchain.go +++ b/core/epochchain.go @@ -114,7 +114,7 @@ func (bc *EpochChain) Stop() { }) } -func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool, _ bool) (int, error) { +func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool) (int, error) { if len(blocks) == 0 { return 0, nil } diff --git a/core_test/shardchain_test.go b/core_test/shardchain_test.go index ad091482b7..a6a9238bab 100644 --- a/core_test/shardchain_test.go +++ b/core_test/shardchain_test.go @@ -72,7 +72,7 @@ func TestAddNewBlock(t *testing.T) { nn := node.Blockchain().CurrentBlock() t.Log("[*]", nn.NumberU64(), nn.Hash().Hex(), nn.ParentHash()) - _, err = blockchain.InsertChain([]*types.Block{block}, false, true) + _, err = blockchain.InsertChain([]*types.Block{block}, false) 
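+	// With the blockExecution flag gone, InsertChain always executes the
+	// block (header verification stays behind the verifyHeaders flag);
+	// non-executing imports go through InsertReceiptChain instead.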
require.NoError(t, err, "error when adding new block") meta := blockchain.LeaderRotationMeta() diff --git a/hmy/downloader/adapter.go b/hmy/downloader/adapter.go index 70e4ca3257..c8758b506d 100644 --- a/hmy/downloader/adapter.go +++ b/hmy/downloader/adapter.go @@ -27,6 +27,6 @@ type blockChain interface { engine.ChainReader Engine() engine.Engine - InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) + InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) WriteCommitSig(blockNum uint64, lastCommits []byte) error } diff --git a/hmy/downloader/adapter_test.go b/hmy/downloader/adapter_test.go index 3b76640078..4bc023b5cc 100644 --- a/hmy/downloader/adapter_test.go +++ b/hmy/downloader/adapter_test.go @@ -60,7 +60,7 @@ func (bc *testBlockChain) currentBlockNumber() uint64 { return bc.curBN } -func (bc *testBlockChain) InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) { +func (bc *testBlockChain) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) { bc.lock.Lock() defer bc.lock.Unlock() @@ -169,11 +169,11 @@ type testInsertHelper struct { } func (ch *testInsertHelper) verifyAndInsertBlock(block *types.Block) error { - _, err := ch.bc.InsertChain(types.Blocks{block}, true, true) + _, err := ch.bc.InsertChain(types.Blocks{block}, true) return err } func (ch *testInsertHelper) verifyAndInsertBlocks(blocks types.Blocks) (int, error) { - return ch.bc.InsertChain(blocks, true, true) + return ch.bc.InsertChain(blocks, true) } const ( diff --git a/hmy/downloader/beaconhelper.go b/hmy/downloader/beaconhelper.go index 2c7f056754..96d06ebf8e 100644 --- a/hmy/downloader/beaconhelper.go +++ b/hmy/downloader/beaconhelper.go @@ -123,7 +123,7 @@ func (bh *beaconHelper) insertLastMileBlocks() (inserted int, bn uint64, err err } // TODO: Instruct the beacon helper to verify signatures. 
This may require some forks // in pub-sub message (add commit sigs in node.block.sync messages) - if _, err = bh.bc.InsertChain(types.Blocks{b}, true, true); err != nil { + if _, err = bh.bc.InsertChain(types.Blocks{b}, true); err != nil { bn-- return } diff --git a/hmy/downloader/downloader.go b/hmy/downloader/downloader.go index 378b1e6301..9e132fd27e 100644 --- a/hmy/downloader/downloader.go +++ b/hmy/downloader/downloader.go @@ -314,7 +314,7 @@ func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool if err := bc.Engine().VerifyHeader(bc, block.Header(), true); err != nil { return errors.Wrap(err, "[VerifyHeader]") } - if _, err := bc.InsertChain(types.Blocks{block}, false, blockExecution); err != nil { + if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil { return errors.Wrap(err, "[InsertChain]") } return nil diff --git a/hmy/downloader/shortrange.go b/hmy/downloader/shortrange.go index 2a705f99a4..81adc8131f 100644 --- a/hmy/downloader/shortrange.go +++ b/hmy/downloader/shortrange.go @@ -131,7 +131,7 @@ func (d *Downloader) doShortRangeSyncForEpochSync() (int, error) { // short circuit for no sync is needed return 0, nil } - n, err := d.bc.InsertChain(blocks, true, true) + n, err := d.bc.InsertChain(blocks, true) numBlocksInsertedShortRangeHistogramVec.With(d.promLabels()).Observe(float64(n)) if err != nil { sh.removeStreams([]sttypes.StreamID{streamID}) // Data provided by remote nodes is corrupted diff --git a/node/node_handler_test.go b/node/node_handler_test.go index 23c5498fed..867a9616dc 100644 --- a/node/node_handler_test.go +++ b/node/node_handler_test.go @@ -69,7 +69,7 @@ func TestAddNewBlock(t *testing.T) { commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil, ) - _, err = node.Blockchain().InsertChain([]*types.Block{block}, true, true) + _, err = node.Blockchain().InsertChain([]*types.Block{block}, true) if err != nil { t.Errorf("error when adding new block %v", err) } diff --git a/node/node_newblock_test.go b/node/node_newblock_test.go index b8ca6c9e02..5780b7cda0 100644 --- a/node/node_newblock_test.go +++ b/node/node_newblock_test.go @@ -78,7 +78,7 @@ func TestFinalizeNewBlockAsync(t *testing.T) { t.Error("New block is not verified successfully:", err) } - node.Blockchain().InsertChain(types.Blocks{block}, false, true) + node.Blockchain().InsertChain(types.Blocks{block}, false) node.Worker.UpdateCurrent() diff --git a/test/chain/main.go b/test/chain/main.go index d3f518dc20..4b935292f0 100644 --- a/test/chain/main.go +++ b/test/chain/main.go @@ -134,7 +134,7 @@ func fundFaucetContract(chain core.BlockChain) { }() block, _ := contractworker. 
FinalizeNewBlock(commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil) - _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */, true) + _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */) if err != nil { fmt.Println(err) } @@ -184,7 +184,7 @@ func callFaucetContractToFundAnAddress(chain core.BlockChain) { block, _ := contractworker.FinalizeNewBlock( commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil, ) - _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */, true) + _, err = chain.InsertChain(types.Blocks{block}, true /* verifyHeaders */) if err != nil { fmt.Println(err) } @@ -227,7 +227,7 @@ func main() { gen.SetShardID(0) gen.AddTx(pendingTxs[i].(*types.Transaction)) }) - if _, err := chain.InsertChain(blocks, true /* verifyHeaders */, true); err != nil { + if _, err := chain.InsertChain(blocks, true /* verifyHeaders */); err != nil { log.Fatal(err) } } From 36d2abd0a418cb09e8075b106df9deb161ba8cb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 4 Oct 2023 09:52:35 +0800 Subject: [PATCH 082/128] remove extra blockExecutions --- hmy/downloader/downloader.go | 6 +++--- hmy/downloader/longrange.go | 2 +- hmy/downloader/shortrange.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hmy/downloader/downloader.go b/hmy/downloader/downloader.go index 9e132fd27e..01ec242abb 100644 --- a/hmy/downloader/downloader.go +++ b/hmy/downloader/downloader.go @@ -280,16 +280,16 @@ func (e *sigVerifyErr) Error() string { return fmt.Sprintf("[VerifyHeaderSignature] %v", e.err.Error()) } -func verifyAndInsertBlocks(bc blockChain, blockExecution bool, blocks types.Blocks) (int, error) { +func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks) (int, error) { for i, block := range blocks { - if err := verifyAndInsertBlock(bc, block, blockExecution, blocks[i+1:]...); err != nil { + if err := verifyAndInsertBlock(bc, block, blocks[i+1:]...); err != nil { return i, err } } return len(blocks), nil } -func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool, nextBlocks ...*types.Block) error { +func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error { var ( sigBytes bls.SerializedSignature bitmap []byte diff --git a/hmy/downloader/longrange.go b/hmy/downloader/longrange.go index fc4d4962f4..4d4935b8f2 100644 --- a/hmy/downloader/longrange.go +++ b/hmy/downloader/longrange.go @@ -210,7 +210,7 @@ func (lsi *lrSyncIter) processBlocks(results []*blockResult, targetBN uint64) { blocks := blockResultsToBlocks(results) for i, block := range blocks { - if err := verifyAndInsertBlock(lsi.bc, block, true); err != nil { + if err := verifyAndInsertBlock(lsi.bc, block); err != nil { lsi.logger.Warn().Err(err).Uint64("target block", targetBN). Uint64("block number", block.NumberU64()). 
Msg("insert blocks failed in long range") diff --git a/hmy/downloader/shortrange.go b/hmy/downloader/shortrange.go index 81adc8131f..8276911d4f 100644 --- a/hmy/downloader/shortrange.go +++ b/hmy/downloader/shortrange.go @@ -74,7 +74,7 @@ func (d *Downloader) doShortRangeSync() (int, error) { } d.logger.Info().Int("num blocks", len(blocks)).Msg("getBlockByHashes result") - n, err := verifyAndInsertBlocks(d.bc, true, blocks) + n, err := verifyAndInsertBlocks(d.bc, blocks) numBlocksInsertedShortRangeHistogramVec.With(d.promLabels()).Observe(float64(n)) if err != nil { d.logger.Warn().Err(err).Int("blocks inserted", n).Msg("Insert block failed") From 9629d9ca56caad41cb7cceae59b6fc31865d618f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 4 Oct 2023 10:02:07 +0800 Subject: [PATCH 083/128] remove blockExecution option from staged stream sync --- api/service/stagedstreamsync/adapter.go | 2 +- api/service/stagedstreamsync/sig_verify.go | 8 ++++---- api/service/stagedstreamsync/stage_short_range.go | 2 +- api/service/stagedstreamsync/stage_state.go | 12 ++++++++---- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/api/service/stagedstreamsync/adapter.go b/api/service/stagedstreamsync/adapter.go index 9988ccc51a..ca9c6a6787 100644 --- a/api/service/stagedstreamsync/adapter.go +++ b/api/service/stagedstreamsync/adapter.go @@ -31,6 +31,6 @@ type blockChain interface { engine.ChainReader Engine() engine.Engine - InsertChain(chain types.Blocks, verifyHeaders bool, blockExecution bool) (int, error) + InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) WriteCommitSig(blockNum uint64, lastCommits []byte) error } diff --git a/api/service/stagedstreamsync/sig_verify.go b/api/service/stagedstreamsync/sig_verify.go index bc204fb1a3..bdf5a21077 100644 --- a/api/service/stagedstreamsync/sig_verify.go +++ b/api/service/stagedstreamsync/sig_verify.go @@ -20,9 +20,9 @@ func (e *sigVerifyErr) Error() string { return fmt.Sprintf("[VerifyHeaderSignature] %v", e.err.Error()) } -func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks, blockExecution bool) (int, error) { +func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks) (int, error) { for i, block := range blocks { - if err := verifyAndInsertBlock(bc, block, blockExecution, blocks[i+1:]...); err != nil { + if err := verifyAndInsertBlock(bc, block, blocks[i+1:]...); err != nil { return i, err } } @@ -65,13 +65,13 @@ func verifyBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) return nil } -func verifyAndInsertBlock(bc blockChain, block *types.Block, blockExecution bool, nextBlocks ...*types.Block) error { +func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error { //verify block if err := verifyBlock(bc, block, nextBlocks...); err != nil { return err } // insert block - if _, err := bc.InsertChain(types.Blocks{block}, false, blockExecution); err != nil { + if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil { return errors.Wrap(err, "[InsertChain]") } return nil diff --git a/api/service/stagedstreamsync/stage_short_range.go b/api/service/stagedstreamsync/stage_short_range.go index a651490eb0..d771cd6606 100644 --- a/api/service/stagedstreamsync/stage_short_range.go +++ b/api/service/stagedstreamsync/stage_short_range.go @@ -138,7 +138,7 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState) utils.Logger().Info().Int("num blocks", 
len(blocks)).Msg("getBlockByHashes result") - n, err := verifyAndInsertBlocks(sr.configs.bc, blocks, true) + n, err := verifyAndInsertBlocks(sr.configs.bc, blocks) numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n)) if err != nil { utils.Logger().Warn().Err(err).Int("blocks inserted", n).Msg("Insert block failed") diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index f579019f6c..1294edf703 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -23,7 +23,6 @@ type StageStatesCfg struct { db kv.RwDB blockDBs []kv.RwDB concurrency int - blockExecution bool logger zerolog.Logger logProgress bool } @@ -39,7 +38,6 @@ func NewStageStatesCfg( db kv.RwDB, blockDBs []kv.RwDB, concurrency int, - blockExecution bool, logger zerolog.Logger, logProgress bool) StageStatesCfg { @@ -48,7 +46,6 @@ func NewStageStatesCfg( db: db, blockDBs: blockDBs, concurrency: concurrency, - blockExecution: blockExecution, logger: logger, logProgress: logProgress, } @@ -56,6 +53,13 @@ func NewStageStatesCfg( // Exec progresses States stage in the forward direction func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { + // only execute this stage in full sync mode + if s.state.config.SyncMode != FullSync { + if s.state.status.pivotBlock != nil && s.state.bc.CurrentBlock().NumberU64() <= s.state.status.pivotBlock.NumberU64() { + return nil + } + } + // for short range sync, skip this step if !s.state.initSync { return nil @@ -160,7 +164,7 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR return ErrInvalidBlockNumber } - if err := verifyAndInsertBlock(stg.configs.bc, block, stg.configs.blockExecution); err != nil { + if err := verifyAndInsertBlock(stg.configs.bc, block); err != nil { stg.configs.logger.Warn().Err(err).Uint64("cycle target block", targetHeight). Uint64("block number", block.NumberU64()). 
Msg(WrapStagedSyncMsg("insert blocks failed in long range")) From e4dcda67e48457369eda114d48c0ed855a6222fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 4 Oct 2023 10:17:22 +0800 Subject: [PATCH 084/128] refactor staged stream sync, fix the state sync functions --- api/service/stagedstreamsync/block_manager.go | 48 +++-- api/service/stagedstreamsync/const.go | 4 + .../stagedstreamsync/default_stages.go | 3 + .../receipt_download_manager.go | 45 +++-- api/service/stagedstreamsync/stage_bodies.go | 9 +- api/service/stagedstreamsync/stage_heads.go | 11 +- .../stagedstreamsync/stage_receipts.go | 155 ++++++++++------ api/service/stagedstreamsync/stage_state.go | 24 +-- .../stagedstreamsync/stage_statesync.go | 82 ++++++--- .../stagedstreamsync/staged_stream_sync.go | 16 +- .../state_download_manager.go | 170 ++++++++++++++---- api/service/stagedstreamsync/syncing.go | 144 ++++++++++++++- api/service/stagedstreamsync/types.go | 7 +- 13 files changed, 533 insertions(+), 185 deletions(-) diff --git a/api/service/stagedstreamsync/block_manager.go b/api/service/stagedstreamsync/block_manager.go index 28c966b4d6..273078c591 100644 --- a/api/service/stagedstreamsync/block_manager.go +++ b/api/service/stagedstreamsync/block_manager.go @@ -3,6 +3,7 @@ package stagedstreamsync import ( "sync" + "github.com/ethereum/go-ethereum/common" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/ledgerwatch/erigon-lib/kv" "github.com/rs/zerolog" @@ -11,6 +12,7 @@ import ( type BlockDownloadDetails struct { loopID int streamID sttypes.StreamID + rootHash common.Hash } // blockDownloadManager is the helper structure for get blocks request management @@ -19,11 +21,11 @@ type blockDownloadManager struct { tx kv.RwTx targetBN uint64 - requesting map[uint64]struct{} // block numbers that have been assigned to workers but not received - processing map[uint64]struct{} // block numbers received requests but not inserted - retries *prioritizedNumbers // requests where error happens - rq *resultQueue // result queue wait to be inserted into blockchain - bdd map[uint64]BlockDownloadDetails // details about how this block was downloaded + requesting map[uint64]struct{} // block numbers that have been assigned to workers but not received + processing map[uint64]struct{} // block numbers received requests but not inserted + retries *prioritizedNumbers // requests where error happens + rq *resultQueue // result queue wait to be inserted into blockchain + bdd map[uint64]*BlockDownloadDetails // details about how this block was downloaded logger zerolog.Logger lock sync.Mutex @@ -38,26 +40,26 @@ func newBlockDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, logg processing: make(map[uint64]struct{}), retries: newPrioritizedNumbers(), rq: newResultQueue(), - bdd: make(map[uint64]BlockDownloadDetails), + bdd: make(map[uint64]*BlockDownloadDetails), logger: logger, } } // GetNextBatch get the next block numbers batch -func (gbm *blockDownloadManager) GetNextBatch() []uint64 { +func (gbm *blockDownloadManager) GetNextBatch(curHeight uint64) []uint64 { gbm.lock.Lock() defer gbm.lock.Unlock() cap := BlocksPerRequest - bns := gbm.getBatchFromRetries(cap) + bns := gbm.getBatchFromRetries(cap, curHeight) if len(bns) > 0 { cap -= len(bns) gbm.addBatchToRequesting(bns) } if gbm.availableForMoreTasks() { - addBNs := gbm.getBatchFromUnprocessed(cap) + addBNs := gbm.getBatchFromUnprocessed(cap, curHeight) 
gbm.addBatchToRequesting(addBNs) bns = append(bns, addBNs...) } @@ -88,7 +90,7 @@ func (gbm *blockDownloadManager) HandleRequestResult(bns []uint64, blockBytes [] gbm.retries.push(bn) } else { gbm.processing[bn] = struct{}{} - gbm.bdd[bn] = BlockDownloadDetails{ + gbm.bdd[bn] = &BlockDownloadDetails{ loopID: loopID, streamID: streamID, } @@ -107,7 +109,7 @@ func (gbm *blockDownloadManager) SetDownloadDetails(bns []uint64, loopID int, st defer gbm.lock.Unlock() for _, bn := range bns { - gbm.bdd[bn] = BlockDownloadDetails{ + gbm.bdd[bn] = &BlockDownloadDetails{ loopID: loopID, streamID: streamID, } @@ -123,18 +125,33 @@ func (gbm *blockDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID return gbm.bdd[blockNumber].loopID, gbm.bdd[blockNumber].streamID } +// SetRootHash sets the root hash for a specific block +func (gbm *blockDownloadManager) SetRootHash(blockNumber uint64, root common.Hash) { + gbm.lock.Lock() + defer gbm.lock.Unlock() + + gbm.bdd[blockNumber].rootHash = root +} + +// GetRootHash returns the root hash for a specific block +func (gbm *blockDownloadManager) GetRootHash(blockNumber uint64) common.Hash { + gbm.lock.Lock() + defer gbm.lock.Unlock() + + return gbm.bdd[blockNumber].rootHash +} + // getBatchFromRetries get the block number batch to be requested from retries. -func (gbm *blockDownloadManager) getBatchFromRetries(cap int) []uint64 { +func (gbm *blockDownloadManager) getBatchFromRetries(cap int, fromBlockNumber uint64) []uint64 { var ( requestBNs []uint64 - curHeight = gbm.chain.CurrentBlock().NumberU64() ) for cnt := 0; cnt < cap; cnt++ { bn := gbm.retries.pop() if bn == 0 { break // no more retries } - if bn <= curHeight { + if bn <= fromBlockNumber { continue } requestBNs = append(requestBNs, bn) @@ -143,10 +160,9 @@ func (gbm *blockDownloadManager) getBatchFromRetries(cap int) []uint64 { } // getBatchFromUnprocessed returns a batch of block numbers to be requested from unprocessed. -func (gbm *blockDownloadManager) getBatchFromUnprocessed(cap int) []uint64 { +func (gbm *blockDownloadManager) getBatchFromUnprocessed(cap int, curHeight uint64) []uint64 { var ( requestBNs []uint64 - curHeight = gbm.chain.CurrentBlock().NumberU64() ) bn := curHeight + 1 // TODO: this algorithm can be potentially optimized. diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go index c87932bf73..e172854ec9 100644 --- a/api/service/stagedstreamsync/const.go +++ b/api/service/stagedstreamsync/const.go @@ -38,6 +38,10 @@ const ( // ShortRangeTimeout is the timeout for each short range sync, which allow short range sync // to restart automatically when stuck in `getBlockHashes` ShortRangeTimeout time.Duration = 1 * time.Minute + + // pivot block distance ranges + MinPivotDistanceToHead uint64 = 1028 + MaxPivotDistanceToHead uint64 = 2048 ) // SyncMode represents the synchronization mode of the downloader. 
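The two distance constants above determine where the fast/snap sync pivot sits relative to the estimated chain tip. A minimal sketch (not part of the patch) of the rule they encode; the function name pivotFor is hypothetical, and the authoritative logic is the checkPivot method added to syncing.go later in this series:

    // Sketch only, not part of the patch: pivotFor is a hypothetical helper
    // showing how the two constants interact. See checkPivot in syncing.go
    // (patch 084) for the real logic.
    func pivotFor(estimatedHeight uint64) uint64 {
    	if estimatedHeight < MaxPivotDistanceToHead {
    		return 0 // chain is still young: no pivot, do a plain full sync
    	}
    	// place the pivot MinPivotDistanceToHead blocks behind the estimated
    	// tip; it is only moved forward again once it falls more than
    	// MaxPivotDistanceToHead blocks behind a newer tip
    	return estimatedHeight - MinPivotDistanceToHead
    }

So with an estimated height of 10,000 the pivot would land at 8,972: in this design, bodies and receipts are downloaded up to 8,971, the state trie is fetched at the pivot, and normal block execution resumes past it.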
diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go index 60e9f49620..f869ee5feb 100644 --- a/api/service/stagedstreamsync/default_stages.go +++ b/api/service/stagedstreamsync/default_stages.go @@ -65,6 +65,7 @@ func initFastSyncStagesOrder() { BlockBodies, Receipts, StateSync, + States, LastMile, Finish, } @@ -72,6 +73,7 @@ func initFastSyncStagesOrder() { StagesRevertOrder = RevertOrder{ Finish, LastMile, + States, StateSync, Receipts, BlockBodies, @@ -83,6 +85,7 @@ func initFastSyncStagesOrder() { StagesCleanUpOrder = CleanUpOrder{ Finish, LastMile, + States, StateSync, Receipts, BlockBodies, diff --git a/api/service/stagedstreamsync/receipt_download_manager.go b/api/service/stagedstreamsync/receipt_download_manager.go index 2bab10ade9..55d9490828 100644 --- a/api/service/stagedstreamsync/receipt_download_manager.go +++ b/api/service/stagedstreamsync/receipt_download_manager.go @@ -10,10 +10,15 @@ import ( ) type ReceiptDownloadDetails struct { - loopID int streamID sttypes.StreamID } +type Received struct { + streamID sttypes.StreamID + block *types.Block + receipts types.Receipts +} + // receiptDownloadManager is the helper structure for get receipts request management type receiptDownloadManager struct { chain blockChain @@ -25,6 +30,8 @@ type receiptDownloadManager struct { retries *prioritizedNumbers // requests where error happens rdd map[uint64]ReceiptDownloadDetails // details about how this receipt was downloaded + received map[uint64]Received + logger zerolog.Logger lock sync.Mutex } @@ -38,25 +45,27 @@ func newReceiptDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, lo processing: make(map[uint64]struct{}), retries: newPrioritizedNumbers(), rdd: make(map[uint64]ReceiptDownloadDetails), - logger: logger, + received: make(map[uint64]Received), + + logger: logger, } } // GetNextBatch get the next receipt numbers batch -func (rdm *receiptDownloadManager) GetNextBatch() []uint64 { +func (rdm *receiptDownloadManager) GetNextBatch(curHeight uint64) []uint64 { rdm.lock.Lock() defer rdm.lock.Unlock() cap := ReceiptsPerRequest - bns := rdm.getBatchFromRetries(cap) + bns := rdm.getBatchFromRetries(cap, curHeight) if len(bns) > 0 { cap -= len(bns) rdm.addBatchToRequesting(bns) } if rdm.availableForMoreTasks() { - addBNs := rdm.getBatchFromUnprocessed(cap) + addBNs := rdm.getBatchFromUnprocessed(cap, curHeight) rdm.addBatchToRequesting(addBNs) bns = append(bns, addBNs...) 
} @@ -65,7 +74,7 @@ func (rdm *receiptDownloadManager) GetNextBatch() []uint64 { } // HandleRequestError handles the error result -func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error, streamID sttypes.StreamID) { +func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error) { rdm.lock.Lock() defer rdm.lock.Unlock() @@ -77,33 +86,35 @@ func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error, s } // HandleRequestResult handles get receipts result -func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receipts []types.Receipts, loopID int, streamID sttypes.StreamID) error { +func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receivedReceipts []types.Receipts, receivedBlocks []*types.Block, streamID sttypes.StreamID) error { rdm.lock.Lock() defer rdm.lock.Unlock() for i, bn := range bns { delete(rdm.requesting, bn) - if indexExists(receipts, i) { + if !indexExists(receivedBlocks, i) || !indexExists(receivedReceipts, i) { rdm.retries.push(bn) } else { rdm.processing[bn] = struct{}{} rdm.rdd[bn] = ReceiptDownloadDetails{ - loopID: loopID, streamID: streamID, } + rdm.received[bn] = Received{ + block: receivedBlocks[i], + receipts: receivedReceipts[i], + } } } return nil } // SetDownloadDetails sets the download details for a batch of blocks -func (rdm *receiptDownloadManager) SetDownloadDetails(bns []uint64, loopID int, streamID sttypes.StreamID) error { +func (rdm *receiptDownloadManager) SetDownloadDetails(bns []uint64, streamID sttypes.StreamID) error { rdm.lock.Lock() defer rdm.lock.Unlock() for _, bn := range bns { rdm.rdd[bn] = ReceiptDownloadDetails{ - loopID: loopID, streamID: streamID, } } @@ -111,25 +122,24 @@ func (rdm *receiptDownloadManager) SetDownloadDetails(bns []uint64, loopID int, } // GetDownloadDetails returns the download details for a certain block number -func (rdm *receiptDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID) { +func (rdm *receiptDownloadManager) GetDownloadDetails(blockNumber uint64) (streamID sttypes.StreamID) { rdm.lock.Lock() defer rdm.lock.Unlock() - return rdm.rdd[blockNumber].loopID, rdm.rdd[blockNumber].streamID + return rdm.rdd[blockNumber].streamID } // getBatchFromRetries get the receipt number batch to be requested from retries. -func (rdm *receiptDownloadManager) getBatchFromRetries(cap int) []uint64 { +func (rdm *receiptDownloadManager) getBatchFromRetries(cap int, fromBlockNumber uint64) []uint64 { var ( requestBNs []uint64 - curHeight = rdm.chain.CurrentBlock().NumberU64() ) for cnt := 0; cnt < cap; cnt++ { bn := rdm.retries.pop() if bn == 0 { break // no more retries } - if bn <= curHeight { + if bn <= fromBlockNumber { continue } requestBNs = append(requestBNs, bn) @@ -138,10 +148,9 @@ func (rdm *receiptDownloadManager) getBatchFromRetries(cap int) []uint64 { } // getBatchFromUnprocessed returns a batch of receipt numbers to be requested from unprocessed. -func (rdm *receiptDownloadManager) getBatchFromUnprocessed(cap int) []uint64 { +func (rdm *receiptDownloadManager) getBatchFromUnprocessed(cap int, curHeight uint64) []uint64 { var ( requestBNs []uint64 - curHeight = rdm.chain.CurrentBlock().NumberU64() ) bn := curHeight + 1 // TODO: this algorithm can be potentially optimized. 
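Taken together, these manager changes move chain insertion out of the workers: results are now buffered in the received map, keyed by block number, and written by a single writer. A minimal sketch (not part of the patch) of the worker contract this implies; receiptWorker and fetch are hypothetical names, and the authoritative loop is runReceiptWorkerLoop in stage_receipts.go below:

    // Sketch only, not part of the patch: fetch stands in for the actual
    // stream request against a peer.
    func receiptWorker(
    	rdm *receiptDownloadManager,
    	curHeight uint64,
    	fetch func([]uint64) ([]*types.Block, []types.Receipts, sttypes.StreamID, error),
    ) {
    	for {
    		batch := rdm.GetNextBatch(curHeight) // only numbers above curHeight
    		if len(batch) == 0 {
    			return // nothing left to request
    		}
    		blocks, receipts, stid, err := fetch(batch)
    		if err != nil {
    			rdm.HandleRequestError(batch, err) // whole batch goes back to retries
    			continue
    		}
    		// results are buffered in rdm.received, keyed by block number, so a
    		// single writer can insert the range into the chain in order
    		_ = rdm.HandleRequestResult(batch, receipts, blocks, stid)
    	}
    }

This buffering is what lets insertBlocksAndReceipts (added in stage_receipts.go below) call InsertReceiptChain once per cycle with a contiguous, sorted range instead of letting each worker race to insert.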
diff --git a/api/service/stagedstreamsync/stage_bodies.go b/api/service/stagedstreamsync/stage_bodies.go index 8cce5207ca..9fdf4681a1 100644 --- a/api/service/stagedstreamsync/stage_bodies.go +++ b/api/service/stagedstreamsync/stage_bodies.go @@ -70,7 +70,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev } maxHeight := s.state.status.targetBN - currentHead := b.configs.bc.CurrentBlock().NumberU64() + currentHead := s.state.CurrentBlockNumber() if currentHead >= maxHeight { return nil } @@ -138,7 +138,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev // runBlockWorkerLoop creates a work loop for download blocks func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, startTime time.Time) { - currentBlock := int(b.configs.bc.CurrentBlock().NumberU64()) + currentBlock := int(s.state.CurrentBlockNumber()) defer wg.Done() @@ -148,7 +148,8 @@ func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownload return default: } - batch := gbm.GetNextBatch() + curHeight := s.state.CurrentBlockNumber() + batch := gbm.GetNextBatch(curHeight) if len(batch) == 0 { select { case <-ctx.Done(): @@ -434,7 +435,7 @@ func (b *StageBodies) Revert(ctx context.Context, firstCycle bool, u *RevertStat defer tx.Rollback() } // save progress - currentHead := b.configs.bc.CurrentBlock().NumberU64() + currentHead := s.state.CurrentBlockNumber() if err = s.Update(tx, currentHead); err != nil { utils.Logger().Error(). Err(err). diff --git a/api/service/stagedstreamsync/stage_heads.go b/api/service/stagedstreamsync/stage_heads.go index c917884a36..99e0248bae 100644 --- a/api/service/stagedstreamsync/stage_heads.go +++ b/api/service/stagedstreamsync/stage_heads.go @@ -53,7 +53,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock maxHeight := s.state.status.targetBN maxBlocksPerSyncCycle := uint64(1024) // TODO: should be in config -> s.state.MaxBlocksPerSyncCycle - currentHeight := heads.configs.bc.CurrentBlock().NumberU64() + currentHeight := s.state.CurrentBlockNumber() s.state.currentCycle.TargetHeight = maxHeight targetHeight := uint64(0) if errV := CreateView(ctx, heads.configs.db, tx, func(etx kv.Tx) (err error) { @@ -89,6 +89,15 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock targetHeight = currentHeight + maxBlocksPerSyncCycle } + // check pivot: if the chain hasn't reached the pivot yet + if s.state.status.pivotBlock != nil && s.state.CurrentBlockNumber() < s.state.status.pivotBlock.NumberU64() { + // set the target height to the block just before the pivot; + // the pivot block itself will be downloaded by the StateSync stage + if targetHeight >= s.state.status.pivotBlock.NumberU64() { + targetHeight = s.state.status.pivotBlock.NumberU64() - 1 + } + } + s.state.currentCycle.TargetHeight = targetHeight if err := s.Update(tx, targetHeight); err != nil { diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index fb0af99bc0..0a2d8ab024 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -51,6 +51,11 @@ func NewStageReceiptsCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, con // Exec progresses receipts stage in the forward direction func (r *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { + // only
execute this stage in fast/snap sync mode + if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() >= s.state.status.pivotBlock.NumberU64() { + return nil + } + useInternalTx := tx == nil if invalidBlockRevert { @@ -63,7 +68,7 @@ } maxHeight := s.state.status.targetBN - currentHead := r.configs.bc.CurrentBlock().NumberU64() + currentHead := s.state.CurrentBlockNumber() if currentHead >= maxHeight { return nil } @@ -91,21 +96,6 @@ startTime := time.Now() // startBlock := currProgress - // prepare db transactions - txs := make([]kv.RwTx, r.configs.concurrency) - for i := 0; i < r.configs.concurrency; i++ { - txs[i], err = r.configs.blockDBs[i].BeginRw(ctx) - if err != nil { - return err - } - } - - defer func() { - for i := 0; i < r.configs.concurrency; i++ { - txs[i].Rollback() - } - }() - if r.configs.logProgress { fmt.Print("\033[s") // save the cursor position } @@ -119,19 +109,53 @@ defer tx.Rollback() } - // Fetch blocks from neighbors - s.state.rdm = newReceiptDownloadManager(tx, r.configs.bc, targetHeight, s.state.logger) + for { + // if there is nothing more to download, break the loop + curBn := s.state.CurrentBlockNumber() + if curBn == targetHeight { + break + } - // Setup workers to fetch blocks from remote node - var wg sync.WaitGroup + // calculate the block numbers range to download + toBn := curBn + uint64(ReceiptsPerRequest*s.state.config.Concurrency) + if toBn > targetHeight { + toBn = targetHeight + } - for i := 0; i != s.state.config.Concurrency; i++ { - wg.Add(1) - go r.runReceiptWorkerLoop(ctx, s.state.rdm, &wg, i, s, txs, startTime) + // Fetch receipts from connected peers + rdm := newReceiptDownloadManager(tx, r.configs.bc, toBn, s.state.logger) + + // Setup workers to fetch blocks from remote node + var wg sync.WaitGroup + + for i := 0; i < s.state.config.Concurrency; i++ { + wg.Add(1) + go func() { + // prepare db transactions + txs := make([]kv.RwTx, r.configs.concurrency) + for i := 0; i < r.configs.concurrency; i++ { + txs[i], err = r.configs.blockDBs[i].BeginRw(ctx) + if err != nil { + return + } + } + // rollback the transactions after worker loop + defer func() { + for i := 0; i < r.configs.concurrency; i++ { + txs[i].Rollback() + } + }() + + r.runReceiptWorkerLoop(ctx, rdm, &wg, s, txs, startTime) + }() + } + wg.Wait() + // insert all downloaded blocks and receipts to chain + if err := r.insertBlocksAndReceipts(ctx, rdm, toBn, s); err != nil { + utils.Logger().Err(err).Msg(WrapStagedSyncMsg("InsertReceiptChain failed")) + } } - wg.Wait() - if useInternalTx { if err := tx.Commit(); err != nil { return err @@ -141,10 +165,52 @@ return nil } +func (r *StageReceipts) insertBlocksAndReceipts(ctx context.Context, rdm *receiptDownloadManager, toBn uint64, s *StageState) error { + if len(rdm.received) == 0 { + return nil + } + var ( + bns []uint64 + blocks []*types.Block + receipts []types.Receipts + streamIDs []sttypes.StreamID + ) + // populate blocks and receipts in separate arrays; + // this makes it easy to sort both by block number + for bn := s.state.CurrentBlockNumber() + 1; bn <= toBn; bn++ { + if received, ok := rdm.received[bn]; !ok { + return errors.New("some blocks are missing") + } else { + bns = append(bns,
bn) + blocks = append(blocks, received.block) + receipts = append(receipts, received.receipts) + streamIDs = append(streamIDs, received.streamID) + } + } + // insert sorted blocks and receipts to chain + if inserted, err := r.configs.bc.InsertReceiptChain(blocks, receipts); err != nil { + utils.Logger().Err(err). + Interface("streams", streamIDs). + Interface("block numbers", bns). + Msg(WrapStagedSyncMsg("InsertReceiptChain failed")) + rdm.HandleRequestError(bns, err) + return fmt.Errorf("InsertReceiptChain failed: %s", err.Error()) + } else { + if inserted != len(blocks) { + utils.Logger().Warn(). + Interface("block numbers", bns). + Int("inserted", inserted). + Int("blocks to insert", len(blocks)). + Msg(WrapStagedSyncMsg("InsertReceiptChain couldn't insert all downloaded blocks/receipts")) + } + } + return nil +} + // runReceiptWorkerLoop creates a work loop for download receipts -func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, txs []kv.RwTx, startTime time.Time) { +func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, s *StageState, txs []kv.RwTx, startTime time.Time) { - currentBlock := int(r.configs.bc.CurrentBlock().NumberU64()) + currentBlock := int(s.state.CurrentBlockNumber()) gbm := s.state.gbm defer wg.Done() @@ -156,7 +222,8 @@ func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo default: } // get next batch of block numbers - batch := rdm.GetNextBatch() + curHeight := s.state.CurrentBlockNumber() + batch := rdm.GetNextBatch(curHeight) if len(batch) == 0 { select { case <-ctx.Done(): @@ -168,6 +235,7 @@ func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo // retrieve corresponding blocks from cache db var hashes []common.Hash var blocks []*types.Block + for _, bn := range batch { blkKey := marshalData(bn) loopID, _ := gbm.GetDownloadDetails(bn) @@ -197,7 +265,8 @@ func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo return } // receiptHash := s.state.currentCycle.ReceiptHashes[bn] - hashes = append(hashes, block.Header().ReceiptHash()) + gbm.SetRootHash(bn, block.Header().Root()) + hashes = append(hashes, block.Header().Hash()) blocks = append(blocks, block) } @@ -213,34 +282,10 @@ func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo Interface("block numbers", batch). Msg(WrapStagedSyncMsg("downloadRawBlocks failed")) err = errors.Wrap(err, "request error") - rdm.HandleRequestError(batch, err, stid) - } else if receipts == nil || len(receipts) == 0 { - utils.Logger().Warn(). - Str("stream", string(stid)). - Interface("block numbers", batch). - Msg(WrapStagedSyncMsg("downloadRawBlocks failed, received empty reciptBytes")) - err := errors.New("downloadRawBlocks received empty reciptBytes") - rdm.HandleRequestError(batch, err, stid) + rdm.HandleRequestError(batch, err) } else { - // insert block and receipts to chain - if inserted, err := r.configs.bc.InsertReceiptChain(blocks, receipts); err != nil { - utils.Logger().Err(err). - Str("stream", string(stid)). - Interface("block numbers", batch). - Msg(WrapStagedSyncMsg("InsertReceiptChain failed")) - err := errors.New("InsertReceiptChain failed") - rdm.HandleRequestError(batch, err, stid) - } else { - if inserted != len(blocks) { - utils.Logger().Warn(). - Interface("block numbers", batch). - Int("inserted", inserted). - Int("blocks to insert", len(blocks)). 
- Msg(WrapStagedSyncMsg("InsertReceiptChain couldn't insert all downloaded blocks/receipts")) - } - } // handle request result - rdm.HandleRequestResult(batch, receipts, loopID, stid) + rdm.HandleRequestResult(batch, receipts, blocks, stid) // log progress if r.configs.logProgress { //calculating block download speed diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index 1294edf703..6c82a69c11 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -19,12 +19,12 @@ type StageStates struct { configs StageStatesCfg } type StageStatesCfg struct { - bc core.BlockChain - db kv.RwDB - blockDBs []kv.RwDB - concurrency int - logger zerolog.Logger - logProgress bool + bc core.BlockChain + db kv.RwDB + blockDBs []kv.RwDB + concurrency int + logger zerolog.Logger + logProgress bool } func NewStageStates(cfg StageStatesCfg) *StageStates { @@ -42,12 +42,12 @@ func NewStageStatesCfg( logProgress bool) StageStatesCfg { return StageStatesCfg{ - bc: bc, - db: db, - blockDBs: blockDBs, - concurrency: concurrency, - logger: logger, - logProgress: logProgress, + bc: bc, + db: db, + blockDBs: blockDBs, + concurrency: concurrency, + logger: logger, + logProgress: logProgress, } } diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 75326b6ace..654171df4e 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -55,17 +55,22 @@ func NewStageStateSyncCfg(bc core.BlockChain, // Exec progresses States stage in the forward direction func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { + // only execute this stage in fast/snap sync mode and once we reach the pivot + if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64()-1 { + return nil + } + // for short range sync, skip this step if !s.state.initSync { return nil } maxHeight := s.state.status.targetBN - currentHead := sss.configs.bc.CurrentBlock().NumberU64() + currentHead := s.state.CurrentBlockNumber() if currentHead >= maxHeight { return nil } - currProgress := sss.configs.bc.CurrentBlock().NumberU64() + currProgress := s.state.CurrentBlockNumber() targetHeight := s.state.currentCycle.TargetHeight if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error { @@ -97,20 +102,38 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo fmt.Print("\033[s") // save the cursor position } - // Fetch blocks from neighbors - root := sss.configs.bc.CurrentBlock().Root() - sdm := newStateDownloadManager(tx, sss.configs.bc, root, sss.configs.concurrency, s.state.logger) - - // Setup workers to fetch blocks from remote node + // Fetch states from neighbors + pivotRootHash := s.state.status.pivotBlock.Root() + sdm := newStateDownloadManager(tx, sss.configs.bc, sss.configs.concurrency, s.state.logger) + sdm.setRootHash(pivotRootHash) var wg sync.WaitGroup - - for i := 0; i != s.state.config.Concurrency; i++ { + for i := 0; i < s.state.config.Concurrency; i++ { wg.Add(1) - go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime) + go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s) } - wg.Wait() + wg.Wait() + /* + gbm := s.state.gbm + + // Setup workers to fetch states from remote node + var wg sync.WaitGroup + curHeight := s.state.CurrentBlockNumber() + + for bn := curHeight + 1;
bn <= gbm.targetBN; bn++ { + root := gbm.GetRootHash(bn) + if root == emptyHash { + continue + } + sdm.setRootHash(root) + for i := 0; i < s.state.config.Concurrency; i++ { + wg.Add(1) + go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s) + } + wg.Wait() + } + */ + if useInternalTx { if err := tx.Commit(); err != nil { return err @@ -121,7 +144,8 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo } // runStateWorkerLoop creates a work loop for download states -func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time) { +func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) { + defer wg.Done() for { @@ -130,8 +154,8 @@ func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDow return default: } - nodes, paths, codes := sdm.GetNextBatch() - if len(nodes)+len(codes) == 0 { + nodes, paths, codes, err := sdm.GetNextBatch() + if len(nodes)+len(codes) == 0 || err != nil { select { case <-ctx.Done(): return @@ -139,10 +163,9 @@ func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDow return } } - data, stid, err := sss.downloadStates(ctx, nodes, codes) if err != nil { - if !errors.Is(err, context.Canceled) { + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { sss.configs.protocol.StreamFailed(stid, "downloadStates failed") } utils.Logger().Error(). @@ -157,19 +180,20 @@ func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDow Msg(WrapStagedSyncMsg("downloadStates failed, received empty data bytes")) err := errors.New("downloadStates received empty data bytes") sdm.HandleRequestError(codes, paths, stid, err) - } - sdm.HandleRequestResult(nodes, paths, data, loopID, stid) - if sss.configs.logProgress { - //calculating block download speed - dt := time.Now().Sub(startTime).Seconds() - speed := float64(0) - if dt > 0 { - speed = float64(len(data)) / dt + } else { + sdm.HandleRequestResult(nodes, paths, data, loopID, stid) + if sss.configs.logProgress { + //calculating block download speed + dt := time.Now().Sub(startTime).Seconds() + speed := float64(0) + if dt > 0 { + speed = float64(len(data)) / dt + } + stateDownloadSpeed := fmt.Sprintf("%.2f", speed) + + fmt.Print("\033[u\033[K") // restore the cursor position and clear the line + fmt.Println("state download speed:", stateDownloadSpeed, "states/s") } - stateDownloadSpeed := fmt.Sprintf("%.2f", speed) - - fmt.Print("\033[u\033[K") // restore the cursor position and clear the line - fmt.Println("state download speed:", stateDownloadSpeed, "states/s") } } } @@ -216,7 +240,7 @@ func (stg *StageStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) { } // save progress - if err = s.Update(tx, stg.configs.bc.CurrentBlock().NumberU64()); err != nil { + if err = s.Update(tx, s.state.CurrentBlockNumber()); err != nil { utils.Logger().Error(). Err(err). 
Msgf("[STAGED_SYNC] saving progress for block States stage failed") diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index 7d94126b0c..a4e04dff37 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -67,8 +67,7 @@ type StagedStreamSync struct { protocol syncProtocol isBeaconNode bool gbm *blockDownloadManager // initialized when finished get block number - rdm *receiptDownloadManager - lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus + lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus lastMileMux sync.Mutex inserted int config Config @@ -338,6 +337,18 @@ func (s *StagedStreamSync) doGetCurrentNumberRequest(ctx context.Context) (uint6 return bn, stid, nil } +// doGetBlockByNumberRequest returns block by its number and corresponding stream +func (s *StagedStreamSync) doGetBlockByNumberRequest(ctx context.Context, bn uint64) (*types.Block, sttypes.StreamID, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + blocks, stid, err := s.protocol.GetBlocksByNumber(ctx, []uint64{bn}, syncproto.WithHighPriority()) + if err != nil || len(blocks) != 1 { + return nil, stid, err + } + return blocks[0], stid, nil +} + // promLabels returns a prometheus labels for current shard id func (s *StagedStreamSync) promLabels() prometheus.Labels { sid := s.bc.ShardID() @@ -483,7 +494,6 @@ func (s *StagedStreamSync) runStage(ctx context.Context, stage *Stage, db kv.RwD if err != nil { return err } - if err = stage.Handler.Exec(ctx, firstCycle, invalidBlockRevert, stageState, s, tx); err != nil { utils.Logger().Error(). Err(err). diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go index 80a7583881..51eccb8ec7 100644 --- a/api/service/stagedstreamsync/state_download_manager.go +++ b/api/service/stagedstreamsync/state_download_manager.go @@ -3,13 +3,16 @@ package stagedstreamsync import ( "fmt" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/core/rawdb" "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/ledgerwatch/erigon-lib/kv" "github.com/rs/zerolog" @@ -22,6 +25,26 @@ type codeTask struct { attempts map[sttypes.StreamID]int } +// trieTask represents a single trie node download task, containing a set of +// peers already attempted retrieval from to detect stalled syncs and abort. 
+type trieTask struct { + hash common.Hash + path [][]byte + attempts map[sttypes.StreamID]int +} + +type task struct { + trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path + codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash +} + +func newTask() *task { + return &task{ + trieTasks: make(map[string]*trieTask), + codeTasks: make(map[common.Hash]*codeTask), + } +} + func (t *task) addCodeTask(h common.Hash, ct *codeTask) { t.codeTasks[h] = &codeTask{ attempts: ct.attempts, @@ -29,7 +52,10 @@ func (t *task) addCodeTask(h common.Hash, ct *codeTask) { } func (t *task) getCodeTask(h common.Hash) *codeTask { - return t.codeTasks[h] + if task, ok := t.codeTasks[h]; ok { + return task + } + return nil } func (t *task) addNewCodeTask(h common.Hash) { @@ -39,15 +65,17 @@ func (t *task) addNewCodeTask(h common.Hash) { } func (t *task) deleteCodeTask(hash common.Hash) { - delete(t.codeTasks, hash) + if _, ok := t.codeTasks[hash]; ok { + delete(t.codeTasks, hash) + } } -// trieTask represents a single trie node download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type trieTask struct { - hash common.Hash - path [][]byte - attempts map[sttypes.StreamID]int +func (t *task) deleteCodeTaskAttempts(h common.Hash, stID sttypes.StreamID) { + if task, ok := t.codeTasks[h]; ok { + if _, ok := task.attempts[stID]; ok { + delete(t.codeTasks[h].attempts, stID) + } + } } func (t *task) addTrieTask(path string, tt *trieTask) { @@ -59,7 +87,10 @@ func (t *task) addTrieTask(path string, tt *trieTask) { } func (t *task) getTrieTask(path string) *trieTask { - return t.trieTasks[path] + if task, ok := t.trieTasks[path]; ok { + return task + } + return nil } func (t *task) addNewTrieTask(hash common.Hash, path string) { @@ -71,18 +102,16 @@ func (t *task) addNewTrieTask(hash common.Hash, path string) { } func (t *task) deleteTrieTask(path string) { - delete(t.trieTasks, path) -} - -type task struct { - trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path - codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash + if _, ok := t.trieTasks[path]; ok { + delete(t.trieTasks, path) + } } -func newTask() *task { - return &task{ - trieTasks: make(map[string]*trieTask), - codeTasks: make(map[common.Hash]*codeTask), +func (t *task) deleteTrieTaskAttempts(path string, stID sttypes.StreamID) { + if task, ok := t.trieTasks[path]; ok { + if _, ok := task.attempts[stID]; ok { + delete(t.trieTasks[path].attempts, stID) + } } } @@ -99,6 +128,9 @@ type StateDownloadManager struct { logger zerolog.Logger lock sync.Mutex + numUncommitted int + bytesUncommitted int + tasks *task requesting *task processing *task @@ -107,15 +139,12 @@ type StateDownloadManager struct { func newStateDownloadManager(tx kv.RwTx, bc core.BlockChain, - root common.Hash, concurrency int, logger zerolog.Logger) *StateDownloadManager { return &StateDownloadManager{ bc: bc, tx: tx, - root: root, - sched: state.NewStateSync(root, bc.ChainDb(), nil, rawdb.HashScheme), keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), concurrency: concurrency, logger: logger, @@ -126,9 +155,13 @@ func newStateDownloadManager(tx kv.RwTx, } } +func (s *StateDownloadManager) setRootHash(root common.Hash) { + s.root = root + s.sched = state.NewStateSync(root, s.bc.ChainDb(), nil, rawdb.HashScheme) +} + // 
fillTasks fills the tasks to send to the remote peer. func (s *StateDownloadManager) fillTasks(n int) error { - // Refill available tasks from the scheduler. if fill := n - (len(s.tasks.trieTasks) + len(s.tasks.codeTasks)); fill > 0 { paths, hashes, codes := s.sched.Missing(fill) for i, path := range paths { @@ -143,7 +176,7 @@ func (s *StateDownloadManager) fillTasks(n int) error { // getNextBatch returns objects with a maximum of n state download // tasks to send to the remote peer. -func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []string, codes []common.Hash) { +func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []string, codes []common.Hash, err error) { s.lock.Lock() defer s.lock.Unlock() @@ -154,13 +187,57 @@ func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []stri cap -= nItems if cap > 0 { + // Refill available tasks from the scheduler. + if s.sched.Pending() == 0 { + return + } + + if err = s.commit(false); err != nil { + return + } + + if err = s.fillTasks(cap); err != nil { + return + } newNodes, newPaths, newCodes := s.getBatchFromUnprocessed(cap) nodes = append(nodes, newNodes...) paths = append(paths, newPaths...) codes = append(codes, newCodes...) } + return +} - return nodes, paths, codes +func (s *StateDownloadManager) commit(force bool) error { + if !force && s.bytesUncommitted < ethdb.IdealBatchSize { + return nil + } + start := time.Now() + b := s.bc.ChainDb().NewBatch() + if err := s.sched.Commit(b); err != nil { + return err + } + if err := b.Write(); err != nil { + return fmt.Errorf("DB write error: %v", err) + } + s.updateStats(s.numUncommitted, 0, 0, time.Since(start)) + s.numUncommitted = 0 + s.bytesUncommitted = 0 + return nil +} + +// updateStats bumps the various state sync progress counters and displays a log +// message for the user to see. +func (s *StateDownloadManager) updateStats(written, duplicate, unexpected int, duration time.Duration) { + // TODO: here it updates the stats for total pending, processed, duplicates and unexpected + + // for now, we just log current stats + if written > 0 || duplicate > 0 || unexpected > 0 { + utils.Logger().Info(). + Int("count", written). + Int("duplicate", duplicate). + Int("unexpected", unexpected). + Msg("Imported new state entries") + } } // getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download @@ -194,11 +271,11 @@ func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Ha } // getBatchFromRetries get the block number batch to be requested from retries. -func (s *StateDownloadManager) getBatchFromRetries(n int) (nodes []common.Hash, paths []string, codes []common.Hash) { +func (s *StateDownloadManager) getBatchFromRetries(n int) ([]common.Hash, []string, []common.Hash) { // over trie nodes as those can be written to disk and forgotten about.
- nodes = make([]common.Hash, 0, n) - paths = make([]string, 0, n) - codes = make([]common.Hash, 0, n) + nodes := make([]common.Hash, 0, n) + paths := make([]string, 0, n) + codes := make([]common.Hash, 0, n) for hash, t := range s.retries.codeTasks { // Stop when we've gathered enough requests @@ -229,14 +306,16 @@ func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, trie // add requested code hashes to retries for _, h := range codeHashes { - s.retries.addCodeTask(h, s.requesting.codeTasks[h]) - delete(s.requesting.codeTasks, h) + task := s.requesting.getCodeTask(h) + s.retries.addCodeTask(h, task) + s.requesting.deleteCodeTask(h) } // add requested trie paths to retries for _, path := range triePaths { - s.retries.addTrieTask(path, s.requesting.trieTasks[path]) - delete(s.requesting.trieTasks, path) + task := s.requesting.getTrieTask(path) + s.retries.addTrieTask(path, task) + s.requesting.deleteTrieTask(path) } } @@ -246,14 +325,14 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri defer s.lock.Unlock() // Collect processing stats and update progress if valid data was received - duplicate, unexpected, successful, numUncommitted, bytesUncommitted := 0, 0, 0, 0, 0 + duplicate, unexpected, successful := 0, 0, 0 for _, blob := range response { hash, err := s.processNodeData(codeHashes, triePaths, blob) switch err { case nil: - numUncommitted++ - bytesUncommitted += len(blob) + s.numUncommitted++ + s.bytesUncommitted += len(blob) successful++ case trie.ErrNotRequested: unexpected++ @@ -266,11 +345,16 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri for _, path := range triePaths { task := s.requesting.getTrieTask(path) + if task == nil { + // it is already removed from requesting + // either it has been completed and deleted by processNodeData or it does not exist + continue + } // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). if len(response) > 0 { //TODO: if timeout also do same - delete(s.requesting.trieTasks[path].attempts, streamID) + s.requesting.deleteTrieTaskAttempts(path, streamID) } else if task.attempts[streamID] >= MaxTriesToFetchNodeData { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. @@ -283,11 +367,16 @@ func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, tri for _, hash := range codeHashes { task := s.requesting.getCodeTask(hash) + if task == nil { + // it is already removed from requesting + // either it has been completed and deleted by processNodeData or it does not exist + continue + } // If the node did deliver something, missing items may be due to a protocol // limit or a previous timeout + delayed delivery. Both cases should permit // the node to retry the missing items (to avoid single-peer stalls). if len(response) > 0 { //TODO: if timeout also do same - delete(s.requesting.codeTasks[hash].attempts, streamID) //TODO: do we need delete attempts??? + s.requesting.deleteCodeTaskAttempts(hash, streamID) //TODO: do we need delete attempts??? } else if task.attempts[streamID] >= MaxTriesToFetchNodeData { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. 
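With GetNextBatch now owning the commit-and-refill cycle (flushing once bytesUncommitted crosses ethdb.IdealBatchSize, then pulling new tasks from the scheduler), a state worker reduces to request-and-report. A minimal sketch (not part of the patch) of that contract; stateWorker and download are hypothetical names, and the authoritative loop is runStateWorkerLoop in stage_statesync.go shown earlier in this patch:

    // Sketch only, not part of the patch: download stands in for the actual
    // stream call that fetches trie nodes and byte codes from a peer.
    func stateWorker(
    	sdm *StateDownloadManager,
    	loopID int,
    	download func(nodes, codes []common.Hash) ([][]byte, sttypes.StreamID, error),
    ) {
    	for {
    		// GetNextBatch commits buffered trie data and refills tasks internally
    		nodes, paths, codes, err := sdm.GetNextBatch()
    		if err != nil || len(nodes)+len(codes) == 0 {
    			return // sync finished, or a scheduler/commit error
    		}
    		data, stid, err := download(nodes, codes)
    		if err != nil {
    			sdm.HandleRequestError(codes, paths, stid, err) // back to retries
    			continue
    		}
    		sdm.HandleRequestResult(nodes, paths, data, loopID, stid)
    	}
    }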
@@ -325,6 +414,11 @@ func (s *StateDownloadManager) processNodeData(codeHashes []common.Hash, triePat } for _, path := range triePaths { task := s.requesting.getTrieTask(path) + if task == nil { + // this shouldn't happen, since the path comes from triePaths and + // triePaths are taken from the requesting queue + continue + } if task.hash == hash { err := s.sched.ProcessNode(trie.NodeSyncResult{ Path: path, diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index b2c1aacdfa..d2457a5261 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -11,6 +11,8 @@ import ( "github.com/harmony-one/harmony/consensus" "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/core/rawdb" + "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" sttypes "github.com/harmony-one/harmony/p2p/stream/types" "github.com/ledgerwatch/erigon-lib/kv" @@ -81,14 +83,12 @@ func CreateStagedSync(ctx context.Context, return nil, errInitDB } - blockExecution := config.SyncMode == FullSync extractReceiptHashes := config.SyncMode == FastSync || config.SyncMode == SnapSync - stageHeadsCfg := NewStageHeadersCfg(bc, mainDB) stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB) stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB) stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, extractReceiptHashes, config.LogProgress) - stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, blockExecution, logger, config.LogProgress) + stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress) stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress) stageReceiptsCfg := NewStageReceiptsCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress) lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB) @@ -235,6 +235,60 @@ func (s *StagedStreamSync) Debug(source string, msg interface{}) { } } +func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint64) (uint64, error) { + + // do a full sync if the chain is still at an early stage + if estimatedHeight < MaxPivotDistanceToHead { + return 0, nil + } + + pivotBlockNumber := uint64(0) + if curPivot := rawdb.ReadLastPivotNumber(s.bc.ChainDb()); curPivot != nil { + // if the head is behind the pivot, the node is still in fast/snap sync mode + if head := s.CurrentBlockNumber(); head < *curPivot { + pivotBlockNumber = *curPivot + // the pivot can be moved forward if it is far from the head + if pivotBlockNumber < estimatedHeight-MaxPivotDistanceToHead { + pivotBlockNumber = estimatedHeight - MinPivotDistanceToHead + if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil { + s.logger.Error().Err(err). + Uint64("current pivot number", *curPivot). + Uint64("new pivot number", pivotBlockNumber). + Msg(WrapStagedSyncMsg("update pivot number failed")) + return pivotBlockNumber, err + } + } + } + } else { + pivot := estimatedHeight - MinPivotDistanceToHead + if s.config.SyncMode == FastSync && s.CurrentBlockNumber() < pivot { + pivotBlockNumber = pivot + if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil { + s.logger.Error().Err(err). + Uint64("new pivot number", pivotBlockNumber).
+ Msg(WrapStagedSyncMsg("update pivot number failed")) + return pivotBlockNumber, err + } + } + } + if pivotBlockNumber > 0 { + if block, err := s.queryAllPeersForBlockByNumber(ctx, pivotBlockNumber); err != nil { + s.logger.Error().Err(err). + Uint64("pivot", pivotBlockNumber). + Msg(WrapStagedSyncMsg("query peers for pivot block failed")) + return pivotBlockNumber, err + } else { + s.status.pivotBlock = block + } + s.logger.Info(). + Uint64("estimatedHeight", estimatedHeight). + Uint64("pivot number", pivotBlockNumber). + Msg(WrapStagedSyncMsg("fast/snap sync mode, pivot is set successfully")) + } + + return pivotBlockNumber, nil +} + // doSync does the long range sync. // One LongRangeSync consists of several iterations. // For each iteration, estimate the current block number, then fetch block & insert to blockchain @@ -245,7 +299,6 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo var totalInserted int s.initSync = initSync - if err := s.checkPrerequisites(); err != nil { return 0, 0, err } @@ -259,13 +312,20 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo //TODO: use directly currentCycle var s.status.setTargetBN(estimatedHeight) } - if curBN := s.bc.CurrentBlock().NumberU64(); estimatedHeight <= curBN { + if curBN := s.CurrentBlockNumber(); estimatedHeight <= curBN { s.logger.Info().Uint64("current number", curBN).Uint64("target number", estimatedHeight). Msg(WrapStagedSyncMsg("early return of long range sync (chain is already ahead of target height)")) return estimatedHeight, 0, nil } } + // We are probably in full sync, but we might have rewound to before the + // fast/snap sync pivot, check if we should reenable + if _, err := s.checkPivot(downloaderContext, estimatedHeight); err != nil { + s.logger.Error().Err(err).Msg(WrapStagedSyncMsg("check pivot failed")) + return 0, 0, err + } + s.startSyncing() defer s.finishSyncing() @@ -336,7 +396,7 @@ func (s *StagedStreamSync) doSyncCycle(ctx context.Context) (int, error) { var totalInserted int s.inserted = 0 - startHead := s.bc.CurrentBlock().NumberU64() + startHead := s.CurrentBlockNumber() canRunCycleInOneTransaction := false var tx kv.RwTx @@ -400,6 +460,36 @@ func (s *StagedStreamSync) checkPrerequisites() error { return s.checkHaveEnoughStreams() } +func (s *StagedStreamSync) CurrentBlockNumber() uint64 { + // if current head is ahead of pivot block, return chain head regardless of sync mode + if s.status.pivotBlock != nil && s.bc.CurrentBlock().NumberU64() > s.status.pivotBlock.NumberU64() { + return s.bc.CurrentBlock().NumberU64() + } + + current := uint64(0) + switch s.config.SyncMode { + case FullSync: + current = s.bc.CurrentBlock().NumberU64() + case FastSync: + current = s.bc.CurrentFastBlock().NumberU64() + case SnapSync: + current = s.bc.CurrentHeader().Number().Uint64() + } + return current +} + +func (s *StagedStreamSync) stateSyncStage() bool { + switch s.config.SyncMode { + case FullSync: + return false + case FastSync: + return s.status.pivotBlock != nil && s.bc.CurrentFastBlock().NumberU64() == s.status.pivotBlock.NumberU64()-1 + case SnapSync: + return false + } + return false +} + // estimateCurrentNumber roughly estimates the current block number. 
// The block number does not need to be exact, but just a temporary target of the iteration func (s *StagedStreamSync) estimateCurrentNumber(ctx context.Context) (uint64, error) { @@ -439,3 +529,45 @@ func (s *StagedStreamSync) estimateCurrentNumber(ctx context.Context) (uint64, e bn := computeBlockNumberByMaxVote(cnResults) return bn, nil } + +// queryAllPeersForBlockByNumber queries all connected streams for a block by its number. +func (s *StagedStreamSync) queryAllPeersForBlockByNumber(ctx context.Context, bn uint64) (*types.Block, error) { + var ( + blkResults []*types.Block + lock sync.Mutex + wg sync.WaitGroup + ) + wg.Add(s.config.Concurrency) + for i := 0; i != s.config.Concurrency; i++ { + go func() { + defer wg.Done() + block, stid, err := s.doGetBlockByNumberRequest(ctx, bn) + if err != nil { + s.logger.Err(err).Str("streamID", string(stid)). + Msg(WrapStagedSyncMsg("getBlockByNumber request failed")) + if !errors.Is(err, context.Canceled) { + s.protocol.StreamFailed(stid, "getBlockByNumber request failed") + } + return + } + lock.Lock() + blkResults = append(blkResults, block) + lock.Unlock() + }() + } + wg.Wait() + + if len(blkResults) == 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + return nil, ErrZeroBlockResponse + } + block, err := getBlockByMaxVote(blkResults) + if err != nil { + return nil, err + } + return block, nil +} diff --git a/api/service/stagedstreamsync/types.go b/api/service/stagedstreamsync/types.go index 6d6326452e..17a3d345fd 100644 --- a/api/service/stagedstreamsync/types.go +++ b/api/service/stagedstreamsync/types.go @@ -14,9 +14,10 @@ var ( ) type status struct { - isSyncing bool - targetBN uint64 - lock sync.Mutex + isSyncing bool + targetBN uint64 + pivotBlock *types.Block + lock sync.Mutex } func newStatus() status { From 6348128c482b2ecaaadeeb3fba386f001498868a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Wed, 4 Oct 2023 20:05:12 +0800 Subject: [PATCH 085/128] improve stage handling for create new instance of staged stream sync --- api/service/stagedstreamsync/staged_stream_sync.go | 6 +++--- api/service/stagedstreamsync/syncing.go | 13 +------------ 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go index a4e04dff37..03340eb15f 100644 --- a/api/service/stagedstreamsync/staged_stream_sync.go +++ b/api/service/stagedstreamsync/staged_stream_sync.go @@ -266,7 +266,7 @@ func New( logger zerolog.Logger, ) *StagedStreamSync { - forwardStages := make([]*Stage, len(stagesList)) + forwardStages := make([]*Stage, len(StagesForwardOrder)) for i, stageIndex := range StagesForwardOrder { for _, s := range stagesList { if s.ID == stageIndex { @@ -276,7 +276,7 @@ func New( } } - revertStages := make([]*Stage, len(stagesList)) + revertStages := make([]*Stage, len(StagesRevertOrder)) for i, stageIndex := range StagesRevertOrder { for _, s := range stagesList { if s.ID == stageIndex { @@ -286,7 +286,7 @@ func New( } } - pruneStages := make([]*Stage, len(stagesList)) + pruneStages := make([]*Stage, len(StagesCleanUpOrder)) for i, stageIndex := range StagesCleanUpOrder { for _, s := range stagesList { if s.ID == stageIndex { diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index d2457a5261..03043525b8 100644 --- a/api/service/stagedstreamsync/syncing.go +++ 
b/api/service/stagedstreamsync/syncing.go @@ -118,22 +118,11 @@ func CreateStagedSync(ctx context.Context, Int("minStreams", config.MinStreams). Msg(WrapStagedSyncMsg("staged sync created successfully")) - var stages []*Stage - // if any of the default stages doesn't exist in forward order, delete it from the list of stages - for _, stg := range defaultStages { - for _, stageID := range StagesForwardOrder { - if stg.ID == stageID { - stages = append(stages, stg) - break - } - } - } - return New( bc, consensus, mainDB, - stages, + defaultStages, isBeaconNode, protocol, isBeaconNode, From c808f2b733f560fa821201e3de6235095662347b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 5 Oct 2023 15:08:10 +0800 Subject: [PATCH 086/128] fix pivot block issue for write on chain --- api/service/stagedstreamsync/stage_state.go | 2 +- api/service/stagedstreamsync/stage_statesync.go | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index 6c82a69c11..80a3faa0e3 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -55,7 +55,7 @@ func NewStageStatesCfg( func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { // only execute this stage in full sync mode if s.state.config.SyncMode != FullSync { - if s.state.status.pivotBlock != nil && s.state.bc.CurrentBlock().NumberU64() <= s.state.status.pivotBlock.NumberU64() { + if s.state.status.pivotBlock != nil && s.state.bc.CurrentBlock().NumberU64() < s.state.status.pivotBlock.NumberU64() { return nil } } diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 654171df4e..130f7f71f9 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -113,6 +113,16 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo } wg.Wait() + // insert block + if err := sss.configs.bc.WriteHeadBlock(s.state.status.pivotBlock); err != nil { + sss.configs.logger.Warn().Err(err). + Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()). 
+ Msg(WrapStagedSyncMsg("insert pivot block failed")) + s.state.Debug("StateSync/pivot/insert/error", err) + // TODO: panic("pivot block is failed to insert in chain.") + return err + } + /* gbm := s.state.gbm From bdd7f142c7f0f7f8235691c45e0d433c05a073d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:22:36 +0800 Subject: [PATCH 087/128] improve stream sync current cycle and pivot checks, fix edge case issue to insert pivot block and its receipts --- api/service/stagedstreamsync/stage_heads.go | 6 +-- .../stagedstreamsync/stage_receipts.go | 2 +- api/service/stagedstreamsync/stage_state.go | 6 +-- .../stagedstreamsync/stage_statesync.go | 5 ++- api/service/stagedstreamsync/syncing.go | 44 +++++++++++-------- api/service/stagedstreamsync/types.go | 10 +++-- 6 files changed, 41 insertions(+), 32 deletions(-) diff --git a/api/service/stagedstreamsync/stage_heads.go b/api/service/stagedstreamsync/stage_heads.go index 99e0248bae..46ebed1d24 100644 --- a/api/service/stagedstreamsync/stage_heads.go +++ b/api/service/stagedstreamsync/stage_heads.go @@ -90,11 +90,11 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock } // check pivot: if chain hasn't reached to pivot yet - if s.state.status.pivotBlock != nil && s.state.CurrentBlockNumber() < s.state.status.pivotBlock.NumberU64() { + if s.state.status.cycleSyncMode != FullSync && s.state.status.pivotBlock != nil { // set target height on the block before pivot // pivot block would be downloaded by StateSync stage - if targetHeight >= s.state.status.pivotBlock.NumberU64() { - targetHeight = s.state.status.pivotBlock.NumberU64() - 1 + if !s.state.status.statesSynced && targetHeight > s.state.status.pivotBlock.NumberU64() { + targetHeight = s.state.status.pivotBlock.NumberU64() } } diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index 0a2d8ab024..63f09f9869 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -52,7 +52,7 @@ func NewStageReceiptsCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, con func (r *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { // only execute this stage in fast/snap sync mode - if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() >= s.state.status.pivotBlock.NumberU64() { + if s.state.status.cycleSyncMode == FullSync { return nil } diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index 80a3faa0e3..c477f4309f 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -54,10 +54,8 @@ func NewStageStatesCfg( // Exec progresses States stage in the forward direction func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { // only execute this stage in full sync mode - if s.state.config.SyncMode != FullSync { - if s.state.status.pivotBlock != nil && s.state.bc.CurrentBlock().NumberU64() < s.state.status.pivotBlock.NumberU64() { - return nil - } + if s.state.status.cycleSyncMode != FullSync { + return nil } // for short range sync, skip this step diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 
130f7f71f9..1a973c13e9 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -56,7 +56,7 @@ func NewStageStateSyncCfg(bc core.BlockChain, func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { // only execute this stage in fast/snap sync mode and once we reach to pivot - if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64()-1 { + if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() { return nil } @@ -123,6 +123,9 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo return err } + // states should be fully synced in this stage + s.state.status.statesSynced = true + /* gbm := s.state.gbm diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 03043525b8..88e0a0857c 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -224,11 +224,16 @@ func (s *StagedStreamSync) Debug(source string, msg interface{}) { } } -func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint64) (uint64, error) { +// checkPivot checks pivot block and returns pivot block and cycle Sync mode +func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint64, initSync bool) (*types.Block, SyncMode, error) { + + if s.config.SyncMode == FullSync { + return nil, FullSync, nil + } // do full sync if chain is at early stage - if estimatedHeight < MaxPivotDistanceToHead { - return 0, nil + if initSync && estimatedHeight < MaxPivotDistanceToHead { + return nil, FullSync, nil } pivotBlockNumber := uint64(0) @@ -240,23 +245,21 @@ func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint6 if pivotBlockNumber < estimatedHeight-MaxPivotDistanceToHead { pivotBlockNumber = estimatedHeight - MinPivotDistanceToHead if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil { - s.logger.Error().Err(err). + s.logger.Warn().Err(err). Uint64("current pivot number", *curPivot). Uint64("new pivot number", pivotBlockNumber). Msg(WrapStagedSyncMsg("update pivot number failed")) - return pivotBlockNumber, err + pivotBlockNumber = *curPivot } } } } else { - pivot := estimatedHeight - MinPivotDistanceToHead - if s.config.SyncMode == FastSync && s.CurrentBlockNumber() < pivot { - pivotBlockNumber = pivot + if head := s.CurrentBlockNumber(); s.config.SyncMode == FastSync && head <= 1 { + pivotBlockNumber = estimatedHeight - MinPivotDistanceToHead if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil { - s.logger.Error().Err(err). + s.logger.Warn().Err(err). Uint64("new pivot number", pivotBlockNumber). Msg(WrapStagedSyncMsg("update pivot number failed")) - return pivotBlockNumber, err } } } @@ -265,17 +268,17 @@ func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint6 s.logger.Error().Err(err). Uint64("pivot", pivotBlockNumber). Msg(WrapStagedSyncMsg("query peers for pivot block failed")) - return pivotBlockNumber, err + return block, FastSync, err } else { s.status.pivotBlock = block + s.logger.Info(). + Uint64("estimatedHeight", estimatedHeight). + Uint64("pivot number", pivotBlockNumber). + Msg(WrapStagedSyncMsg("fast/snap sync mode, pivot is set successfully")) + return block, FastSync, nil } - s.logger.Info(). 
- Uint64("estimatedHeight", estimatedHeight). - Uint64("pivot number", pivotBlockNumber). - Msg(WrapStagedSyncMsg("fast/snap sync mode, pivot is set successfully")) } - - return pivotBlockNumber, nil + return nil, FullSync, nil } // doSync does the long range sync. @@ -310,9 +313,12 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo // We are probably in full sync, but we might have rewound to before the // fast/snap sync pivot, check if we should reenable - if _, err := s.checkPivot(downloaderContext, estimatedHeight); err != nil { + if pivotBlock, cycleSyncMode, err := s.checkPivot(downloaderContext, estimatedHeight, initSync); err != nil { s.logger.Error().Err(err).Msg(WrapStagedSyncMsg("check pivot failed")) return 0, 0, err + } else { + s.status.cycleSyncMode = cycleSyncMode + s.status.pivotBlock = pivotBlock } s.startSyncing() @@ -451,7 +457,7 @@ func (s *StagedStreamSync) checkPrerequisites() error { func (s *StagedStreamSync) CurrentBlockNumber() uint64 { // if current head is ahead of pivot block, return chain head regardless of sync mode - if s.status.pivotBlock != nil && s.bc.CurrentBlock().NumberU64() > s.status.pivotBlock.NumberU64() { + if s.status.pivotBlock != nil && s.bc.CurrentBlock().NumberU64() >= s.status.pivotBlock.NumberU64() { return s.bc.CurrentBlock().NumberU64() } diff --git a/api/service/stagedstreamsync/types.go b/api/service/stagedstreamsync/types.go index 17a3d345fd..e46b614299 100644 --- a/api/service/stagedstreamsync/types.go +++ b/api/service/stagedstreamsync/types.go @@ -14,10 +14,12 @@ var ( ) type status struct { - isSyncing bool - targetBN uint64 - pivotBlock *types.Block - lock sync.Mutex + isSyncing bool + targetBN uint64 + pivotBlock *types.Block + cycleSyncMode SyncMode + statesSynced bool + lock sync.Mutex } func newStatus() status { From 135c7da45506312b9a613ea09f8599cc079e114b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 24 Oct 2023 15:41:39 +0800 Subject: [PATCH 088/128] fix WriteHeadBlock, fix GetDownloadDetails index, improve fetching current block in a few stages, improve pivot calculation --- api/service/stagedstreamsync/block_manager.go | 8 ++- api/service/stagedstreamsync/const.go | 2 +- api/service/stagedstreamsync/stage_heads.go | 3 +- .../stagedstreamsync/stage_receipts.go | 9 +++- api/service/stagedstreamsync/stage_state.go | 9 ++-- .../stagedstreamsync/stage_statesync.go | 52 ++++++++++--------- api/service/stagedstreamsync/syncing.go | 18 ++++--- core/blockchain_impl.go | 25 +++++++++ core/rawdb/accessors_offchain.go | 2 +- 9 files changed, 85 insertions(+), 43 deletions(-) diff --git a/api/service/stagedstreamsync/block_manager.go b/api/service/stagedstreamsync/block_manager.go index 273078c591..f5ba8fdc44 100644 --- a/api/service/stagedstreamsync/block_manager.go +++ b/api/service/stagedstreamsync/block_manager.go @@ -1,6 +1,7 @@ package stagedstreamsync import ( + "fmt" "sync" "github.com/ethereum/go-ethereum/common" @@ -118,11 +119,14 @@ func (gbm *blockDownloadManager) SetDownloadDetails(bns []uint64, loopID int, st } // GetDownloadDetails returns the download details for a block -func (gbm *blockDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID) { +func (gbm *blockDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID, err error) { gbm.lock.Lock() defer gbm.lock.Unlock() - return gbm.bdd[blockNumber].loopID, 
gbm.bdd[blockNumber].streamID + if dm, exist := gbm.bdd[blockNumber]; exist { + return dm.loopID, dm.streamID, nil + } + return 0, sttypes.StreamID(0), fmt.Errorf("there is no download details for the block number: %d", blockNumber) } // SetRootHash sets the root hash for a specific block diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go index e172854ec9..2789bfb1e3 100644 --- a/api/service/stagedstreamsync/const.go +++ b/api/service/stagedstreamsync/const.go @@ -40,7 +40,7 @@ const ( ShortRangeTimeout time.Duration = 1 * time.Minute // pivot block distance ranges - MinPivotDistanceToHead uint64 = 1028 + MinPivotDistanceToHead uint64 = 1024 MaxPivotDistanceToHead uint64 = 2048 ) diff --git a/api/service/stagedstreamsync/stage_heads.go b/api/service/stagedstreamsync/stage_heads.go index 46ebed1d24..bf0721aad7 100644 --- a/api/service/stagedstreamsync/stage_heads.go +++ b/api/service/stagedstreamsync/stage_heads.go @@ -91,8 +91,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock // check pivot: if chain hasn't reached to pivot yet if s.state.status.cycleSyncMode != FullSync && s.state.status.pivotBlock != nil { - // set target height on the block before pivot - // pivot block would be downloaded by StateSync stage + // set target height on the pivot block if !s.state.status.statesSynced && targetHeight > s.state.status.pivotBlock.NumberU64() { targetHeight = s.state.status.pivotBlock.NumberU64() } diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go index 63f09f9869..4445eb6ba2 100644 --- a/api/service/stagedstreamsync/stage_receipts.go +++ b/api/service/stagedstreamsync/stage_receipts.go @@ -238,7 +238,14 @@ func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDo for _, bn := range batch { blkKey := marshalData(bn) - loopID, _ := gbm.GetDownloadDetails(bn) + loopID, _, errBDD := gbm.GetDownloadDetails(bn) + if errBDD != nil { + utils.Logger().Warn(). + Err(errBDD). + Interface("block numbers", bn). 
+ Msg(WrapStagedSyncMsg("get block download details failed")) + return + } blockBytes, err := txs[loopID].GetOne(BlocksBucket, blkKey) if err != nil { return diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_state.go index c477f4309f..df864d63ff 100644 --- a/api/service/stagedstreamsync/stage_state.go +++ b/api/service/stagedstreamsync/stage_state.go @@ -69,11 +69,11 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR } maxHeight := s.state.status.targetBN - currentHead := stg.configs.bc.CurrentBlock().NumberU64() + currentHead := s.state.CurrentBlockNumber() if currentHead >= maxHeight { return nil } - currProgress := stg.configs.bc.CurrentBlock().NumberU64() + currProgress := currentHead targetHeight := s.state.currentCycle.TargetHeight if currProgress >= targetHeight { return nil @@ -115,7 +115,10 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR for i := currProgress + 1; i <= targetHeight; i++ { blkKey := marshalData(i) - loopID, streamID := gbm.GetDownloadDetails(i) + loopID, streamID, errBDD := gbm.GetDownloadDetails(i) + if errBDD != nil { + return errBDD + } blockBytes, err := txs[loopID].GetOne(BlocksBucket, blkKey) if err != nil { diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 1a973c13e9..081b3e8b94 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -55,36 +55,37 @@ func NewStageStateSyncCfg(bc core.BlockChain, // Exec progresses States stage in the forward direction func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) { - // only execute this stage in fast/snap sync mode and once we reach to pivot - if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() { - return nil - } - // for short range sync, skip this step if !s.state.initSync { return nil - } - - maxHeight := s.state.status.targetBN - currentHead := s.state.CurrentBlockNumber() - if currentHead >= maxHeight { - return nil - } - currProgress := s.state.CurrentBlockNumber() - targetHeight := s.state.currentCycle.TargetHeight - - if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error { - if currProgress, err = s.CurrentStageProgress(etx); err != nil { - return err - } + } // only execute this stage in fast/snap sync mode and once we reach to pivot + + if s.state.status.pivotBlock == nil || + s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() || + s.state.status.statesSynced { return nil - }); errV != nil { - return errV } - if currProgress >= targetHeight { - return nil - } + // maxHeight := s.state.status.targetBN + // currentHead := s.state.CurrentBlockNumber() + // if currentHead >= maxHeight { + // return nil + // } + // currProgress := s.state.CurrentBlockNumber() + // targetHeight := s.state.currentCycle.TargetHeight + + // if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error { + // if currProgress, err = s.CurrentStageProgress(etx); err != nil { + // return err + // } + // return nil + // }); errV != nil { + // return errV + // } + + // if currProgress >= targetHeight { + // return nil + // } useInternalTx := tx == nil if useInternalTx { var err error @@ -104,8 +105,9 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo // Fetch states from neighbors 
pivotRootHash := s.state.status.pivotBlock.Root() + currentBlockRootHash := s.state.bc.CurrentFastBlock().Root() sdm := newStateDownloadManager(tx, sss.configs.bc, sss.configs.concurrency, s.state.logger) - sdm.setRootHash(pivotRootHash) + sdm.setRootHash(currentBlockRootHash) var wg sync.WaitGroup for i := 0; i < s.state.config.Concurrency; i++ { wg.Add(1) diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index 88e0a0857c..73f050080b 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -237,20 +237,14 @@ func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint6 } pivotBlockNumber := uint64(0) - if curPivot := rawdb.ReadLastPivotNumber(s.bc.ChainDb()); curPivot != nil { + var curPivot *uint64 + if curPivot = rawdb.ReadLastPivotNumber(s.bc.ChainDb()); curPivot != nil { // if head is behind pivot, that means it is still on fast/snap sync mode if head := s.CurrentBlockNumber(); head < *curPivot { pivotBlockNumber = *curPivot // pivot could be moved forward if it is far from head if pivotBlockNumber < estimatedHeight-MaxPivotDistanceToHead { pivotBlockNumber = estimatedHeight - MinPivotDistanceToHead - if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil { - s.logger.Warn().Err(err). - Uint64("current pivot number", *curPivot). - Uint64("new pivot number", pivotBlockNumber). - Msg(WrapStagedSyncMsg("update pivot number failed")) - pivotBlockNumber = *curPivot - } } } } else { @@ -270,6 +264,14 @@ func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint6 Msg(WrapStagedSyncMsg("query peers for pivot block failed")) return block, FastSync, err } else { + if curPivot == nil || pivotBlockNumber != *curPivot { + if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil { + s.logger.Warn().Err(err). + Uint64("new pivot number", pivotBlockNumber). + Msg(WrapStagedSyncMsg("update pivot number failed")) + return block, FastSync, err + } + } s.status.pivotBlock = block s.logger.Info(). Uint64("estimatedHeight", estimatedHeight). 
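
Taken together, the checkPivot revisions above implement a sliding window over the estimated chain head: keep the stored pivot while the head is still behind it, and re-aim it once it trails the head by more than the maximum window. A hedged sketch of just that arithmetic, using the constants from const.go after this patch (MinPivotDistanceToHead = 1024, MaxPivotDistanceToHead = 2048); movePivot is an illustrative name, not a function in the codebase.

package sketch

// Constants mirror api/service/stagedstreamsync/const.go after this patch.
const (
	MinPivotDistanceToHead uint64 = 1024
	MaxPivotDistanceToHead uint64 = 2048
)

// movePivot applies the sliding-window rule used by checkPivot above.
func movePivot(estimatedHeight, curPivot uint64) uint64 {
	if estimatedHeight < MaxPivotDistanceToHead {
		return 0 // chain is still young: stay on full sync, no pivot
	}
	if curPivot < estimatedHeight-MaxPivotDistanceToHead {
		// the stored pivot trails the estimated head by more than the
		// window: re-aim it MinPivotDistanceToHead blocks below the head
		return estimatedHeight - MinPivotDistanceToHead
	}
	return curPivot // still inside the window: keep the stored pivot
}

For example, with an estimated height of 10000 and a stored pivot of 7000, the pivot trails by 3000 > 2048 blocks, so it is re-aimed at 10000 - 1024 = 8976.
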
diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 97660544da..15527c3fe0 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -852,6 +852,20 @@ func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error { if err := rawdb.WriteHeadBlockHash(batch, block.Hash()); err != nil { return err } + if err := rawdb.WriteHeadHeaderHash(batch, block.Hash()); err != nil { + return err + } + + isNewEpoch := block.IsLastBlockInEpoch() + if isNewEpoch { + epoch := block.Header().Epoch() + nextEpoch := epoch.Add(epoch, common.Big1) + if err := rawdb.WriteShardStateBytes(batch, nextEpoch, block.Header().ShardState()); err != nil { + utils.Logger().Error().Err(err).Msg("failed to store shard state") + return err + } + } + if err := batch.Write(); err != nil { return err } @@ -1328,6 +1342,17 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha return 0, err } + isNewEpoch := block.IsLastBlockInEpoch() + if isNewEpoch { + epoch := block.Header().Epoch() + nextEpoch := epoch.Add(epoch, common.Big1) + err := rawdb.WriteShardStateBytes(batch, nextEpoch, block.Header().ShardState()) + if err != nil { + utils.Logger().Error().Err(err).Msg("failed to store shard state") + return 0, err + } + } + stats.processed++ if batch.ValueSize() >= ethdb.IdealBatchSize { diff --git a/core/rawdb/accessors_offchain.go b/core/rawdb/accessors_offchain.go index 4808c8c231..05a2321a26 100644 --- a/core/rawdb/accessors_offchain.go +++ b/core/rawdb/accessors_offchain.go @@ -22,7 +22,7 @@ func ReadShardState( data, err := db.Get(shardStateKey(epoch)) if err != nil { return nil, errors.Errorf( - MsgNoShardStateFromDB, "epoch: %d", epoch, + MsgNoShardStateFromDB, "epoch: %d", epoch.Uint64(), ) } ss, err2 := shard.DecodeWrapper(data) From 3fcfad4531e63f32c08094787ef955b847fd809d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Fri, 27 Oct 2023 00:26:47 +0800 Subject: [PATCH 089/128] fix rebase conflicts --- api/service/stagedstreamsync/block_manager.go | 2 +- .../stagedstreamsync/stage_statesync.go | 2 +- core/blockchain.go | 2 +- p2p/stream/protocols/sync/stream_test.go | 21 ------------------- 4 files changed, 3 insertions(+), 24 deletions(-) diff --git a/api/service/stagedstreamsync/block_manager.go b/api/service/stagedstreamsync/block_manager.go index f5ba8fdc44..d614d24205 100644 --- a/api/service/stagedstreamsync/block_manager.go +++ b/api/service/stagedstreamsync/block_manager.go @@ -126,7 +126,7 @@ func (gbm *blockDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID if dm, exist := gbm.bdd[blockNumber]; exist { return dm.loopID, dm.streamID, nil } - return 0, sttypes.StreamID(0), fmt.Errorf("there is no download details for the block number: %d", blockNumber) + return 0, sttypes.StreamID(fmt.Sprint(0)), fmt.Errorf("there is no download details for the block number: %d", blockNumber) } // SetRootHash sets the root hash for a specific block diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 081b3e8b94..086d0fb418 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -104,7 +104,7 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo } // Fetch states from neighbors - pivotRootHash := s.state.status.pivotBlock.Root() + // pivotRootHash := s.state.status.pivotBlock.Root() currentBlockRootHash := 
s.state.bc.CurrentFastBlock().Root()
 	sdm := newStateDownloadManager(tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
 	sdm.setRootHash(currentBlockRootHash)
diff --git a/core/blockchain.go b/core/blockchain.go
index f6f50e71f4..1f7233f42e 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -130,7 +130,7 @@ type BlockChain interface {
 	// transaction and receipt data.
 	InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error)
 	// LeaderRotationMeta returns the number of continuous blocks by the leader.
-	LeaderRotationMeta() (publicKeyBytes []byte, epoch, count, shifts uint64, err error)
+	LeaderRotationMeta() LeaderRotationMeta
 	// BadBlocks returns a list of the last 'bad blocks' that
 	// the client has seen on the network.
 	BadBlocks() []BadBlock
diff --git a/p2p/stream/protocols/sync/stream_test.go b/p2p/stream/protocols/sync/stream_test.go
index 9511de2ce1..3b538c14b8 100644
--- a/p2p/stream/protocols/sync/stream_test.go
+++ b/p2p/stream/protocols/sync/stream_test.go
@@ -296,27 +296,6 @@ func TestSyncStream_HandleGetTrieNodes(t *testing.T) {
 	}
 }
 
-func TestSyncStream_HandleGetNodeData(t *testing.T) {
-	st, remoteSt := makeTestSyncStream()
-
-	go st.run()
-	defer close(st.closeC)
-
-	req := testGetNodeDataRequestMsg
-	b, _ := protobuf.Marshal(req)
-	err := remoteSt.WriteBytes(b)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	time.Sleep(200 * time.Millisecond)
-	receivedBytes, _ := remoteSt.ReadBytes()
-
-	if err := checkGetNodeDataResult(receivedBytes, testGetNodeData); err != nil {
-		t.Fatal(err)
-	}
-}
-
 func makeTestSyncStream() (*syncStream, *testRemoteBaseStream) {
 	localRaw, remoteRaw := makePairP2PStreams()
 	remote := newTestRemoteBaseStream(remoteRaw)

From 99928257d092f7c0786aea0abdae6b9c131c0040 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com>
Date: Wed, 15 Nov 2023 01:14:17 +0800
Subject: [PATCH 090/128] add state sync

---
 api/service/stagedstreamsync/range.go       |   84 +
 api/service/stagedstreamsync/satate_sync.go | 2013 +++++++++++++++++++
 2 files changed, 2097 insertions(+)
 create mode 100644 api/service/stagedstreamsync/range.go
 create mode 100644 api/service/stagedstreamsync/satate_sync.go

diff --git a/api/service/stagedstreamsync/range.go b/api/service/stagedstreamsync/range.go
new file mode 100644
index 0000000000..de18b02ab3
--- /dev/null
+++ b/api/service/stagedstreamsync/range.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stagedstreamsync
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/holiman/uint256"
+)
+
+// hashSpace is the total size of the 256 bit hash space for accounts.
+var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil) + +// hashRange is a utility to handle ranges of hashes, Split up the +// hash-space into sections, and 'walk' over the sections +type hashRange struct { + current *uint256.Int + step *uint256.Int +} + +// newHashRange creates a new hashRange, initiated at the start position, +// and with the step set to fill the desired 'num' chunks +func newHashRange(start common.Hash, num uint64) *hashRange { + left := new(big.Int).Sub(hashSpace, start.Big()) + step := new(big.Int).Div( + new(big.Int).Add(left, new(big.Int).SetUint64(num-1)), + new(big.Int).SetUint64(num), + ) + step256 := new(uint256.Int) + step256.SetFromBig(step) + + return &hashRange{ + current: new(uint256.Int).SetBytes32(start[:]), + step: step256, + } +} + +// Next pushes the hash range to the next interval. +func (r *hashRange) Next() bool { + next, overflow := new(uint256.Int).AddOverflow(r.current, r.step) + if overflow { + return false + } + r.current = next + return true +} + +// Start returns the first hash in the current interval. +func (r *hashRange) Start() common.Hash { + return r.current.Bytes32() +} + +// End returns the last hash in the current interval. +func (r *hashRange) End() common.Hash { + // If the end overflows (non divisible range), return a shorter interval + next, overflow := new(uint256.Int).AddOverflow(r.current, r.step) + if overflow { + return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + return next.SubUint64(next, 1).Bytes32() +} + +// incHash returns the next hash, in lexicographical order (a.k.a plus one) +func incHash(h common.Hash) common.Hash { + var a uint256.Int + a.SetBytes32(h[:]) + a.AddUint64(&a, 1) + return common.Hash(a.Bytes32()) +} \ No newline at end of file diff --git a/api/service/stagedstreamsync/satate_sync.go b/api/service/stagedstreamsync/satate_sync.go new file mode 100644 index 0000000000..e90640a9aa --- /dev/null +++ b/api/service/stagedstreamsync/satate_sync.go @@ -0,0 +1,2013 @@ +package stagedstreamsync + +import ( + "bytes" + "encoding/json" + gomath "math" + "math/big" + "math/rand" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/harmony-one/harmony/common/math" + "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/core/rawdb" + "github.com/harmony-one/harmony/core/state" + "github.com/harmony-one/harmony/internal/utils" + sttypes "github.com/harmony-one/harmony/p2p/stream/types" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "golang.org/x/crypto/sha3" + // "github.com/ethereum/go-ethereum/eth/protocols/snap/range" +) + +const ( + // minRequestSize is the minimum number of bytes to request from a remote peer. + // This number is used as the low cap for account and storage range requests. + // Bytecode and trienode are limited inherently by item count (1). + minRequestSize = 64 * 1024 + + // maxRequestSize is the maximum number of bytes to request from a remote peer. + // This number is used as the high cap for account and storage range requests. + // Bytecode and trienode are limited more explicitly by the caps below. 
+	maxRequestSize = 512 * 1024
+
+	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
+	// single query. If this number is too low, we're not filling responses fully
+	// and waste round trip times. If it's too high, we're capping responses and
+	// waste bandwidth.
+	//
+	// Deployed bytecodes are currently capped at 24KB, so the minimum request
+	// size should be maxRequestSize / 24K. Assuming that most contracts do not
+	// come close to that, requesting 4x should be a good approximation.
+	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
+
+	// maxTrieRequestCount is the maximum number of trie node blobs to request in
+	// a single query. If this number is too low, we're not filling responses fully
+	// and waste round trip times. If it's too high, we're capping responses and
+	// waste bandwidth.
+	maxTrieRequestCount = maxRequestSize / 512
+
+	// trienodeHealRateMeasurementImpact is the impact a single measurement has on
+	// the local node's trienode processing capacity. A value closer to 0 reacts
+	// slower to sudden changes, but it is also more stable against temporary hiccups.
+	trienodeHealRateMeasurementImpact = 0.005
+
+	// minTrienodeHealThrottle is the minimum divisor for throttling trie node
+	// heal requests to avoid overloading the local node and excessively expanding
+	// the state trie breadth wise.
+	minTrienodeHealThrottle = 1
+
+	// maxTrienodeHealThrottle is the maximum divisor for throttling trie node
+	// heal requests to avoid overloading the local node and excessively expanding
+	// the state trie breadth wise.
+	maxTrienodeHealThrottle = maxTrieRequestCount
+
+	// trienodeHealThrottleIncrease is the multiplier for the throttle when the
+	// rate of arriving data is higher than the rate of processing it.
+	trienodeHealThrottleIncrease = 1.33
+
+	// trienodeHealThrottleDecrease is the divisor for the throttle when the
+	// rate of arriving data is lower than the rate of processing it.
+	trienodeHealThrottleDecrease = 1.25
+)
+
+// TrieNodePathSet is a list of trie node paths to retrieve. The first element
+// is a path in the account trie, and any remaining elements are paths in the
+// storage trie of that account. To address an account node, the slice consists
+// of only the account path. There's no need to be able to address both an
+// account node and a storage node in the same request as it cannot happen
+// that a slot is accessed before the account path is fully expanded.
+type TrieNodePathSet [][]byte
+
+var (
+	// accountConcurrency is the number of chunks to split the account trie into
+	// to allow concurrent retrievals.
+	accountConcurrency = 16
+
+	// storageConcurrency is the number of chunks to split a large contract
+	// storage trie into to allow concurrent retrievals.
+	storageConcurrency = 16
+
+	// MaxHash represents the maximum possible hash value.
+	MaxHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+)
+
+// accountTask represents the sync task for a chunk of the account snapshot.
+type accountTask struct {
+	id uint64 // unique id for account task
+
+	// These fields get serialized to leveldb on shutdown
+	Next     common.Hash                    // Next account to sync in this interval
+	Last     common.Hash                    // Last account to sync in this interval
+	SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
+
+	// These fields are internals used during runtime
+	//req *accountRequest // Pending request to fill this task
+	//res *accountResponse // Validate response filling this task
+	pend int // Number of pending subtasks for this round
+
+	needCode  []bool // Flags whether the filling accounts need code retrieval
+	needState []bool // Flags whether the filling accounts need storage retrieval
+	needHeal  []bool // Flags whether the filling account's state was chunked and needs healing
+
+	codeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval
+	stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
+
+	genBatch ethdb.Batch     // Batch used by the node generator
+	genTrie  *trie.StackTrie // Node generator from storage slots
+
+	requested bool
+	done      bool // Flag whether the task can be removed
+
+	res *accountResponse
+}
+
+// accountResponse is an already Merkle-verified remote response to an account
+// range request. It contains the subtrie for the requested account range and
+// the database that's going to be filled with the internal nodes on commit.
+type accountResponse struct {
+	task *accountTask // Task which this request is filling
+
+	hashes   []common.Hash         // Account hashes in the returned range
+	accounts []*types.StateAccount // Expanded accounts in the returned range
+
+	cont bool // Whether the account range has a continuation
+}
+
+// storageTask represents the sync task for a chunk of the storage snapshot.
+type storageTask struct {
+	Next common.Hash // Next account to sync in this interval
+	Last common.Hash // Last account to sync in this interval
+
+	// These fields are internals used during runtime
+	root common.Hash // Storage root hash for this instance
+	//req *storageTaskBundle // Pending request to fill this task
+
+	genBatch ethdb.Batch     // Batch used by the node generator
+	genTrie  *trie.StackTrie // Node generator from storage slots
+
+	requested bool
+	done      bool // Flag whether the task can be removed
+}
+
+// healRequestSort implements the Sort interface, allowing sorting trienode
+// heal requests, which is a prerequisite for merging storage-requests.
+type healRequestSort struct {
+	paths     []string
+	hashes    []common.Hash
+	syncPaths []trie.SyncPath
+}
+
+func (t *healRequestSort) Len() int {
+	return len(t.hashes)
+}
+
+func (t *healRequestSort) Less(i, j int) bool {
+	a := t.syncPaths[i]
+	b := t.syncPaths[j]
+	switch bytes.Compare(a[0], b[0]) {
+	case -1:
+		return true
+	case 1:
+		return false
+	}
+	// identical first part
+	if len(a) < len(b) {
+		return true
+	}
+	if len(b) < len(a) {
+		return false
+	}
+	if len(a) == 2 {
+		return bytes.Compare(a[1], b[1]) < 0
+	}
+	return false
+}
+
+func (t *healRequestSort) Swap(i, j int) {
+	t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
+	t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
+	t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
+}
+
+// Merge merges the pathsets, so that several storage requests concerning the
+// same account are merged into one, to reduce bandwidth.
+// OBS: This operation is moot if t has not first been sorted.
+func (t *healRequestSort) Merge() []TrieNodePathSet {
+	var result []TrieNodePathSet
+	for _, path := range t.syncPaths {
+		pathset := TrieNodePathSet(path)
+		if len(path) == 1 {
+			// It's an account reference.
+			result = append(result, pathset)
+		} else {
+			// It's a storage reference.
+			end := len(result) - 1
+			if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
+				// The account doesn't match last, create a new entry.
+				result = append(result, pathset)
+			} else {
+				// It's the same account as the previous one, add to the storage
+				// paths of that request.
+				result[end] = append(result[end], pathset[1])
+			}
+		}
+	}
+	return result
+}
+
+type storageTaskBundle struct {
+	id       uint64 // unique id for storage task bundle
+	accounts []common.Hash
+	roots    []common.Hash
+	mainTask *accountTask
+	subtask  *storageTask
+	origin   common.Hash
+	limit    common.Hash
+}
+
+// healTask represents the sync task for healing the snap-synced chunk boundaries.
+type healTask struct {
+	id          uint64
+	trieTasks   map[string]common.Hash   // Set of trie node tasks currently queued for retrieval, indexed by node path
+	codeTasks   map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash
+	paths       []string
+	hashes      []common.Hash
+	pathsets    []TrieNodePathSet
+	task        *healTask
+	root        common.Hash
+	byteCodeReq bool
+}
+
+type tasks struct {
+	accountTasks map[uint64]*accountTask       // Current account task set being synced
+	storageTasks map[uint64]*storageTaskBundle // Set of storage task bundles currently queued for retrieval, indexed by bundle id
+	codeTasks    map[common.Hash]struct{}      // Set of byte code tasks currently queued for retrieval, indexed by hash
+	healer       map[uint64]*healTask
+	snapped      bool // Flag to signal that snap phase is done
+}
+
+func newTasks() *tasks {
+	return &tasks{
+		accountTasks: make(map[uint64]*accountTask, 0),
+		storageTasks: make(map[uint64]*storageTaskBundle, 0),
+		codeTasks:    make(map[common.Hash]struct{}),
+		healer:       make(map[uint64]*healTask, 0),
+		snapped:      false,
+	}
+}
+
+func (t *tasks) addAccountTask(accountTaskIndex uint64, ct *accountTask) {
+	t.accountTasks[accountTaskIndex] = ct
+}
+
+func (t *tasks) getAccountTask(accountTaskIndex uint64) *accountTask {
+	if _, ok := t.accountTasks[accountTaskIndex]; ok {
+		return t.accountTasks[accountTaskIndex]
+	}
+	return nil
+}
+
+func (t *tasks) deleteAccountTask(accountTaskIndex uint64) {
+	if _, ok := t.accountTasks[accountTaskIndex]; ok {
+		delete(t.accountTasks, accountTaskIndex)
+	}
+	// t.accountTasks = append(t.accountTasks[:accountTaskIndex], t.accountTasks[accountTaskIndex+1:]...)
+} + +func (t *tasks) addCodeTask(h common.Hash) { + t.codeTasks[h] = struct{}{} +} + +func (t *tasks) deleteCodeTask(hash common.Hash) { + if _, ok := t.codeTasks[hash]; ok { + delete(t.codeTasks, hash) + } +} + +func (t *tasks) addStorageTaskBundle(storageBundleIndex uint64, storages *storageTaskBundle) { + t.storageTasks[storageBundleIndex] = storages +} + +func (t *tasks) deleteStorageTaskBundle(storageBundleIndex uint64) { + if _, ok := t.storageTasks[storageBundleIndex]; ok { + delete(t.storageTasks, storageBundleIndex) + } +} + +func (t *tasks) addHealerTask(taskID uint64, task *healTask) { + t.healer[taskID] = task +} + +func (t *tasks) deleteHealerTask(taskID uint64) { + if _, ok := t.healer[taskID]; ok { + delete(t.healer, taskID) + } +} + +func (t *tasks) addHealerTrieTask(taskID uint64, path string, h common.Hash) { + if _, ok := t.healer[taskID]; ok { + t.healer[taskID].trieTasks[path] = h + } +} + +func (t *tasks) getHealerTrieTask(taskID uint64, path string) common.Hash { + if _, ok := t.healer[taskID]; ok { + return t.healer[taskID].trieTasks[path] + } + return common.Hash{} +} + +func (t *tasks) addHealerTrieCodeTask(taskID uint64, hash common.Hash, v struct{}) { + if _, ok := t.healer[taskID]; ok { + t.healer[taskID].codeTasks[hash] = v + } +} + +func (t *tasks) getHealerTrieCodeTask(taskID uint64, h common.Hash) struct{} { + if _, ok := t.healer[taskID]; ok { + return t.healer[taskID].codeTasks[h] + } + return struct{}{} +} + +// SyncProgress is a database entry to allow suspending and resuming a snapshot state +// sync. Opposed to full and fast sync, there is no way to restart a suspended +// snap sync without prior knowledge of the suspension point. +type SyncProgress struct { + Tasks map[uint64]*accountTask // The suspended account tasks (contract tasks within) + + // Status report during syncing phase + AccountSynced uint64 // Number of accounts downloaded + AccountBytes common.StorageSize // Number of account trie bytes persisted to disk + BytecodeSynced uint64 // Number of bytecodes downloaded + BytecodeBytes common.StorageSize // Number of bytecode bytes downloaded + StorageSynced uint64 // Number of storage slots downloaded + StorageBytes common.StorageSize // Number of storage trie bytes persisted to disk + + // Status report during healing phase + TrienodeHealSynced uint64 // Number of state trie nodes downloaded + TrienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk + BytecodeHealSynced uint64 // Number of bytecodes downloaded + BytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk +} + +// FullStateDownloadManager is the helper structure for get blocks request management +type FullStateDownloadManager struct { + bc core.BlockChain + tx kv.RwTx + + db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) + scheme string // Node scheme used in node database + + tasks *tasks + requesting *tasks + processing *tasks + retries *tasks + + root common.Hash // Current state trie root being synced + snapped bool // Flag to signal that snap phase is done + // healer *healTask // Current state healing task being executed + + protocol syncProtocol + scheduler *trie.Sync // State trie sync scheduler defining the tasks + keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with + concurrency int + logger zerolog.Logger + lock sync.RWMutex + + numUncommitted int + bytesUncommitted int + + accountSynced uint64 // Number of accounts downloaded + accountBytes common.StorageSize // Number of 
account trie bytes persisted to disk + bytecodeSynced uint64 // Number of bytecodes downloaded + bytecodeBytes common.StorageSize // Number of bytecode bytes downloaded + storageSynced uint64 // Number of storage slots downloaded + storageBytes common.StorageSize // Number of storage trie bytes persisted to disk + + pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown + + stateWriter ethdb.Batch // Shared batch writer used for persisting raw states + accountHealed uint64 // Number of accounts downloaded during the healing stage + accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage + storageHealed uint64 // Number of storage slots downloaded during the healing stage + storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage + + trienodeHealRate float64 // Average heal rate for processing trie node data + trienodeHealPend atomic.Uint64 // Number of trie nodes currently pending for processing + trienodeHealThrottle float64 // Divisor for throttling the amount of trienode heal data requested + trienodeHealThrottled time.Time // Timestamp the last time the throttle was updated + + trienodeHealSynced uint64 // Number of state trie nodes downloaded + trienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk + trienodeHealDups uint64 // Number of state trie nodes already processed + trienodeHealNops uint64 // Number of state trie nodes not requested + bytecodeHealSynced uint64 // Number of bytecodes downloaded + bytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk + bytecodeHealDups uint64 // Number of bytecodes already processed + bytecodeHealNops uint64 // Number of bytecodes not requested +} + +func newFullStateDownloadManager(db ethdb.KeyValueStore, + scheme string, + tx kv.RwTx, + bc core.BlockChain, + concurrency int, + logger zerolog.Logger) *FullStateDownloadManager { + + return &FullStateDownloadManager{ + db: db, + scheme: scheme, + bc: bc, + stateWriter: db.NewBatch(), + tx: tx, + keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), + concurrency: concurrency, + logger: logger, + tasks: newTasks(), + requesting: newTasks(), + processing: newTasks(), + retries: newTasks(), + } +} + +func (s *FullStateDownloadManager) setRootHash(root common.Hash) { + s.root = root + s.scheduler = state.NewStateSync(root, s.db, s.onHealState, s.scheme) + s.loadSyncStatus() + // s.sched = state.NewStateSync(root, s.bc.ChainDb(), nil, rawdb.HashScheme) +} + +func (s *FullStateDownloadManager) taskDone(taskID uint64) { + s.tasks.accountTasks[taskID].done = true +} + +// SlimAccount is a modified version of an Account, where the root is replaced +// with a byte slice. This format can be used to represent full-consensus format +// or slim format which replaces the empty root and code hash as nil byte slice. +type SlimAccount struct { + Nonce uint64 + Balance *big.Int + Root []byte // Nil if root equals to types.EmptyRootHash + CodeHash []byte // Nil if hash equals to types.EmptyCodeHash +} + +// SlimAccountRLP encodes the state account in 'slim RLP' format. 
+func (s *FullStateDownloadManager) SlimAccountRLP(account types.StateAccount) []byte { + slim := SlimAccount{ + Nonce: account.Nonce, + Balance: account.Balance, + } + if account.Root != types.EmptyRootHash { + slim.Root = account.Root[:] + } + if !bytes.Equal(account.CodeHash, types.EmptyCodeHash[:]) { + slim.CodeHash = account.CodeHash + } + data, err := rlp.EncodeToBytes(slim) + if err != nil { + panic(err) + } + return data +} + +// FullAccount decodes the data on the 'slim RLP' format and returns +// the consensus format account. +func FullAccount(data []byte) (*types.StateAccount, error) { + var slim SlimAccount + if err := rlp.DecodeBytes(data, &slim); err != nil { + return nil, err + } + var account types.StateAccount + account.Nonce, account.Balance = slim.Nonce, slim.Balance + + // Interpret the storage root and code hash in slim format. + if len(slim.Root) == 0 { + account.Root = types.EmptyRootHash + } else { + account.Root = common.BytesToHash(slim.Root) + } + if len(slim.CodeHash) == 0 { + account.CodeHash = types.EmptyCodeHash[:] + } else { + account.CodeHash = slim.CodeHash + } + return &account, nil +} + +// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format. +func FullAccountRLP(data []byte) ([]byte, error) { + account, err := FullAccount(data) + if err != nil { + return nil, err + } + return rlp.EncodeToBytes(account) +} + +// onHealState is a callback method to invoke when a flat state(account +// or storage slot) is downloaded during the healing stage. The flat states +// can be persisted blindly and can be fixed later in the generation stage. +// Note it's not concurrent safe, please handle the concurrent issue outside. +func (s *FullStateDownloadManager) onHealState(paths [][]byte, value []byte) error { + if len(paths) == 1 { + var account types.StateAccount + if err := rlp.DecodeBytes(value, &account); err != nil { + return nil // Returning the error here would drop the remote peer + } + blob := s.SlimAccountRLP(account) + rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob) + s.accountHealed += 1 + s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob)) + } + if len(paths) == 2 { + rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value) + s.storageHealed += 1 + s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value)) + } + if s.stateWriter.ValueSize() > ethdb.IdealBatchSize { + s.stateWriter.Write() // It's fine to ignore the error here + s.stateWriter.Reset() + } + return nil +} + +func (s *FullStateDownloadManager) commitHealer(force bool) { + if !force && s.scheduler.MemSize() < ethdb.IdealBatchSize { + return + } + batch := s.db.NewBatch() + if err := s.scheduler.Commit(batch); err != nil { + utils.Logger().Error().Err(err).Msg("Failed to commit healing data") + } + if err := batch.Write(); err != nil { + log.Crit("Failed to persist healing data", "err", err) + } + utils.Logger().Debug().Str("type", "trienodes").Interface("bytes", common.StorageSize(batch.ValueSize())).Msg("Persisted set of healing data") +} + +// getNextBatch returns objects with a maximum of n state download +// tasks to send to the remote peer. 
+func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, + codes []common.Hash, + storages *storageTaskBundle, + healtask *healTask, + codetask *healTask, + err error) { + + s.lock.Lock() + defer s.lock.Unlock() + + cap := StatesPerRequest + + accounts, codes, storages, healtask, codetask = s.getBatchFromRetries(cap) + nItems := len(accounts) + len(codes) + len(storages.roots) + len(healtask.hashes) + len(codetask.hashes) + cap -= nItems + + if cap == 0 { + return + } + + if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 { + utils.Logger().Debug().Msg("Snapshot sync already completed") + return + } + + defer func() { // Persist any progress, independent of failure + for _, task := range s.tasks.accountTasks { + s.forwardAccountTask(task) + } + s.cleanAccountTasks() + s.saveSyncStatus() + }() + + // Flush out the last committed raw states + defer func() { + if s.stateWriter.ValueSize() > 0 { + s.stateWriter.Write() + s.stateWriter.Reset() + } + }() + + // commit any trie- and bytecode-healing data. + defer s.commitHealer(true) + + // Whether sync completed or not, disregard any future packets + defer func() { + utils.Logger().Debug().Interface("root", s.root).Msg("Terminating snapshot sync cycle") + }() + + // Refill available tasks from the scheduler. + if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 { + utils.Logger().Debug().Msg("Snapshot sync already completed") + return + } + + // if err = s.fillTasks(cap); err != nil { + // return + // } + + includeHealtasks := true + if healtask != nil || codetask != nil { + includeHealtasks = false + } + newAccounts, newCodes, newStorageTaskBundle, unprocessedHealtask, unprocessedCodetask := s.getBatchFromUnprocessed(cap, includeHealtasks) + accounts = append(accounts, newAccounts...) + codes = append(codes, newCodes...) + storages = newStorageTaskBundle + if includeHealtasks { + healtask = unprocessedHealtask + codetask = unprocessedCodetask + } + + return +} + +// saveSyncStatus marshals the remaining sync tasks into leveldb. +func (s *FullStateDownloadManager) saveSyncStatus() { + // Serialize any partial progress to disk before spinning down + for _, task := range s.tasks.accountTasks { + if err := task.genBatch.Write(); err != nil { + utils.Logger().Debug(). + Err(err). + Msg("Failed to persist account slots") + } + for _, subtasks := range task.SubTasks { + for _, subtask := range subtasks { + if err := subtask.genBatch.Write(); err != nil { + utils.Logger().Debug(). + Err(err). + Msg("Failed to persist storage slots") + } + } + } + } + // Store the actual progress markers + progress := &SyncProgress{ + Tasks: s.tasks.accountTasks, + AccountSynced: s.accountSynced, + AccountBytes: s.accountBytes, + BytecodeSynced: s.bytecodeSynced, + BytecodeBytes: s.bytecodeBytes, + StorageSynced: s.storageSynced, + StorageBytes: s.storageBytes, + TrienodeHealSynced: s.trienodeHealSynced, + TrienodeHealBytes: s.trienodeHealBytes, + BytecodeHealSynced: s.bytecodeHealSynced, + BytecodeHealBytes: s.bytecodeHealBytes, + } + status, err := json.Marshal(progress) + if err != nil { + panic(err) // This can only fail during implementation + } + rawdb.WriteSnapshotSyncStatus(s.db, status) +} + +// loadSyncStatus retrieves a previously aborted sync status from the database, +// or generates a fresh one if none is available. 
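+//
+// A minimal sketch of the recovery path (mirrors saveSyncStatus above; for
+// illustration only, not part of this change):
+//
+//	if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
+//		var progress SyncProgress
+//		_ = json.Unmarshal(status, &progress) // decode failure => fresh sync
+//	}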
+func (s *FullStateDownloadManager) loadSyncStatus() { + var progress SyncProgress + + if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil { + if err := json.Unmarshal(status, &progress); err != nil { + utils.Logger().Error(). + Err(err). + Msg("Failed to decode snap sync status") + } else { + for _, task := range progress.Tasks { + utils.Logger().Debug(). + Interface("from", task.Next). + Interface("last", task.Last). + Msg("Scheduled account sync task") + } + s.tasks.accountTasks = progress.Tasks + for _, task := range s.tasks.accountTasks { + // task := task // closure for task.genBatch in the stacktrie writer callback + + task.genBatch = ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.accountBytes += common.StorageSize(len(key) + len(value)) + }, + } + // options := trie.NewStackTrieOptions() + writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, blob, s.scheme) + } + task.genTrie = trie.NewStackTrie(writeFn) + for accountHash, subtasks := range task.SubTasks { + for _, subtask := range subtasks { + subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback + + subtask.genBatch = ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + // owner := accountHash // local assignment for stacktrie writer closure + writeFn = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(subtask.genBatch, accountHash, path, hash, blob, s.scheme) + } + subtask.genTrie = trie.NewStackTrie(writeFn) + } + } + } + s.lock.Lock() + defer s.lock.Unlock() + + s.snapped = len(s.tasks.accountTasks) == 0 + + s.accountSynced = progress.AccountSynced + s.accountBytes = progress.AccountBytes + s.bytecodeSynced = progress.BytecodeSynced + s.bytecodeBytes = progress.BytecodeBytes + s.storageSynced = progress.StorageSynced + s.storageBytes = progress.StorageBytes + + s.trienodeHealSynced = progress.TrienodeHealSynced + s.trienodeHealBytes = progress.TrienodeHealBytes + s.bytecodeHealSynced = progress.BytecodeHealSynced + s.bytecodeHealBytes = progress.BytecodeHealBytes + return + } + } + // Either we've failed to decode the previous state, or there was none. + // Start a fresh sync by chunking up the account range and scheduling + // them for retrieval. 
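+	//
+	// Illustrative arithmetic (the accountConcurrency value is assumed here):
+	// with 16 concurrent tasks, step is 2^256/16 - 1, so task 0 covers
+	// hashes [0x00...0, 0x0ff...f], task 1 starts at 0x100...0, and the
+	// final task is clamped to MaxHash to absorb integer-division rounding.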
+	s.tasks.accountTasks = nil
+	s.accountSynced, s.accountBytes = 0, 0
+	s.bytecodeSynced, s.bytecodeBytes = 0, 0
+	s.storageSynced, s.storageBytes = 0, 0
+	s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
+	s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
+
+	var next common.Hash
+	step := new(big.Int).Sub(
+		new(big.Int).Div(
+			new(big.Int).Exp(common.Big2, common.Big256, nil),
+			big.NewInt(int64(accountConcurrency)),
+		), common.Big1,
+	)
+	for i := 0; i < accountConcurrency; i++ {
+		last := common.BigToHash(new(big.Int).Add(next.Big(), step))
+		if i == accountConcurrency-1 {
+			// Make sure we don't overflow if the step is not a proper divisor
+			last = MaxHash
+		}
+		batch := ethdb.HookedBatch{
+			Batch: s.db.NewBatch(),
+			OnPut: func(key []byte, value []byte) {
+				s.accountBytes += common.StorageSize(len(key) + len(value))
+			},
+		}
+		// options := trie.NewStackTrieOptions()
+		writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+			rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, blob, s.scheme)
+		}
+		// Create a unique id for the task
+		var taskID uint64
+		for {
+			taskID = uint64(rand.Int63())
+			if taskID == 0 {
+				continue
+			}
+			if _, ok := s.tasks.accountTasks[taskID]; ok {
+				continue
+			}
+			break
+		}
+		s.tasks.addAccountTask(taskID, &accountTask{
+			id:       taskID,
+			Next:     next,
+			Last:     last,
+			SubTasks: make(map[common.Hash][]*storageTask),
+			genBatch: batch,
+			genTrie:  trie.NewStackTrie(writeFn),
+		})
+		utils.Logger().Debug().
+			Interface("from", next).
+			Interface("last", last).
+			Msg("Created account sync task")
+
+		next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
+	}
+}
+
+// cleanAccountTasks removes account range retrieval tasks that have already been
+// completed.
+func (s *FullStateDownloadManager) cleanAccountTasks() {
+	// If the sync was already done before, don't even bother
+	if len(s.tasks.accountTasks) == 0 {
+		return
+	}
+	// Sync wasn't finished previously, check for any task that can be finalized
+	//for i := 0; i < len(s.tasks.accountTasks); i++ {
+	for taskID, _ := range s.tasks.accountTasks {
+		if s.tasks.accountTasks[taskID].done {
+			//s.tasks.accountTasks = append(s.tasks.accountTasks[:i], s.tasks.accountTasks[i+1:]...)
+			//i--
+			s.tasks.deleteAccountTask(taskID)
+		}
+	}
+	// If everything was just finalized, generate the account trie and start the heal phase
+	if len(s.tasks.accountTasks) == 0 {
+		s.lock.Lock()
+		s.snapped = true
+		s.lock.Unlock()
+
+		// Push the final sync report
+		//s.reportSyncProgress(true)
+	}
+}
+
+// cleanStorageTasks iterates over all the account tasks and storage sub-tasks
+// within, cleaning any that have been completed.
+func (s *FullStateDownloadManager) cleanStorageTasks() {
+	for _, task := range s.tasks.accountTasks {
+		for account, subtasks := range task.SubTasks {
+			// Remove storage range retrieval tasks that completed
+			for j := 0; j < len(subtasks); j++ {
+				if subtasks[j].done {
+					subtasks = append(subtasks[:j], subtasks[j+1:]...)
+					j--
+				}
+			}
+			if len(subtasks) > 0 {
+				task.SubTasks[account] = subtasks
+				continue
+			}
+			// If all storage chunks are done, mark the account as done too
+			for j, hash := range task.res.hashes {
+				if hash == account {
+					task.needState[j] = false
+				}
+			}
+			delete(task.SubTasks, account)
+			task.pend--
+
+			// If this was the last pending task, forward the account task
+			if task.pend == 0 {
+				s.forwardAccountTask(task)
+			}
+		}
+	}
+}
+
+// forwardAccountTask takes a filled account task and persists anything available
+// into the database, after which it forwards the next account marker so that the
+// task's next chunk may be filled.
+func (s *FullStateDownloadManager) forwardAccountTask(task *accountTask) {
+	// Remove any pending delivery
+	res := task.res
+	if res == nil {
+		return // nothing to forward
+	}
+	task.res = nil
+
+	// Persist the received account segments. These flat states may be
+	// outdated during the sync, but they can be fixed later during
+	// snapshot generation.
+	oldAccountBytes := s.accountBytes
+
+	batch := ethdb.HookedBatch{
+		Batch: s.db.NewBatch(),
+		OnPut: func(key []byte, value []byte) {
+			s.accountBytes += common.StorageSize(len(key) + len(value))
+		},
+	}
+	for i, hash := range res.hashes {
+		if task.needCode[i] || task.needState[i] {
+			break
+		}
+		slim := s.SlimAccountRLP(*res.accounts[i])
+		rawdb.WriteAccountSnapshot(batch, hash, slim)
+
+		// If the task is complete, drop it into the stack trie to generate
+		// account trie nodes for it
+		if !task.needHeal[i] {
+			full, err := FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
+			if err != nil {
+				panic(err) // Really shouldn't ever happen
+			}
+			task.genTrie.Update(hash[:], full)
+		}
+	}
+	// Flush anything written just now and update the stats
+	if err := batch.Write(); err != nil {
+		utils.Logger().Error().Err(err).Msg("Failed to persist accounts")
+	}
+	s.accountSynced += uint64(len(res.accounts))
+
+	// Task filling persisted, push the chunk marker forward to the first
+	// account still missing data.
+	for i, hash := range res.hashes {
+		if task.needCode[i] || task.needState[i] {
+			return
+		}
+		task.Next = incHash(hash)
+	}
+	// All accounts marked as complete, track if the entire task is done
+	task.done = !res.cont
+
+	// Stack trie could have generated trie nodes, push them to disk (we need to
+	// flush after finalizing task.done). It's fine even if we crash and lose this
+	// write, as it will only cause more data to be downloaded during heal.
+	if task.done {
+		task.genTrie.Commit()
+	}
+	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
+		if err := task.genBatch.Write(); err != nil {
+			utils.Logger().Error().Err(err).Msg("Failed to persist stack account")
+		}
+		task.genBatch.Reset()
+	}
+	utils.Logger().Debug().
+		Int("accounts", len(res.accounts)).
+		Float64("bytes", float64(s.accountBytes-oldAccountBytes)).
+		Msg("Persisted range of accounts")
+}
+
+// updateStats bumps the various state sync progress counters and displays a log
+// message for the user to see.
+func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected int, duration time.Duration) {
+	// TODO: here it updates the stats for total pending, processed, duplicates and unexpected
+
+	// For now, we just log the current stats
+	if written > 0 || duplicate > 0 || unexpected > 0 {
+		utils.Logger().Info().
+			Int("count", written).
+			Int("duplicate", duplicate).
+			Int("unexpected", unexpected).
+			Msg("Imported new state entries")
+	}
+}
+
+// getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download
+// tasks to send to the remote peer.
+func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, includeHealtasks bool) (
+	accounts []*accountTask,
+	codes []common.Hash,
+	storages *storageTaskBundle,
+	healtask *healTask,
+	codetask *healTask) {
+
+	// Bytecode retrievals are favored over trie nodes, as bytecodes can be
+	// written to disk and forgotten about.
+	codes = make([]common.Hash, 0, n)
+	accounts = make([]*accountTask, 0, n)
+
+	for i, task := range s.tasks.accountTasks {
+		// Stop when we've gathered enough requests
+		if len(accounts) == n {
+			return
+		}
+		// Skip tasks that have already been requested
+		if task.requested {
+			continue
+		}
+		if task.id == 0 {
+			continue
+		}
+		s.tasks.accountTasks[i].requested = true
+		accounts = append(accounts, task)
+		s.requesting.addAccountTask(task.id, task)
+		// s.tasks.deleteAccountTask(task)
+	}
+
+	cap := n - len(accounts)
+
+	for _, task := range s.tasks.accountTasks {
+		// Skip tasks that are already retrieving (or done with) all codes
+		if len(task.codeTasks) == 0 {
+			continue
+		}
+
+		for hash := range task.codeTasks {
+			delete(task.codeTasks, hash)
+			codes = append(codes, hash)
+			s.requesting.addCodeTask(hash)
+			s.tasks.deleteCodeTask(hash)
+			// Stop when we've gathered enough requests
+			if len(codes) >= cap {
+				return
+			}
+		}
+	}
+
+	cap = n - len(accounts) - len(codes)
+
+	for accTaskID, task := range s.tasks.accountTasks {
+		// Skip tasks that are already retrieving (or done with) all small states
+		if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
+			continue
+		}
+
+		// TODO: check cap calculations (shouldn't give us big chunk)
+		if cap > maxRequestSize {
+			cap = maxRequestSize
+		}
+		if cap < minRequestSize { // Don't bother with requests below a bare minimum size
+			cap = minRequestSize
+		}
+		storageSets := cap / 1024
+
+		storages = &storageTaskBundle{
+			accounts: make([]common.Hash, 0, storageSets),
+			roots:    make([]common.Hash, 0, storageSets),
+			mainTask: task,
+		}
+
+		// Create a unique id for the task bundle
+		var taskID uint64
+		for {
+			taskID = uint64(rand.Int63())
+			if taskID == 0 {
+				continue
+			}
+			if _, ok := s.tasks.storageTasks[taskID]; ok {
+				continue
+			}
+			break
+		}
+		storages.id = taskID
+
+		for account, subtasks := range task.SubTasks {
+			// Find the first subtask that has not been requested yet
+			for i, st := range subtasks {
+				// Skip any subtasks already filling
+				if st.requested {
+					continue
+				}
+				// Found an incomplete storage chunk, schedule it
+				storages.accounts = append(storages.accounts, account)
+				storages.roots = append(storages.roots, st.root)
+				storages.subtask = st
+				s.tasks.accountTasks[accTaskID].SubTasks[account][i].requested = true
+				break // Large contract chunks are downloaded individually
+			}
+			if storages.subtask != nil {
+				break // Large contract chunks are downloaded individually
+			}
+		}
+		if storages.subtask == nil {
+			// No large contract required retrieval, but small ones available
+			for account, root := range task.stateTasks {
+				delete(task.stateTasks, account)
+
+				storages.accounts = append(storages.accounts, account)
+				storages.roots = append(storages.roots, root)
+
+				if len(storages.accounts) >= storageSets {
+					break
+				}
+			}
+		}
+		// If nothing was found, it means this task is actually already fully
+		// retrieving, but large contracts are hard to detect. Skip to the next.
+		if len(storages.accounts) == 0 {
+			continue
+		}
+		if storages.subtask != nil {
+			storages.origin = storages.subtask.Next
+			storages.limit = storages.subtask.Last
+		}
+		s.tasks.addStorageTaskBundle(taskID, storages)
+		s.requesting.addStorageTaskBundle(taskID, storages)
+
+		cap -= len(storages.accounts)
+
+		if cap <= 0 {
+			break
+		}
+	}
+
+	if len(accounts)+len(codes) > 0 || (storages != nil && len(storages.accounts) > 0) {
+		return
+	}
+
+	if !includeHealtasks {
+		return
+	}
+
+	// Sync phase done, run heal phase
+	cap = n
+
+	// Iterate over pending heal tasks, refilling the queue from the state
+	// sync scheduler as needed
+	for (s.tasks.healer[0] != nil && len(s.tasks.healer[0].hashes) > 0) || s.scheduler.Pending() > 0 {
+		// If there are not enough trie tasks queued to fully assign, fill the
+		// queue from the state sync scheduler. The trie sync schedules these
+		// together with bytecodes, so we need to queue them combined.
+
+		// Index 0 holds the aggregate task; it is split into batches below
+		if s.tasks.healer[0] == nil {
+			s.tasks.healer[0] = &healTask{
+				trieTasks: make(map[string]common.Hash),
+				codeTasks: make(map[common.Hash]struct{}),
+			}
+		}
+
+		mPaths, mHashes, mCodes := s.scheduler.Missing(n)
+		for i, path := range mPaths {
+			s.tasks.healer[0].trieTasks[path] = mHashes[i]
+		}
+		for _, hash := range mCodes {
+			s.tasks.healer[0].codeTasks[hash] = struct{}{}
+		}
+
+		// If all the heal tasks are bytecodes or already downloading, bail
+		if len(s.tasks.healer[0].trieTasks) == 0 {
+			return
+		}
+		// Generate the network query and send it to the peer
+		if cap > maxTrieRequestCount {
+			cap = maxTrieRequestCount
+		}
+		cap = int(float64(cap) / s.trienodeHealThrottle)
+		if cap <= 0 {
+			cap = 1
+		}
+		var (
+			hashes   = make([]common.Hash, 0, cap)
+			paths    = make([]string, 0, cap)
+			pathsets = make([]TrieNodePathSet, 0, cap)
+		)
+		for path, hash := range s.tasks.healer[0].trieTasks {
+			delete(s.tasks.healer[0].trieTasks, path)
+
+			paths = append(paths, path)
+			hashes = append(hashes, hash)
+			if len(paths) >= cap {
+				break
+			}
+		}
+
+		// Group requests by account hash
+		paths, hashes, _, pathsets = sortByAccountPath(paths, hashes)
+
+		// Create a unique id for the healer task
+		var taskID uint64
+		for {
+			taskID = uint64(rand.Int63())
+			if taskID == 0 {
+				continue
+			}
+			if _, ok := s.tasks.healer[taskID]; ok {
+				continue
+			}
+			break
+		}
+
+		healtask = &healTask{
+			id:          taskID,
+			hashes:      hashes,
+			paths:       paths,
+			pathsets:    pathsets,
+			root:        s.root,
+			task:        s.tasks.healer[0],
+			byteCodeReq: false,
+		}
+
+		s.tasks.healer[taskID] = healtask
+		s.requesting.addHealerTask(taskID, healtask)
+
+		cap = n - len(hashes)
+	}
+
+	// Next, try to gather bytecode heal tasks, refilling from the scheduler
+	// as needed
+	for (s.tasks.healer[0] != nil && len(s.tasks.healer[0].codeTasks) > 0) || s.scheduler.Pending() > 0 {
+		// If there are not enough bytecode tasks queued to fully assign, fill
+		// the queue from the state sync scheduler. The trie sync schedules
+		// these together with trie nodes, so we need to queue them combined.
+		if s.tasks.healer[0] == nil {
+			s.tasks.healer[0] = &healTask{
+				trieTasks: make(map[string]common.Hash),
+				codeTasks: make(map[common.Hash]struct{}),
+			}
+		}
+
+		mPaths, mHashes, mCodes := s.scheduler.Missing(cap)
+		for i, path := range mPaths {
+			s.tasks.healer[0].trieTasks[path] = mHashes[i]
+		}
+		for _, hash := range mCodes {
+			s.tasks.healer[0].codeTasks[hash] = struct{}{}
+		}
+
+		// If all the heal tasks are trienodes or already downloading, bail
+		if len(s.tasks.healer[0].codeTasks) == 0 {
+			return
+		}
+		// Generate the network query and send it to the peer
+		if cap > maxCodeRequestCount {
+			cap = maxCodeRequestCount
+		}
+		hashes := make([]common.Hash, 0, cap)
+		for hash := range s.tasks.healer[0].codeTasks {
+			delete(s.tasks.healer[0].codeTasks, hash)
+
+			hashes = append(hashes, hash)
+			if len(hashes) >= cap {
+				break
+			}
+		}
+
+		// Create a unique id for the healer task
+		var taskID uint64
+		for {
+			taskID = uint64(rand.Int63())
+			if taskID == 0 {
+				continue
+			}
+			if _, ok := s.tasks.healer[taskID]; ok {
+				continue
+			}
+			break
+		}
+
+		codetask = &healTask{
+			id:          taskID,
+			hashes:      hashes,
+			task:        s.tasks.healer[0],
+			byteCodeReq: true,
+		}
+
+		s.tasks.healer[taskID] = codetask
+		s.requesting.addHealerTask(taskID, codetask)
+	}
+
+	return
+}
+
+// sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
+// the TrieNodePaths and merges paths which belong to the same account path.
+func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
+	var syncPaths []trie.SyncPath
+	for _, path := range paths {
+		syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
+	}
+	n := &healRequestSort{paths, hashes, syncPaths}
+	sort.Sort(n)
+	pathsets := n.Merge()
+	return n.paths, n.hashes, n.syncPaths, pathsets
+}
+
+// getBatchFromRetries gathers previously failed state download tasks from the
+// retry queues, so they can be requested again.
+func (s *FullStateDownloadManager) getBatchFromRetries(n int) (
+	accounts []*accountTask,
+	codes []common.Hash,
+	storages *storageTaskBundle,
+	healtask *healTask,
+	codetask *healTask) {
+
+	accounts = make([]*accountTask, 0, n)
+	codes = make([]common.Hash, 0, n)
+
+	for _, task := range s.retries.accountTasks {
+		// Stop when we've gathered enough requests
+		if len(accounts) == n {
+			return
+		}
+		accounts = append(accounts, task)
+		s.requesting.addAccountTask(task.id, task)
+		s.retries.deleteAccountTask(task.id)
+	}
+
+	cap := n - len(accounts)
+
+	for code := range s.retries.codeTasks {
+		// Stop when we've gathered enough requests
+		if len(codes) >= cap {
+			return
+		}
+		codes = append(codes, code)
+		s.requesting.addCodeTask(code)
+		s.retries.deleteCodeTask(code)
+	}
+
+	cap = n - len(accounts) - len(codes)
+
+	if len(s.retries.storageTasks) > 0 {
+		// Grab one pending storage task bundle from the retry map
+		for _, st := range s.retries.storageTasks {
+			storages = &storageTaskBundle{
+				id:       st.id,
+				accounts: st.accounts,
+				roots:    st.roots,
+				mainTask: st.mainTask,
+				subtask:  st.subtask,
+				limit:    st.limit,
+				origin:   st.origin,
+			}
+			break
+		}
+		s.requesting.addStorageTaskBundle(storages.id, storages)
+		s.retries.deleteStorageTaskBundle(storages.id)
+	}
+
+	if len(accounts)+len(codes) > 0 || (storages != nil && len(storages.accounts) > 0) {
+		return
+	}
+
+	cap = n
+
+	if len(s.retries.healer) > 0 {
+		foundHealTask := false
+		foundByteCodeTask := false
+
+		for id, task := range s.retries.healer {
+			if !foundHealTask && !task.byteCodeReq {
+				healtask = &healTask{
+					id:          id,
+					hashes:      task.hashes,
+					paths:       task.paths,
+					pathsets:    task.pathsets,
+					root:        task.root,
+					task:        task.task,
+					byteCodeReq: task.byteCodeReq,
+				}
+				s.requesting.addHealerTask(id, task)
+				s.retries.deleteHealerTask(id)
+				foundHealTask = true
+			}
+			if !foundByteCodeTask && task.byteCodeReq {
+				codetask = &healTask{
+					id:          id,
+					hashes:      task.hashes,
+					paths:       task.paths,
+					pathsets:    task.pathsets,
+					root:        task.root,
+					task:        task.task,
+					byteCodeReq: task.byteCodeReq,
+				}
+				s.requesting.addHealerTask(id, task)
+				s.retries.deleteHealerTask(id)
+				foundByteCodeTask = true
+			}
+			if foundHealTask && foundByteCodeTask {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// HandleRequestError handles the error result
+func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask,
+	codes []common.Hash,
+	storages *storageTaskBundle,
+	healtask *healTask,
+	codetask *healTask,
+	streamID sttypes.StreamID, err error) {
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	for _, task := range accounts {
+		s.requesting.deleteAccountTask(task.id)
+		s.retries.addAccountTask(task.id, task)
+	}
+
+	for _, code := range codes {
+		s.requesting.deleteCodeTask(code)
+		s.retries.addCodeTask(code)
+	}
+
+	if storages != nil {
+		s.requesting.deleteStorageTaskBundle(storages.id)
+		s.retries.addStorageTaskBundle(storages.id, storages)
+	}
+
+	if healtask != nil {
+		s.requesting.deleteHealerTask(healtask.id)
+		s.retries.addHealerTask(healtask.id, healtask)
+	}
+
+	if codetask != nil {
+		s.requesting.deleteHealerTask(codetask.id)
+		s.retries.addHealerTask(codetask.id, codetask)
+	}
+}
+
+// HandleAccountRequestResult handles get account ranges result
+func (s *FullStateDownloadManager) HandleAccountRequestResult(task *accountTask, // Task which this request is filling
+	hashes []common.Hash, // Account hashes in the returned range
+	accounts []*types.StateAccount, // Expanded accounts in the returned range
+	cont bool, // Whether the account range has a continuation
+	loopID int,
+	streamID sttypes.StreamID) error {
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if err := s.processAccountResponse(task, hashes, accounts, cont); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// processAccountResponse integrates an already validated account range response
+// into the account tasks.
+func (s *FullStateDownloadManager) processAccountResponse(task *accountTask, // Task which this request is filling
+	hashes []common.Hash, // Account hashes in the returned range
+	accounts []*types.StateAccount, // Expanded accounts in the returned range
+	cont bool, // Whether the account range has a continuation
+) error {
+
+	if _, ok := s.tasks.accountTasks[task.id]; ok {
+		s.tasks.accountTasks[task.id].res = &accountResponse{
+			task:     task,
+			hashes:   hashes,
+			accounts: accounts,
+			cont:     cont,
+		}
+	}
+
+	// Ensure that the response doesn't overflow into the subsequent task
+	last := task.Last.Big()
+	for i, hash := range hashes {
+		// Mark the range complete if the last is already included.
+		// Keep iterating to delete any extra states if they exist.
+		cmp := hash.Big().Cmp(last)
+		if cmp == 0 {
+			cont = false
+			continue
+		}
+		if cmp > 0 {
+			// Chunk overflown, cut off excess
+			hashes = hashes[:i]
+			accounts = accounts[:i]
+			cont = false // Mark range completed
+			break
+		}
+	}
+	// Iterate over all the accounts and assemble which ones need further sub-
+	// filling before the entire account range can be persisted.
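+	//
+	// Illustrative outcome: an account whose code hash is unknown locally
+	// sets needCode[i] and queues a codeTask; one whose storage root is
+	// missing sets needState[i] and either queues a stateTask or resumes
+	// previously created subtasks (marking needHeal[i]).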
+ task.needCode = make([]bool, len(accounts)) + task.needState = make([]bool, len(accounts)) + task.needHeal = make([]bool, len(accounts)) + + task.codeTasks = make(map[common.Hash]struct{}) + task.stateTasks = make(map[common.Hash]common.Hash) + + resumed := make(map[common.Hash]struct{}) + + task.pend = 0 + for i, account := range accounts { + // Check if the account is a contract with an unknown code + if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { + if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) { + task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{} + task.needCode[i] = true + task.pend++ + } + } + // Check if the account is a contract with an unknown storage trie + if account.Root != types.EmptyRootHash { + if !rawdb.HasTrieNode(s.db, hashes[i], nil, account.Root, s.scheme) { + // If there was a previous large state retrieval in progress, + // don't restart it from scratch. This happens if a sync cycle + // is interrupted and resumed later. However, *do* update the + // previous root hash. + if subtasks, ok := task.SubTasks[hashes[i]]; ok { + utils.Logger().Debug().Interface("account", hashes[i]).Interface("root", account.Root).Msg("Resuming large storage retrieval") + for _, subtask := range subtasks { + subtask.root = account.Root + } + task.needHeal[i] = true + resumed[hashes[i]] = struct{}{} + } else { + task.stateTasks[hashes[i]] = account.Root + } + task.needState[i] = true + task.pend++ + } + } + } + // Delete any subtasks that have been aborted but not resumed. This may undo + // some progress if a new peer gives us less accounts than an old one, but for + // now we have to live with that. + for hash := range task.SubTasks { + if _, ok := resumed[hash]; !ok { + utils.Logger().Debug().Interface("account", hash).Msg("Aborting suspended storage retrieval") + delete(task.SubTasks, hash) + } + } + // If the account range contained no contracts, or all have been fully filled + // beforehand, short circuit storage filling and forward to the next task + if task.pend == 0 { + s.forwardAccountTask(task) + return nil + } + // Some accounts are incomplete, leave as is for the storage and contract + // task assigners to pick up and fill + return nil +} + +// HandleBytecodeRequestResult handles get bytecode result +func (s *FullStateDownloadManager) HandleBytecodeRequestResult(task *accountTask, // Task which this request is filling + hashes []common.Hash, // Hashes of the bytecode to avoid double hashing + bytecodes [][]byte, // Actual bytecodes to store into the database (nil = missing) + loopID int, + streamID sttypes.StreamID) error { + + s.lock.Lock() + defer s.lock.Unlock() + + if err := s.processBytecodeResponse(task, hashes, bytecodes); err != nil { + return err + } + + return nil +} + +// processBytecodeResponse integrates an already validated bytecode response +// into the account tasks. 
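+//
+// Illustrative example (values assumed): given hashes [h0, h1, h2] and
+// bytecodes [c0, nil, c2], c0 and c2 are written via rawdb.WriteCode while
+// h1 is re-queued in task.codeTasks for a later request.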
+func (s *FullStateDownloadManager) processBytecodeResponse(task *accountTask, // Task which this request is filling + hashes []common.Hash, // Hashes of the bytecode to avoid double hashing + bytecodes [][]byte, // Actual bytecodes to store into the database (nil = missing) +) error { + batch := s.db.NewBatch() + + var ( + codes uint64 + ) + for i, hash := range hashes { + code := bytecodes[i] + + // If the bytecode was not delivered, reschedule it + if code == nil { + task.codeTasks[hash] = struct{}{} + continue + } + // Code was delivered, mark it not needed any more + for j, account := range task.res.accounts { + if task.needCode[j] && hash == common.BytesToHash(account.CodeHash) { + task.needCode[j] = false + task.pend-- + } + } + // Push the bytecode into a database batch + codes++ + rawdb.WriteCode(batch, hash, code) + } + bytes := common.StorageSize(batch.ValueSize()) + if err := batch.Write(); err != nil { + log.Crit("Failed to persist bytecodes", "err", err) + } + s.bytecodeSynced += codes + s.bytecodeBytes += bytes + + utils.Logger().Debug().Interface("count", codes).Float64("bytes", float64(bytes)).Msg("Persisted set of bytecodes") + + // If this delivery completed the last pending task, forward the account task + // to the next chunk + if task.pend == 0 { + s.forwardAccountTask(task) + return nil + } + // Some accounts are still incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. + + return nil +} + +// estimateRemainingSlots tries to determine roughly how many slots are left in +// a contract storage, based on the number of keys and the last hash. This method +// assumes that the hashes are lexicographically ordered and evenly distributed. +func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) { + if last == (common.Hash{}) { + return 0, errors.New("last hash empty") + } + space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes))) + space.Div(space, last.Big()) + if !space.IsUint64() { + // Gigantic address space probably due to too few or malicious slots + return 0, errors.New("too few slots for estimation") + } + return space.Uint64() - uint64(hashes), nil +} + +// HandleStorageRequestResult handles get storages result +func (s *FullStateDownloadManager) HandleStorageRequestResult(mainTask *accountTask, // Task which this response belongs to + subTask *storageTask, // Task which this response is filling + accounts []common.Hash, // Account hashes requested, may be only partially filled + roots []common.Hash, // Storage roots requested, may be only partially filled + hashes [][]common.Hash, // Storage slot hashes in the returned range + storageSlots [][][]byte, // Storage slot values in the returned range + cont bool, // Whether the last storage range has a continuation + loopID int, + streamID sttypes.StreamID) error { + + s.lock.Lock() + defer s.lock.Unlock() + + if err := s.processStorageResponse(mainTask, subTask, accounts, roots, hashes, storageSlots, cont); err != nil { + return err + } + + return nil +} + +// processStorageResponse integrates an already validated storage response +// into the account tasks. 
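+//
+// Illustrative example (values assumed): for requested accounts [A, B]
+// where only A's slots arrived, B is re-queued in mainTask.stateTasks; if
+// A's final range is flagged cont, A is marked needHeal and switched to
+// chunked (subtask) retrieval.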
+func (s *FullStateDownloadManager) processStorageResponse(mainTask *accountTask, // Task which this response belongs to + subTask *storageTask, // Task which this response is filling + accounts []common.Hash, // Account hashes requested, may be only partially filled + roots []common.Hash, // Storage roots requested, may be only partially filled + hashes [][]common.Hash, // Storage slot hashes in the returned range + storageSlots [][][]byte, // Storage slot values in the returned range + cont bool, // Whether the last storage range has a continuation +) error { + batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + var ( + slots int + oldStorageBytes = s.storageBytes + ) + // Iterate over all the accounts and reconstruct their storage tries from the + // delivered slots + for i, account := range accounts { + // If the account was not delivered, reschedule it + if i >= len(hashes) { + mainTask.stateTasks[account] = roots[i] + continue + } + // State was delivered, if complete mark as not needed any more, otherwise + // mark the account as needing healing + for j, hash := range mainTask.res.hashes { + if account != hash { + continue + } + acc := mainTask.res.accounts[j] + + // If the packet contains multiple contract storage slots, all + // but the last are surely complete. The last contract may be + // chunked, so check it's continuation flag. + if subTask == nil && mainTask.needState[j] && (i < len(hashes)-1 || !cont) { + mainTask.needState[j] = false + mainTask.pend-- + } + // If the last contract was chunked, mark it as needing healing + // to avoid writing it out to disk prematurely. + if subTask == nil && !mainTask.needHeal[j] && i == len(hashes)-1 && cont { + mainTask.needHeal[j] = true + } + // If the last contract was chunked, we need to switch to large + // contract handling mode + if subTask == nil && i == len(hashes)-1 && cont { + // If we haven't yet started a large-contract retrieval, create + // the subtasks for it within the main account task + if tasks, ok := mainTask.SubTasks[account]; !ok { + var ( + keys = hashes[i] + chunks = uint64(storageConcurrency) + lastKey common.Hash + ) + if len(keys) > 0 { + lastKey = keys[len(keys)-1] + } + // If the number of slots remaining is low, decrease the + // number of chunks. Somewhere on the order of 10-15K slots + // fit into a packet of 500KB. A key/slot pair is maximum 64 + // bytes, so pessimistically maxRequestSize/64 = 8K. + // + // Chunk so that at least 2 packets are needed to fill a task. + if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil { + if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks { + chunks = n + 1 + } + utils.Logger().Debug(). + Int("initiators", len(keys)). + Interface("tail", lastKey). + Uint64("remaining", estimate). + Uint64("chunks", chunks). + Msg("Chunked large contract") + } else { + utils.Logger().Debug(). + Int("initiators", len(keys)). + Interface("tail", lastKey). + Uint64("chunks", chunks). + Msg("Chunked large contract") + } + r := newHashRange(lastKey, chunks) + + // Our first task is the one that was just filled by this response. 
+ batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + ownerAccount := account // local assignment for stacktrie writer closure + // options := trie.NewStackTrieOptions() + writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, ownerAccount, path, hash, blob, s.scheme) + } + tasks = append(tasks, &storageTask{ + Next: common.Hash{}, + Last: r.End(), + root: acc.Root, + genBatch: batch, + genTrie: trie.NewStackTrie(writeFn), + }) + for r.Next() { + batch := ethdb.HookedBatch{ + Batch: s.db.NewBatch(), + OnPut: func(key []byte, value []byte) { + s.storageBytes += common.StorageSize(len(key) + len(value)) + }, + } + // options := trie.NewStackTrieOptions() + writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, ownerAccount, path, hash, blob, s.scheme) + } + tasks = append(tasks, &storageTask{ + Next: r.Start(), + Last: r.End(), + root: acc.Root, + genBatch: batch, + genTrie: trie.NewStackTrie(writeFn), + }) + } + for _, task := range tasks { + utils.Logger().Debug(). + Interface("from", task.Next). + Interface("last", task.Last). + Interface("root", acc.Root). + Interface("account", account). + Msg("Created storage sync task") + } + mainTask.SubTasks[account] = tasks + + // Since we've just created the sub-tasks, this response + // is surely for the first one (zero origin) + subTask = tasks[0] + } + } + // If we're in large contract delivery mode, forward the subtask + if subTask != nil { + // Ensure the response doesn't overflow into the subsequent task + last := subTask.Last.Big() + // Find the first overflowing key. While at it, mark res as complete + // if we find the range to include or pass the 'last' + index := sort.Search(len(hashes[i]), func(k int) bool { + cmp := hashes[i][k].Big().Cmp(last) + if cmp >= 0 { + cont = false + } + return cmp > 0 + }) + if index >= 0 { + // cut off excess + hashes[i] = hashes[i][:index] + storageSlots[i] = storageSlots[i][:index] + } + // Forward the relevant storage chunk (even if created just now) + if cont { + subTask.Next = incHash(hashes[i][len(hashes[i])-1]) + } else { + subTask.done = true + } + } + } + // Iterate over all the complete contracts, reconstruct the trie nodes and + // push them to disk. If the contract is chunked, the trie nodes will be + // reconstructed later. + slots += len(hashes[i]) + + if i < len(hashes)-1 || subTask == nil { + // no need to make local reassignment of account: this closure does not outlive the loop + // options := trie.NewStackTrieOptions() + writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + rawdb.WriteTrieNode(batch, account, path, hash, blob, s.scheme) + } + tr := trie.NewStackTrie(writeFn) + for j := 0; j < len(hashes[i]); j++ { + tr.Update(hashes[i][j][:], storageSlots[i][j]) + } + tr.Commit() + } + // Persist the received storage segments. These flat state maybe + // outdated during the sync, but it can be fixed later during the + // snapshot generation. 
+ for j := 0; j < len(hashes[i]); j++ { + rawdb.WriteStorageSnapshot(batch, account, hashes[i][j], storageSlots[i][j]) + + // If we're storing large contracts, generate the trie nodes + // on the fly to not trash the gluing points + if i == len(hashes)-1 && subTask != nil { + subTask.genTrie.Update(hashes[i][j][:], storageSlots[i][j]) + } + } + } + // Large contracts could have generated new trie nodes, flush them to disk + if subTask != nil { + if subTask.done { + root, _ := subTask.genTrie.Commit() + if root == subTask.root { + // If the chunk's root is an overflown but full delivery, clear the heal request + for i, account := range mainTask.res.hashes { + if account == accounts[len(accounts)-1] { + mainTask.needHeal[i] = false + } + } + } + } + if subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || subTask.done { + if err := subTask.genBatch.Write(); err != nil { + log.Error("Failed to persist stack slots", "err", err) + } + subTask.genBatch.Reset() + } + } + // Flush anything written just now and update the stats + if err := batch.Write(); err != nil { + log.Crit("Failed to persist storage slots", "err", err) + } + s.storageSynced += uint64(slots) + + utils.Logger().Debug(). + Int("accounts", len(hashes)). + Int("slots", slots). + Interface("bytes", s.storageBytes-oldStorageBytes). + Msg("Persisted set of storage slots") + + // If this delivery completed the last pending task, forward the account task + // to the next chunk + if mainTask.pend == 0 { + s.forwardAccountTask(mainTask) + return nil + } + // Some accounts are still incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. + + return nil +} + +// HandleTrieNodeHealRequestResult handles get trie nodes heal result +func (s *FullStateDownloadManager) HandleTrieNodeHealRequestResult(task *healTask, // Task which this request is filling + paths []string, // Paths of the trie nodes + hashes []common.Hash, // Hashes of the trie nodes to avoid double hashing + nodes [][]byte, // Actual trie nodes to store into the database (nil = missing) + loopID int, + streamID sttypes.StreamID) error { + + s.lock.Lock() + defer s.lock.Unlock() + + if err := s.processTrienodeHealResponse(task, paths, hashes, nodes); err != nil { + return err + } + + return nil +} + +// processTrienodeHealResponse integrates an already validated trienode response +// into the healer tasks. 
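+//
+// Illustrative flow: delivered nodes are fed to scheduler.ProcessNode, nil
+// entries are re-queued under their path in task.trieTasks, and the
+// measured fill rate drives the trienodeHealThrottle adjustment below.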
+func (s *FullStateDownloadManager) processTrienodeHealResponse(task *healTask, // Task which this request is filling
+	paths []string, // Paths of the trie nodes
+	hashes []common.Hash, // Hashes of the trie nodes to avoid double hashing
+	nodes [][]byte, // Actual trie nodes to store into the database (nil = missing)
+) error {
+	var (
+		start = time.Now()
+		fills int
+	)
+	for i, hash := range hashes {
+		node := nodes[i]
+
+		// If the trie node was not delivered, reschedule it
+		if node == nil {
+			task.trieTasks[paths[i]] = hashes[i]
+			continue
+		}
+		fills++
+
+		// Push the trie node into the state syncer
+		s.trienodeHealSynced++
+		s.trienodeHealBytes += common.StorageSize(len(node))
+
+		err := s.scheduler.ProcessNode(trie.NodeSyncResult{Path: paths[i], Data: node})
+		switch err {
+		case nil:
+		case trie.ErrAlreadyProcessed:
+			s.trienodeHealDups++
+		case trie.ErrNotRequested:
+			s.trienodeHealNops++
+		default:
+			utils.Logger().Err(err).Interface("hash", hash).Msg("Invalid trienode processed")
+		}
+	}
+	s.commitHealer(false)
+
+	// Calculate the processing rate of one filled trie node
+	rate := float64(fills) / (float64(time.Since(start)) / float64(time.Second))
+
+	// Update the currently measured trienode queueing and processing throughput.
+	//
+	// The processing rate needs to be updated uniformly independent if we've
+	// processed 1x100 trie nodes or 100x1 to keep the rate consistent even in
+	// the face of varying network packets. As such, we cannot just measure the
+	// time it took to process N trie nodes and update once, we need one update
+	// per trie node.
+	//
+	// Naively, that would be:
+	//
+	//   for i := 0; i < fills; i++ {
+	//     healRate = (1-measurementImpact)*oldRate + measurementImpact*newRate
+	//   }
+	//
+	// which folds into a single closed-form update, since applying the same
+	// interpolation fills times decays the old rate geometrically:
+	//
+	//   healRate = (1-measurementImpact)^fills * oldRate +
+	//              (1 - (1-measurementImpact)^fills) * newRate
+	decay := gomath.Pow(1-trienodeHealRateMeasurementImpact, float64(fills))
+	s.trienodeHealRate = decay*s.trienodeHealRate + (1-decay)*rate
+
+	pending := s.trienodeHealPend.Load()
+	if time.Since(s.trienodeHealThrottled) > time.Second {
+		// Periodically adjust the trie node throttler
+		if float64(pending) > 2*s.trienodeHealRate {
+			s.trienodeHealThrottle *= trienodeHealThrottleIncrease
+		} else {
+			s.trienodeHealThrottle /= trienodeHealThrottleDecrease
+		}
+		if s.trienodeHealThrottle > maxTrienodeHealThrottle {
+			s.trienodeHealThrottle = maxTrienodeHealThrottle
+		} else if s.trienodeHealThrottle < minTrienodeHealThrottle {
+			s.trienodeHealThrottle = minTrienodeHealThrottle
+		}
+		s.trienodeHealThrottled = time.Now()
+
+		utils.Logger().Debug().
+			Float64("rate", s.trienodeHealRate).
+			Uint64("pending", pending).
+			Float64("throttle", s.trienodeHealThrottle).
+			Msg("Updated trie node heal throttler")
+	}
+
+	return nil
+}
+
+// HandleByteCodeHealRequestResult handles get byte codes heal result
+func (s *FullStateDownloadManager) HandleByteCodeHealRequestResult(task *healTask, // Task which this request is filling
+	hashes []common.Hash, // Hashes of the bytecode to avoid double hashing
+	codes [][]byte, // Actual bytecodes to store into the database (nil = missing)
+	loopID int,
+	streamID sttypes.StreamID) error {
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if err := s.processBytecodeHealResponse(task, hashes, codes); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// processBytecodeHealResponse integrates an already validated bytecode response
+// into the healer tasks.
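+//
+// Illustrative flow: codes[i] == nil re-queues hashes[i] in task.codeTasks;
+// delivered bytecode is handed to scheduler.ProcessCode, with duplicate and
+// unrequested deliveries tallied in bytecodeHealDups / bytecodeHealNops.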
+func (s *FullStateDownloadManager) processBytecodeHealResponse(task *healTask, // Task which this request is filling + hashes []common.Hash, // Hashes of the bytecode to avoid double hashing + codes [][]byte, // Actual bytecodes to store into the database (nil = missing) +) error { + for i, hash := range hashes { + node := codes[i] + + // If the trie node was not delivered, reschedule it + if node == nil { + task.codeTasks[hash] = struct{}{} + continue + } + // Push the trie node into the state syncer + s.bytecodeHealSynced++ + s.bytecodeHealBytes += common.StorageSize(len(node)) + + err := s.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node}) + switch err { + case nil: + case trie.ErrAlreadyProcessed: + s.bytecodeHealDups++ + case trie.ErrNotRequested: + s.bytecodeHealNops++ + default: + log.Error("Invalid bytecode processed", "hash", hash, "err", err) + } + } + s.commitHealer(false) + + return nil +} From c340c704ba6928787ccdd7ff2c4903d7dfad2650 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 16 Nov 2023 20:55:52 +0800 Subject: [PATCH 091/128] fix GetNextBatch to complete sync after there is no more pending states,remove extra comments, cleanup and goimports --- api/service/stagedstreamsync/satate_sync.go | 113 ++++++++------------ 1 file changed, 47 insertions(+), 66 deletions(-) diff --git a/api/service/stagedstreamsync/satate_sync.go b/api/service/stagedstreamsync/satate_sync.go index e90640a9aa..1bf6858261 100644 --- a/api/service/stagedstreamsync/satate_sync.go +++ b/api/service/stagedstreamsync/satate_sync.go @@ -109,9 +109,6 @@ type accountTask struct { Last common.Hash // Last account to sync in this interval SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts - // These fields are internals used during runtime - //req *accountRequest // Pending request to fill this task - //res *accountResponse // Validate response filling this task pend int // Number of pending subtasks for this round needCode []bool // Flags whether the filling accounts need code retrieval @@ -134,26 +131,19 @@ type accountTask struct { // range request. It contains the subtrie for the requested account range and // the database that's going to be filled with the internal nodes on commit. type accountResponse struct { - task *accountTask // Task which this request is filling - + task *accountTask // Task which this request is filling hashes []common.Hash // Account hashes in the returned range accounts []*types.StateAccount // Expanded accounts in the returned range - - cont bool // Whether the account range has a continuation + cont bool // Whether the account range has a continuation } // storageTask represents the sync task for a chunk of the storage snapshot. 
type storageTask struct { - Next common.Hash // Next account to sync in this interval - Last common.Hash // Last account to sync in this interval - - // These fields are internals used during runtime - root common.Hash // Storage root hash for this instance - //req *storageTaskBundleuest // Pending request to fill this task - - genBatch ethdb.Batch // Batch used by the node generator - genTrie *trie.StackTrie // Node generator from storage slots - + Next common.Hash // Next account to sync in this interval + Last common.Hash // Last account to sync in this interval + root common.Hash // Storage root hash for this instance + genBatch ethdb.Batch // Batch used by the node generator + genTrie *trie.StackTrie // Node generator from storage slots requested bool done bool // Flag whether the task can be removed } @@ -200,7 +190,7 @@ func (t *healRequestSort) Swap(i, j int) { // Merge merges the pathsets, so that several storage requests concerning the // same account are merged into one, to reduce bandwidth. -// OBS: This operation is moot if t has not first been sorted. +// This operation is moot if t has not first been sorted. func (t *healRequestSort) Merge() []TrieNodePathSet { var result []TrieNodePathSet for _, path := range t.syncPaths { @@ -280,7 +270,6 @@ func (t *tasks) deleteAccountTask(accountTaskIndex uint64) { if _, ok := t.accountTasks[accountTaskIndex]; ok { delete(t.accountTasks, accountTaskIndex) } - // t.accountTasks = append(t.accountTasks[:accountTaskIndex], t.accountTasks[accountTaskIndex+1:]...) } func (t *tasks) addCodeTask(h common.Hash) { @@ -375,7 +364,6 @@ type FullStateDownloadManager struct { root common.Hash // Current state trie root being synced snapped bool // Flag to signal that snap phase is done - // healer *healTask // Current state healing task being executed protocol syncProtocol scheduler *trie.Sync // State trie sync scheduler defining the tasks @@ -444,7 +432,6 @@ func (s *FullStateDownloadManager) setRootHash(root common.Hash) { s.root = root s.scheduler = state.NewStateSync(root, s.db, s.onHealState, s.scheme) s.loadSyncStatus() - // s.sched = state.NewStateSync(root, s.bc.ChainDb(), nil, rawdb.HashScheme) } func (s *FullStateDownloadManager) taskDone(taskID uint64) { @@ -554,33 +541,7 @@ func (s *FullStateDownloadManager) commitHealer(force bool) { utils.Logger().Debug().Str("type", "trienodes").Interface("bytes", common.StorageSize(batch.ValueSize())).Msg("Persisted set of healing data") } -// getNextBatch returns objects with a maximum of n state download -// tasks to send to the remote peer. 
-func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, - codes []common.Hash, - storages *storageTaskBundle, - healtask *healTask, - codetask *healTask, - err error) { - - s.lock.Lock() - defer s.lock.Unlock() - - cap := StatesPerRequest - - accounts, codes, storages, healtask, codetask = s.getBatchFromRetries(cap) - nItems := len(accounts) + len(codes) + len(storages.roots) + len(healtask.hashes) + len(codetask.hashes) - cap -= nItems - - if cap == 0 { - return - } - - if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 { - utils.Logger().Debug().Msg("Snapshot sync already completed") - return - } - +func (s *FullStateDownloadManager) SyncCompleted() { defer func() { // Persist any progress, independent of failure for _, task := range s.tasks.accountTasks { s.forwardAccountTask(task) @@ -605,27 +566,50 @@ func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, utils.Logger().Debug().Interface("root", s.root).Msg("Terminating snapshot sync cycle") }() - // Refill available tasks from the scheduler. - if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 { - utils.Logger().Debug().Msg("Snapshot sync already completed") + utils.Logger().Debug().Msg("Snapshot sync already completed") +} + +// getNextBatch returns objects with a maximum of n state download +// tasks to send to the remote peer. +func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, + codes []common.Hash, + storages *storageTaskBundle, + healtask *healTask, + codetask *healTask, + err error) { + + s.lock.Lock() + defer s.lock.Unlock() + + cap := StatesPerRequest + + accounts, codes, storages, healtask, codetask = s.getBatchFromRetries(cap) + nItems := len(accounts) + len(codes) + len(storages.roots) + len(healtask.hashes) + len(codetask.hashes) + cap -= nItems + + if cap == 0 { return } - // if err = s.fillTasks(cap); err != nil { - // return - // } + if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 { + if nItems == 0 { + s.SyncCompleted() + } + return + } - includeHealtasks := true + // Refill available tasks from the scheduler. + withHealTasks := true if healtask != nil || codetask != nil { - includeHealtasks = false + withHealTasks = false } - newAccounts, newCodes, newStorageTaskBundle, unprocessedHealtask, unprocessedCodetask := s.getBatchFromUnprocessed(cap, includeHealtasks) + newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask := s.getBatchFromUnprocessed(cap, withHealTasks) accounts = append(accounts, newAccounts...) codes = append(codes, newCodes...) storages = newStorageTaskBundle - if includeHealtasks { - healtask = unprocessedHealtask - codetask = unprocessedCodetask + if withHealTasks { + healtask = newHealTask + codetask = newCodeTask } return @@ -690,7 +674,7 @@ func (s *FullStateDownloadManager) loadSyncStatus() { } s.tasks.accountTasks = progress.Tasks for _, task := range s.tasks.accountTasks { - // task := task // closure for task.genBatch in the stacktrie writer callback + task := task // closure for task.genBatch in the stacktrie writer callback task.genBatch = ethdb.HookedBatch{ Batch: s.db.NewBatch(), @@ -810,11 +794,8 @@ func (s *FullStateDownloadManager) cleanAccountTasks() { return } // Sync wasn't finished previously, check for any task that can be finalized - //for i := 0; i < len(s.tasks.accountTasks); i++ { for taskID, _ := range s.tasks.accountTasks { if s.tasks.accountTasks[taskID].done { - //s.tasks.accountTasks = append(s.tasks.accountTasks[:i], s.tasks.accountTasks[i+1:]...) 
- //i-- s.tasks.deleteAccountTask(taskID) } } @@ -953,7 +934,7 @@ func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected in // getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download // tasks to send to the remote peer. -func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, includeHealtasks bool) ( +func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks bool) ( accounts []*accountTask, codes []common.Hash, storages *storageTaskBundle, @@ -1093,7 +1074,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, includeHealtas return } - if !includeHealtasks { + if !withHealTasks { return } From 337410040958bab24874e335cafcc586230c78df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 16 Nov 2023 21:01:03 +0800 Subject: [PATCH 092/128] fix state sync file name spell error --- api/service/stagedstreamsync/{satate_sync.go => state_sync.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename api/service/stagedstreamsync/{satate_sync.go => state_sync.go} (100%) diff --git a/api/service/stagedstreamsync/satate_sync.go b/api/service/stagedstreamsync/state_sync.go similarity index 100% rename from api/service/stagedstreamsync/satate_sync.go rename to api/service/stagedstreamsync/state_sync.go From e141f79818a0268db59f202f372fe20966211f39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 7 Dec 2023 16:19:17 +0800 Subject: [PATCH 093/128] add ProofSet and ProofList to staged stream sync --- api/service/stagedstreamsync/proof.go | 146 ++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 api/service/stagedstreamsync/proof.go diff --git a/api/service/stagedstreamsync/proof.go b/api/service/stagedstreamsync/proof.go new file mode 100644 index 0000000000..216d797d45 --- /dev/null +++ b/api/service/stagedstreamsync/proof.go @@ -0,0 +1,146 @@ +package stagedstreamsync + +import ( + "errors" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" +) + +// ProofSet stores a set of trie nodes. It implements trie.Database and can also +// act as a cache for another trie.Database. 
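+//
+// Illustrative usage (not part of this change): proof nodes are keyed by the
+// Keccak256 hash of their RLP encoding, so a set can be rebuilt from its
+// list form:
+//
+//	ps := NewProofSet()
+//	_ = ps.Put(crypto.Keccak256(node), node) // node: an RLP-encoded trie node
+//	list := ps.List()                        // values in insertion order
+//	rebuilt := list.Set()                    // re-keyed via Keccak256(node)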
+type ProofSet struct { + nodes map[string][]byte + order []string + + dataSize int + lock sync.RWMutex +} + +// NewProofSet creates an empty node set +func NewProofSet() *ProofSet { + return &ProofSet{ + nodes: make(map[string][]byte), + } +} + +// Put stores a new node in the set +func (db *ProofSet) Put(key []byte, value []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if _, ok := db.nodes[string(key)]; ok { + return nil + } + keystr := string(key) + + db.nodes[keystr] = common.CopyBytes(value) + db.order = append(db.order, keystr) + db.dataSize += len(value) + + return nil +} + +// Delete removes a node from the set +func (db *ProofSet) Delete(key []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + delete(db.nodes, string(key)) + return nil +} + +// Get returns a stored node +func (db *ProofSet) Get(key []byte) ([]byte, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if entry, ok := db.nodes[string(key)]; ok { + return entry, nil + } + return nil, errors.New("not found") +} + +// Has returns true if the node set contains the given key +func (db *ProofSet) Has(key []byte) (bool, error) { + _, err := db.Get(key) + return err == nil, nil +} + +// KeyCount returns the number of nodes in the set +func (db *ProofSet) KeyCount() int { + db.lock.RLock() + defer db.lock.RUnlock() + + return len(db.nodes) +} + +// DataSize returns the aggregated data size of nodes in the set +func (db *ProofSet) DataSize() int { + db.lock.RLock() + defer db.lock.RUnlock() + + return db.dataSize +} + +// List converts the node set to a ProofList +func (db *ProofSet) List() ProofList { + db.lock.RLock() + defer db.lock.RUnlock() + + var values ProofList + for _, key := range db.order { + values = append(values, db.nodes[key]) + } + return values +} + +// Store writes the contents of the set to the given database +func (db *ProofSet) Store(target ethdb.KeyValueWriter) { + db.lock.RLock() + defer db.lock.RUnlock() + + for key, value := range db.nodes { + target.Put([]byte(key), value) + } +} + +// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter. +type ProofList []rlp.RawValue + +// Store writes the contents of the list to the given database +func (n ProofList) Store(db ethdb.KeyValueWriter) { + for _, node := range n { + db.Put(crypto.Keccak256(node), node) + } +} + +// Set converts the node list to a ProofSet +func (n ProofList) Set() *ProofSet { + db := NewProofSet() + n.Store(db) + return db +} + +// Put stores a new node at the end of the list +func (n *ProofList) Put(key []byte, value []byte) error { + *n = append(*n, value) + return nil +} + +// Delete panics as there's no reason to remove a node from the list. 
+func (n *ProofList) Delete(key []byte) error {
+	panic("not supported")
+}
+
+// DataSize returns the aggregated data size of nodes in the list
+func (n ProofList) DataSize() int {
+	var size int
+	for _, node := range n {
+		size += len(node)
+	}
+	return size
+}

From 390bdb67d835939bc951139d171478e3e88e0705 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?=
 <36589218+GheisMohammadi@users.noreply.github.com>
Date: Thu, 7 Dec 2023 16:23:03 +0800
Subject: [PATCH 094/128] add new client functions to stream sync adapter,
 update GetAccountRange parameters

---
 api/service/stagedstreamsync/adapter.go |  5 +++++
 p2p/stream/protocols/sync/client.go     | 27 +++++++++++--------------
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/api/service/stagedstreamsync/adapter.go b/api/service/stagedstreamsync/adapter.go
index ca9c6a6787..56c42b661c 100644
--- a/api/service/stagedstreamsync/adapter.go
+++ b/api/service/stagedstreamsync/adapter.go
@@ -9,6 +9,7 @@ import (
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/p2p/stream/common/streammanager"
 	syncproto "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
+	"github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
 	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
 )
 
@@ -20,6 +21,10 @@ type syncProtocol interface {
 	GetBlocksByHashes(ctx context.Context, hs []common.Hash, opts ...syncproto.Option) ([]*types.Block, sttypes.StreamID, error)
 	GetReceipts(ctx context.Context, hs []common.Hash, opts ...syncproto.Option) (receipts []types.Receipts, stid sttypes.StreamID, err error)
 	GetNodeData(ctx context.Context, hs []common.Hash, opts ...syncproto.Option) (data [][]byte, stid sttypes.StreamID, err error)
+	GetAccountRange(ctx context.Context, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...syncproto.Option) (accounts []*message.AccountData, proof [][]byte, stid sttypes.StreamID, err error)
+	GetStorageRanges(ctx context.Context, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...syncproto.Option) (slots [][]*message.StorageData, proof [][]byte, stid sttypes.StreamID, err error)
+	GetByteCodes(ctx context.Context, hs []common.Hash, bytes uint64, opts ...syncproto.Option) (codes [][]byte, stid sttypes.StreamID, err error)
+	GetTrieNodes(ctx context.Context, root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, opts ...syncproto.Option) (nodes [][]byte, stid sttypes.StreamID, err error)
 
 	RemoveStream(stID sttypes.StreamID) // If a stream delivers invalid data, remove the stream
 	StreamFailed(stID sttypes.StreamID, reason string)
diff --git a/p2p/stream/protocols/sync/client.go b/p2p/stream/protocols/sync/client.go
index 9024142cef..45707e1191 100644
--- a/p2p/stream/protocols/sync/client.go
+++ b/p2p/stream/protocols/sync/client.go
@@ -184,7 +184,7 @@ func (p *Protocol) GetNodeData(ctx context.Context, hs []common.Hash, opts ...Op
 
 // GetAccountRange do getAccountRange through sync stream protocol.
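+// (a snap-style range query: accounts between origin and limit under the given
+// state root, with the response capped by a byte-size limit)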
// returns the accounts along with proofs as result, target stream id, and error -func (p *Protocol) GetAccountRange(ctx context.Context, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (accounts []*message.AccountData, proof []common.Hash, stid sttypes.StreamID, err error) { +func (p *Protocol) GetAccountRange(ctx context.Context, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (accounts []*message.AccountData, proof [][]byte, stid sttypes.StreamID, err error) { timer := p.doMetricClientRequest("getAccountRange") defer p.doMetricPostClientRequest("getAccountRange", err, timer) @@ -207,7 +207,7 @@ func (p *Protocol) GetAccountRange(ctx context.Context, root common.Hash, origin // GetStorageRanges do getStorageRanges through sync stream protocol. // returns the slots along with proofs as result, target stream id, and error -func (p *Protocol) GetStorageRanges(ctx context.Context, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (slots []*message.StorageData, proof []common.Hash, stid sttypes.StreamID, err error) { +func (p *Protocol) GetStorageRanges(ctx context.Context, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (slots [][]*message.StorageData, proof [][]byte, stid sttypes.StreamID, err error) { timer := p.doMetricClientRequest("getStorageRanges") defer p.doMetricPostClientRequest("getStorageRanges", err, timer) @@ -233,11 +233,9 @@ func (p *Protocol) GetStorageRanges(ctx context.Context, root common.Hash, accou if err != nil { return } - slots = make([]*message.StorageData, 0) + slots = make([][]*message.StorageData, 0) for _, storage := range storages { - for _, data := range storage.Data { - slots = append(slots, data) - } + slots = append(slots, storage.Data) } return } @@ -735,8 +733,7 @@ func (req *getAccountRangeRequest) Encode() ([]byte, error) { return protobuf.Marshal(msg) } -// []*message.AccountData, []common.Hash -func (req *getAccountRangeRequest) getAccountRangeFromResponse(resp sttypes.Response) ([]*message.AccountData, []common.Hash, error) { +func (req *getAccountRangeRequest) getAccountRangeFromResponse(resp sttypes.Response) ([]*message.AccountData, [][]byte, error) { sResp, ok := resp.(*syncResponse) if !ok || sResp == nil { return nil, nil, errors.New("not sync response") @@ -744,7 +741,7 @@ func (req *getAccountRangeRequest) getAccountRangeFromResponse(resp sttypes.Resp return req.parseGetAccountRangeResponse(sResp) } -func (req *getAccountRangeRequest) parseGetAccountRangeResponse(resp *syncResponse) ([]*message.AccountData, []common.Hash, error) { +func (req *getAccountRangeRequest) parseGetAccountRangeResponse(resp *syncResponse) ([]*message.AccountData, [][]byte, error) { if errResp := resp.pb.GetErrorResponse(); errResp != nil { return nil, nil, errors.New(errResp.Error) } @@ -752,9 +749,9 @@ func (req *getAccountRangeRequest) parseGetAccountRangeResponse(resp *syncRespon if grResp == nil { return nil, nil, errors.New("response not GetAccountRange") } - proofs := make([]common.Hash, 0) + proofs := make([][]byte, 0) for _, proofBytes := range grResp.Proof { - var proof common.Hash + var proof []byte if err := rlp.DecodeBytes(proofBytes, &proof); err != nil { return nil, nil, errors.Wrap(err, "[GetAccountRangeResponse]") } @@ -817,7 +814,7 @@ func (req *getStorageRangesRequest) Encode() ([]byte, error) { } // []*message.AccountData, []common.Hash -func (req 
*getStorageRangesRequest) getStorageRangesFromResponse(resp sttypes.Response) ([]*message.StoragesData, []common.Hash, error) { +func (req *getStorageRangesRequest) getStorageRangesFromResponse(resp sttypes.Response) ([]*message.StoragesData, [][]byte, error) { sResp, ok := resp.(*syncResponse) if !ok || sResp == nil { return nil, nil, errors.New("not sync response") @@ -825,7 +822,7 @@ func (req *getStorageRangesRequest) getStorageRangesFromResponse(resp sttypes.Re return req.parseGetStorageRangesResponse(sResp) } -func (req *getStorageRangesRequest) parseGetStorageRangesResponse(resp *syncResponse) ([]*message.StoragesData, []common.Hash, error) { +func (req *getStorageRangesRequest) parseGetStorageRangesResponse(resp *syncResponse) ([]*message.StoragesData, [][]byte, error) { if errResp := resp.pb.GetErrorResponse(); errResp != nil { return nil, nil, errors.New(errResp.Error) } @@ -833,9 +830,9 @@ func (req *getStorageRangesRequest) parseGetStorageRangesResponse(resp *syncResp if grResp == nil { return nil, nil, errors.New("response not GetStorageRanges") } - proofs := make([]common.Hash, 0) + proofs := make([][]byte, 0) for _, proofBytes := range grResp.Proof { - var proof common.Hash + var proof []byte if err := rlp.DecodeBytes(proofBytes, &proof); err != nil { return nil, nil, errors.Wrap(err, "[GetStorageRangesResponse]") } From 0901e92bf8cc17085e072dbc90294b46e49dd0f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 7 Dec 2023 16:32:03 +0800 Subject: [PATCH 095/128] add state sync full, complete full state sync stage --- .../stagedstreamsync/stage_statesync_full.go | 449 ++++++++++++++ .../{state_sync.go => state_sync_full.go} | 583 +++++++++++++++--- api/service/stagedstreamsync/syncing.go | 2 +- p2p/stream/protocols/sync/chain.go | 2 +- 4 files changed, 951 insertions(+), 85 deletions(-) create mode 100644 api/service/stagedstreamsync/stage_statesync_full.go rename api/service/stagedstreamsync/{state_sync.go => state_sync_full.go} (80%) diff --git a/api/service/stagedstreamsync/stage_statesync_full.go b/api/service/stagedstreamsync/stage_statesync_full.go new file mode 100644 index 0000000000..3e190bdc9e --- /dev/null +++ b/api/service/stagedstreamsync/stage_statesync_full.go @@ -0,0 +1,449 @@ +package stagedstreamsync + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/harmony-one/harmony/core" + "github.com/harmony-one/harmony/internal/utils" + sttypes "github.com/harmony-one/harmony/p2p/stream/types" + "github.com/pkg/errors" + + //sttypes "github.com/harmony-one/harmony/p2p/stream/types" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" +) + +type StageFullStateSync struct { + configs StageFullStateSyncCfg +} + +type StageFullStateSyncCfg struct { + bc core.BlockChain + db kv.RwDB + concurrency int + protocol syncProtocol + logger zerolog.Logger + logProgress bool +} + +func NewStageFullStateSync(cfg StageFullStateSyncCfg) *StageFullStateSync { + return &StageFullStateSync{ + configs: cfg, + } +} + +func NewStageFullStateSyncCfg(bc core.BlockChain, + db kv.RwDB, + concurrency int, + protocol syncProtocol, + logger zerolog.Logger, + logProgress bool) StageFullStateSyncCfg { + + return StageFullStateSyncCfg{ + bc: bc, + db: db, + concurrency: concurrency, + protocol: protocol, + logger: logger, + logProgress: logProgress, + } +} + +// Exec progresses States stage in the forward direction +func (sss 
*StageFullStateSync) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+
+	// for short range sync, skip this step
+	if !s.state.initSync {
+		return nil
+	} // only execute this stage in fast/snap sync mode and once we reach the pivot
+
+	if s.state.status.pivotBlock == nil ||
+		s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
+		s.state.status.statesSynced {
+		return nil
+	}
+
+	s.state.Debug("STATE SYNC ======================================================>", "started")
+	// maxHeight := s.state.status.targetBN
+	// currentHead := s.state.CurrentBlockNumber()
+	// if currentHead >= maxHeight {
+	// 	return nil
+	// }
+	// currProgress := s.state.CurrentBlockNumber()
+	// targetHeight := s.state.currentCycle.TargetHeight
+
+	// if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
+	// 	if currProgress, err = s.CurrentStageProgress(etx); err != nil {
+	// 		return err
+	// 	}
+	// 	return nil
+	// }); errV != nil {
+	// 	return errV
+	// }
+
+	// if currProgress >= targetHeight {
+	// 	return nil
+	// }
+	useInternalTx := tx == nil
+	if useInternalTx {
+		var err error
+		tx, err = sss.configs.db.BeginRw(ctx)
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+	}
+
+	// isLastCycle := targetHeight >= maxHeight
+	startTime := time.Now()
+
+	if sss.configs.logProgress {
+		fmt.Print("\033[s") // save the cursor position
+	}
+
+	// Fetch states from neighbors
+	pivotRootHash := s.state.status.pivotBlock.Root()
+	currentBlockRootHash := s.state.bc.CurrentFastBlock().Root()
+	scheme := sss.configs.bc.TrieDB().Scheme()
+	sdm := newFullStateDownloadManager(sss.configs.bc.ChainDb(), scheme, tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
+	sdm.setRootHash(currentBlockRootHash)
+	s.state.Debug("StateSync/setRootHash", pivotRootHash)
+	s.state.Debug("StateSync/currentFastBlockRoot", currentBlockRootHash)
+	s.state.Debug("StateSync/pivotBlockNumber", s.state.status.pivotBlock.NumberU64())
+	s.state.Debug("StateSync/currentFastBlockNumber", s.state.bc.CurrentFastBlock().NumberU64())
+	var wg sync.WaitGroup
+	for i := 0; i < s.state.config.Concurrency; i++ {
+		wg.Add(1)
+		go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
+	}
+	wg.Wait()
+
+	// insert block
+	if err := sss.configs.bc.WriteHeadBlock(s.state.status.pivotBlock); err != nil {
+		sss.configs.logger.Warn().Err(err).
+			Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
+			Msg(WrapStagedSyncMsg("insert pivot block failed"))
+		s.state.Debug("StateSync/pivot/insert/error", err)
+		// TODO: panic("pivot block failed to be inserted into the chain")
+		return err
+	}
+
+	// states should be fully synced in this stage
+	s.state.status.statesSynced = true
+
+	s.state.Debug("StateSync/pivot/num", s.state.status.pivotBlock.NumberU64())
+	s.state.Debug("StateSync/pivot/insert", "done")
+
+	/*
+		gbm := s.state.gbm
+
+		// Setup workers to fetch states from remote node
+		var wg sync.WaitGroup
+		curHeight := s.state.CurrentBlockNumber()
+
+		for bn := curHeight + 1; bn <= gbm.targetBN; bn++ {
+			root := gbm.GetRootHash(bn)
+			if root == emptyHash {
+				continue
+			}
+			sdm.setRootHash(root)
+			for i := 0; i < s.state.config.Concurrency; i++ {
+				wg.Add(1)
+				go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
+			}
+			wg.Wait()
+		}
+	*/
+
+	if useInternalTx {
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// runStateWorkerLoop creates a work loop for download states
+func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *FullStateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) {
+
+	s.state.Debug("runStateWorkerLoop/info", "started")
+
+	defer wg.Done()
+
+	for {
+		select {
+		case <-ctx.Done():
+			s.state.Debug("runStateWorkerLoop/ctx/done", "Finished")
+			return
+		default:
+		}
+		accountTasks, codes, storages, healtask, codetask, err := sdm.GetNextBatch()
+		s.state.Debug("runStateWorkerLoop/batch/len", len(accountTasks)+len(codes)+len(storages.accounts))
+		s.state.Debug("runStateWorkerLoop/batch/heals/len", len(healtask.hashes)+len(codetask.hashes))
+		s.state.Debug("runStateWorkerLoop/batch/err", err)
+		if len(accountTasks)+len(codes)+len(storages.accounts)+len(healtask.hashes)+len(codetask.hashes) == 0 || err != nil {
+			select {
+			case <-ctx.Done():
+				return
+			case <-time.After(100 * time.Millisecond):
+				return
+			}
+		}
+		s.state.Debug("runStateWorkerLoop/batch/accounts", accountTasks)
+		s.state.Debug("runStateWorkerLoop/batch/codes", codes)
+
+		if len(accountTasks) > 0 {
+
+			task := accountTasks[0]
+			origin := task.Next
+			limit := task.Last
+			root := sdm.root
+			cap := maxRequestSize
+			retAccounts, proof, stid, err := sss.configs.protocol.GetAccountRange(ctx, root, origin, limit, uint64(cap))
+			if err != nil {
+				return
+			}
+			if err := sdm.HandleAccountRequestResult(task, retAccounts, proof, origin[:], limit[:], loopID, stid); err != nil {
+				return
+			}
+
+		} else if len(codes)+len(storages.accounts) > 0 {
+
+			if len(codes) > 0 {
+				stid, err := sss.downloadByteCodes(ctx, sdm, codes, loopID)
+				if err != nil {
+					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+						sss.configs.protocol.StreamFailed(stid, "downloadByteCodes failed")
+					}
+					utils.Logger().Error().
+						Err(err).
+						Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("downloadByteCodes failed")) + err = errors.Wrap(err, "request error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return + } + } + + if len(storages.accounts) > 0 { + root := sdm.root + roots := storages.roots + accounts := storages.accounts + cap := maxRequestSize + origin := storages.origin + limit := storages.limit + mainTask := storages.mainTask + subTask := storages.subtask + + slots, proof, stid, err := sss.configs.protocol.GetStorageRanges(ctx, root, accounts, origin, limit, uint64(cap)) + if err != nil { + return + } + if err := sdm.HandleStorageRequestResult(mainTask, subTask, accounts, roots, origin, limit, slots, proof, loopID, stid); err != nil { + return + } + } + + // data, stid, err := sss.downloadStates(ctx, accounts, codes, storages) + // if err != nil { + // s.state.Debug("runStateWorkerLoop/downloadStates/error", err) + // if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + // sss.configs.protocol.StreamFailed(stid, "downloadStates failed") + // } + // utils.Logger().Error(). + // Err(err). + // Str("stream", string(stid)). + // Msg(WrapStagedSyncMsg("downloadStates failed")) + // err = errors.Wrap(err, "request error") + // sdm.HandleRequestError(codes, paths, stid, err) + // } else if data == nil || len(data) == 0 { + // s.state.Debug("runStateWorkerLoop/downloadStates/data", "nil array") + // utils.Logger().Warn(). + // Str("stream", string(stid)). + // Msg(WrapStagedSyncMsg("downloadStates failed, received empty data bytes")) + // err := errors.New("downloadStates received empty data bytes") + // sdm.HandleRequestError(codes, paths, stid, err) + // } else { + // s.state.Debug("runStateWorkerLoop/downloadStates/data/len", len(data)) + // sdm.HandleRequestResult(nodes, paths, data, loopID, stid) + // if sss.configs.logProgress { + // //calculating block download speed + // dt := time.Now().Sub(startTime).Seconds() + // speed := float64(0) + // if dt > 0 { + // speed = float64(len(data)) / dt + // } + // stateDownloadSpeed := fmt.Sprintf("%.2f", speed) + + // fmt.Print("\033[u\033[K") // restore the cursor position and clear the line + // fmt.Println("state download speed:", stateDownloadSpeed, "states/s") + // } + // } + + } else { + // assign trie node Heal Tasks + if len(healtask.hashes) > 0 { + root := sdm.root + task := healtask.task + hashes := healtask.hashes + pathsets := healtask.pathsets + paths := healtask.paths + + nodes, stid, err := sss.configs.protocol.GetTrieNodes(ctx, root, pathsets, maxRequestSize) + if err != nil { + return + } + if err := sdm.HandleTrieNodeHealRequestResult(task, paths, hashes, nodes, loopID, stid); err != nil { + return + } + } + + if len(codetask.hashes) > 0 { + task := codetask.task + hashes := codetask.hashes + codes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, maxRequestSize) + if err != nil { + return + } + if err := sdm.HandleBytecodeRequestResult(task, hashes, codes, loopID, stid); err != nil { + return + } + } + } + } +} + +func (sss *StageFullStateSync) downloadByteCodes(ctx context.Context, sdm *FullStateDownloadManager, codeTasks []*byteCodeTasksBundle, loopID int) (stid sttypes.StreamID, err error) { + for _, codeTask := range codeTasks { + // try to get byte codes from remote peer + // if any of them failed, the stid will be the id of the failed stream + retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, maxRequestSize) + if err != nil { + return stid, err + } + if err = 
sdm.HandleBytecodeRequestResult(codeTask.task, codeTask.hashes, retCodes, loopID, stid); err != nil {
+			return stid, err
+		}
+	}
+	return
+}
+
+func (sss *StageFullStateSync) downloadStorages(ctx context.Context, sdm *FullStateDownloadManager, codeTasks []*byteCodeTasksBundle, loopID int) (stid sttypes.StreamID, err error) {
+	for _, codeTask := range codeTasks {
+		// try to get byte codes from remote peer
+		// if any of them failed, the stid will be the id of the failed stream
+		retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, maxRequestSize)
+		if err != nil {
+			return stid, err
+		}
+		if err = sdm.HandleBytecodeRequestResult(codeTask.task, codeTask.hashes, retCodes, loopID, stid); err != nil {
+			return stid, err
+		}
+	}
+	return
+}
+
+// func (sss *StageFullStateSync) downloadStates(ctx context.Context,
+// 	root common.Hash,
+// 	origin common.Hash,
+// 	accounts []*accountTask,
+// 	codes []common.Hash,
+// 	storages *storageTaskBundle) ([][]byte, sttypes.StreamID, error) {

+// 	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+// 	defer cancel()

+// 	// if there is any account task, first we have to complete that
+// 	if len(accounts) > 0 {

+// 	}
+// 	// hashes := append(codes, nodes...)
+// 	// data, stid, err := sss.configs.protocol.GetNodeData(ctx, hashes)
+// 	// if err != nil {
+// 	// 	return nil, stid, err
+// 	// }
+// 	// if err := validateGetNodeDataResult(hashes, data); err != nil {
+// 	// 	return nil, stid, err
+// 	// }
+// 	return data, stid, nil
+// }

+func (stg *StageFullStateSync) insertChain(gbm *blockDownloadManager,
+	protocol syncProtocol,
+	lbls prometheus.Labels,
+	targetBN uint64) {

+}

+func (stg *StageFullStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) {

+	useInternalTx := tx == nil
+	if useInternalTx {
+		var err error
+		tx, err = stg.configs.db.BeginRw(context.Background())
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+	}

+	// save progress
+	if err = s.Update(tx, s.state.CurrentBlockNumber()); err != nil {
+		utils.Logger().Error().
+			Err(err).
+			Msgf("[STAGED_SYNC] saving progress for full state sync stage failed")
+		return ErrSaveStateProgressFail
+	}

+	if useInternalTx {
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+	return nil
+}

+func (stg *StageFullStateSync) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
+	useInternalTx := tx == nil
+	if useInternalTx {
+		tx, err = stg.configs.db.BeginRw(ctx)
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+	}

+	if err = u.Done(tx); err != nil {
+		return err
+	}

+	if useInternalTx {
+		if err = tx.Commit(); err != nil {
+			return err
+		}
+	}
+	return nil
+}

+func (stg *StageFullStateSync) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
+	useInternalTx := tx == nil
+	if useInternalTx {
+		tx, err = stg.configs.db.BeginRw(ctx)
+		if err != nil {
+			return err
+		}
+		defer tx.Rollback()
+	}

+	if useInternalTx {
+		if err = tx.Commit(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/api/service/stagedstreamsync/state_sync.go b/api/service/stagedstreamsync/state_sync_full.go
similarity index 80%
rename from api/service/stagedstreamsync/state_sync.go
rename to api/service/stagedstreamsync/state_sync_full.go
index 1bf6858261..daf0f4869b 100644
--- a/api/service/stagedstreamsync/state_sync.go
+++ b/api/service/stagedstreamsync/state_sync_full.go
@@ -3,6 +3,7 @@ package stagedstreamsync
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	gomath "math"
 	"math/big"
 	"math/rand"
@@ -17,11 +18,14 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+
+	//"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/harmony-one/harmony/common/math"
 	"github.com/harmony-one/harmony/core"
 	"github.com/harmony-one/harmony/core/rawdb"
 	"github.com/harmony-one/harmony/core/state"
 	"github.com/harmony-one/harmony/internal/utils"
+	"github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
 	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/log/v3"
@@ -191,7 +195,7 @@ func (t *healRequestSort) Swap(i, j int) {
 
 // Merge merges the pathsets, so that several storage requests concerning the
 // same account are merged into one, to reduce bandwidth.
 // This operation is moot if t has not first been sorted.
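+// The merged sets are returned as protobuf TrieNodePathSet pointers so they
+// can be placed directly into a GetTrieNodes request.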
-func (t *healRequestSort) Merge() []TrieNodePathSet { +func (t *healRequestSort) Merge() []*message.TrieNodePathSet { var result []TrieNodePathSet for _, path := range t.syncPaths { pathset := TrieNodePathSet(path) @@ -211,7 +215,20 @@ func (t *healRequestSort) Merge() []TrieNodePathSet { } } } - return result + // convert to array of pointers + result_ptr := make([]*message.TrieNodePathSet, 0) + for _, p := range result { + result_ptr = append(result_ptr, &message.TrieNodePathSet{ + Pathset: p, + }) + } + return result_ptr +} + +type byteCodeTasksBundle struct { + id uint64 //unique id for bytecode task bundle + task *accountTask + hashes []common.Hash } type storageTaskBundle struct { @@ -231,16 +248,16 @@ type healTask struct { codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash paths []string hashes []common.Hash - pathsets []TrieNodePathSet + pathsets []*message.TrieNodePathSet task *healTask root common.Hash byteCodeReq bool } type tasks struct { - accountTasks map[uint64]*accountTask // Current account task set being synced - storageTasks map[uint64]*storageTaskBundle // Set of trie node tasks currently queued for retrieval, indexed by path - codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by hash + accountTasks map[uint64]*accountTask // Current account task set being synced + storageTasks map[uint64]*storageTaskBundle // Set of trie node tasks currently queued for retrieval, indexed by path + codeTasks map[uint64]*byteCodeTasksBundle // Set of byte code tasks currently queued for retrieval, indexed by hash healer map[uint64]*healTask snapped bool // Flag to signal that snap phase is done } @@ -249,7 +266,7 @@ func newTasks() *tasks { return &tasks{ accountTasks: make(map[uint64]*accountTask, 0), storageTasks: make(map[uint64]*storageTaskBundle, 0), - codeTasks: make(map[common.Hash]struct{}), + codeTasks: make(map[uint64]*byteCodeTasksBundle), healer: make(map[uint64]*healTask, 0), snapped: false, } @@ -272,13 +289,13 @@ func (t *tasks) deleteAccountTask(accountTaskIndex uint64) { } } -func (t *tasks) addCodeTask(h common.Hash) { - t.codeTasks[h] = struct{}{} +func (t *tasks) addCodeTask(id uint64, bytecodeTask *byteCodeTasksBundle) { + t.codeTasks[id] = bytecodeTask } -func (t *tasks) deleteCodeTask(hash common.Hash) { - if _, ok := t.codeTasks[hash]; ok { - delete(t.codeTasks, hash) +func (t *tasks) deleteCodeTask(id uint64) { + if _, ok := t.codeTasks[id]; ok { + delete(t.codeTasks, id) } } @@ -500,33 +517,6 @@ func FullAccountRLP(data []byte) ([]byte, error) { return rlp.EncodeToBytes(account) } -// onHealState is a callback method to invoke when a flat state(account -// or storage slot) is downloaded during the healing stage. The flat states -// can be persisted blindly and can be fixed later in the generation stage. -// Note it's not concurrent safe, please handle the concurrent issue outside. 
-func (s *FullStateDownloadManager) onHealState(paths [][]byte, value []byte) error { - if len(paths) == 1 { - var account types.StateAccount - if err := rlp.DecodeBytes(value, &account); err != nil { - return nil // Returning the error here would drop the remote peer - } - blob := s.SlimAccountRLP(account) - rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob) - s.accountHealed += 1 - s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob)) - } - if len(paths) == 2 { - rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value) - s.storageHealed += 1 - s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value)) - } - if s.stateWriter.ValueSize() > ethdb.IdealBatchSize { - s.stateWriter.Write() // It's fine to ignore the error here - s.stateWriter.Reset() - } - return nil -} - func (s *FullStateDownloadManager) commitHealer(force bool) { if !force && s.scheduler.MemSize() < ethdb.IdealBatchSize { return @@ -572,7 +562,7 @@ func (s *FullStateDownloadManager) SyncCompleted() { // getNextBatch returns objects with a maximum of n state download // tasks to send to the remote peer. func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, - codes []common.Hash, + codes []*byteCodeTasksBundle, storages *storageTaskBundle, healtask *healTask, codetask *healTask, @@ -936,13 +926,13 @@ func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected in // tasks to send to the remote peer. func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks bool) ( accounts []*accountTask, - codes []common.Hash, + codes []*byteCodeTasksBundle, storages *storageTaskBundle, healtask *healTask, codetask *healTask) { // over trie nodes as those can be written to disk and forgotten about. 
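+	// Batches are cut for a single stream: an account-range task is handed out
+	// on its own, and bytecode/storage bundles are only assembled when no
+	// account task was pending (see the early return below).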
-	codes = make([]common.Hash, 0, n)
+	codes = make([]*byteCodeTasksBundle, 0, n)
 	accounts = make([]*accountTask, 0, n)
 
 	for i, task := range s.tasks.accountTasks {
@@ -961,9 +951,12 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 		accounts = append(accounts, task)
 		s.requesting.addAccountTask(task.id, task)
 		// s.tasks.deleteAccountTask(task)
+
+		// one account task is enough for a stream
+		return
 	}
 
-	cap := n - len(accounts)
+	cap := n // - len(accounts)
 
 	for _, task := range s.tasks.accountTasks {
 		// Skip tasks that are already retrieving (or done with) all codes
@@ -971,19 +964,42 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 			continue
 		}
 
+		var hashes []common.Hash
 		for hash := range task.codeTasks {
 			delete(task.codeTasks, hash)
-			codes = append(codes, hash)
-			s.requesting.addCodeTask(hash)
-			s.tasks.deleteCodeTask(hash)
-			// Stop when we've gathered enough requests
-			if len(codes) >= cap {
-				return
+			hashes = append(hashes, hash)
+		}
+
+		// create a unique id for task bundle
+		var taskID uint64
+		for {
+			taskID = uint64(rand.Int63())
+			if taskID == 0 {
+				continue
 			}
+			if _, ok := s.tasks.codeTasks[taskID]; ok {
+				continue
+			}
+			break
+		}
+
+		bytecodeTask := &byteCodeTasksBundle{
+			id:     taskID,
+			hashes: hashes,
+			task:   task,
+		}
+		codes = append(codes, bytecodeTask)
+
+		s.requesting.addCodeTask(taskID, bytecodeTask)
+		//s.tasks.deleteCodeTask(taskID)
+
+		// Stop when we've gathered enough requests
+		if len(codes) >= cap {
+			return
 		}
 	}
 
-	cap = n - len(accounts) - len(codes)
+	cap = n - len(codes) // - len(accounts)
 
 	for accTaskID, task := range s.tasks.accountTasks {
 		// Skip tasks that are already retrieving (or done with) all small states
@@ -1118,7 +1134,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 	var (
 		hashes   = make([]common.Hash, 0, cap)
 		paths    = make([]string, 0, cap)
-		pathsets = make([]TrieNodePathSet, 0, cap)
+		pathsets = make([]*message.TrieNodePathSet, 0, cap)
 	)
 	for path, hash := range s.tasks.healer[0].trieTasks {
 		delete(s.tasks.healer[0].trieTasks, path)
@@ -1228,7 +1244,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 
 // sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
 // the TrieNodePaths and merges paths which belongs to the same account path.
-func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
+func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []*message.TrieNodePathSet) {
 	var syncPaths []trie.SyncPath
 	for _, path := range paths {
 		syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
@@ -1242,14 +1258,14 @@ func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common
 // getBatchFromRetries get the block number batch to be requested from retries.
 func (s *FullStateDownloadManager) getBatchFromRetries(n int) (
 	accounts []*accountTask,
-	codes []common.Hash,
+	codes []*byteCodeTasksBundle,
 	storages *storageTaskBundle,
 	healtask *healTask,
 	codetask *healTask) {
 
 	// over trie nodes as those can be written to disk and forgotten about.
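+	// Rebuild request bundles from tasks that previously failed on another
+	// stream and were parked in the retries set (see HandleRequestError).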
- accounts = make([]*accountTask, 0, n) - codes = make([]common.Hash, 0, n) + accounts = make([]*accountTask, 0) + codes = make([]*byteCodeTasksBundle, 0) for _, task := range s.retries.accountTasks { // Stop when we've gathered enough requests @@ -1263,14 +1279,14 @@ func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( cap := n - len(accounts) - for code := range s.retries.codeTasks { + for _, code := range s.retries.codeTasks { // Stop when we've gathered enough requests if len(codes) >= cap { return } codes = append(codes, code) - s.requesting.addCodeTask(code) - s.retries.deleteCodeTask(code) + s.requesting.addCodeTask(code.id, code) + s.retries.deleteCodeTask(code.id) } cap = n - len(accounts) - len(codes) @@ -1339,7 +1355,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( // HandleRequestError handles the error result func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask, - codes []common.Hash, + codes []*byteCodeTasksBundle, storages *storageTaskBundle, healtask *healTask, codetask *healTask, @@ -1354,8 +1370,8 @@ func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask, } for _, code := range codes { - s.requesting.deleteCodeTask(code) - s.retries.addCodeTask(code) + s.requesting.deleteCodeTask(code.id) + s.retries.addCodeTask(code.id, code) } if storages != nil { @@ -1374,18 +1390,99 @@ func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask, } } +// UnpackAccountRanges retrieves the accounts from the range packet and converts from slim +// wire representation to consensus format. The returned data is RLP encoded +// since it's expected to be serialized to disk without further interpretation. +// +// Note, this method does a round of RLP decoding and re-encoding, so only use it +// once and cache the results if need be. Ideally discard the packet afterwards +// to not double the memory use. +func (s *FullStateDownloadManager) UnpackAccountRanges(retAccounts []*message.AccountData) ([]common.Hash, [][]byte, error) { + var ( + hashes = make([]common.Hash, len(retAccounts)) + accounts = make([][]byte, len(retAccounts)) + ) + for i, acc := range retAccounts { + val, err := FullAccountRLP(acc.Body) + if err != nil { + return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err) + } + hashes[i] = common.BytesToHash(acc.Hash) + accounts[i] = val + } + return hashes, accounts, nil +} + // HandleAccountRequestResult handles get account ranges result -func (s *FullStateDownloadManager) HandleAccountRequestResult(task *accountTask, // Task which this request is filling - hashes []common.Hash, // Account hashes in the returned range - accounts []*types.StateAccount, // Expanded accounts in the returned range - cont bool, // Whether the account range has a continuation +func (s *FullStateDownloadManager) HandleAccountRequestResult(task *accountTask, + retAccounts []*message.AccountData, + proof [][]byte, + origin []byte, + last []byte, loopID int, streamID sttypes.StreamID) error { + hashes, accounts, err := s.UnpackAccountRanges(retAccounts) + if err != nil { + return err + } + + size := common.StorageSize(len(hashes) * common.HashLength) + for _, account := range accounts { + size += common.StorageSize(len(account)) + } + for _, node := range proof { + size += common.StorageSize(len(node)) + } + utils.Logger().Trace(). + Int("hashes", len(hashes)). + Int("accounts", len(accounts)). + Int("proofs", len(proof)). + Interface("bytes", size). 
+		Msg("Delivering range of accounts")
+
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	if err := s.processAccountResponse(task, hashes, accounts, cont); err != nil {
+	// Response is valid, but check if peer is signalling that it does not have
+	// the requested data. For account range queries that means the state being
+	// retrieved was either already pruned remotely, or the peer is not yet
+	// synced to our head.
+	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
+		utils.Logger().Debug().
+			Interface("root", s.root).
+			Msg("Peer rejected account range request")
+		return nil
+	}
+	root := s.root // the deferred Unlock above keeps the lock held for the whole handler
+
+	// Reconstruct a partial trie from the response and verify it
+	keys := make([][]byte, len(hashes))
+	for i, key := range hashes {
+		keys[i] = common.CopyBytes(key[:])
+	}
+	nodes := make(ProofList, len(proof))
+	for i, node := range proof {
+		nodes[i] = node
+	}
+	cont, err := trie.VerifyRangeProof(root, origin[:], last[:], keys, accounts, nodes.Set())
+	if err != nil {
+		utils.Logger().Warn().Err(err).Msg("Account range failed proof")
+		// Signal this request as failed, and ready for rescheduling
+		return err
+	}
+	accs := make([]*types.StateAccount, len(accounts))
+	for i, account := range accounts {
+		acc := new(types.StateAccount)
+		if err := rlp.DecodeBytes(account, acc); err != nil {
+			panic(err) // We created these blobs, we must be able to decode them
+		}
+		accs[i] = acc
+	}
+
+	if err := s.processAccountResponse(task, hashes, accs, cont); err != nil {
 		return err
 	}
 
@@ -1491,16 +1588,72 @@ func (s *FullStateDownloadManager) processAccountResponse(task *accountTask,
 // }
 
 // HandleBytecodeRequestResult handles get bytecode result
-func (s *FullStateDownloadManager) HandleBytecodeRequestResult(task *accountTask, // Task which this request is filling
-	hashes []common.Hash, // Hashes of the bytecode to avoid double hashing
+// It is a callback method to invoke when a batch of contract
+// byte codes are received from a remote peer.
+func (s *FullStateDownloadManager) HandleBytecodeRequestResult(task interface{}, // Task which this request is filling
+	reqHashes []common.Hash, // Hashes of the bytecode to avoid double hashing
 	bytecodes [][]byte, // Actual bytecodes to store into the database (nil = missing)
 	loopID int,
 	streamID sttypes.StreamID) error {
 
+	s.lock.RLock()
+	syncing := !s.snapped
+	s.lock.RUnlock()
+
+	if syncing {
+		return s.onByteCodes(task.(*accountTask), bytecodes, reqHashes)
+	}
+	return s.onHealByteCodes(task.(*healTask), reqHashes, bytecodes)
+}
+
+// onByteCodes is a callback method to invoke when a batch of contract
+// byte codes are received from a remote peer in the syncing phase.
+func (s *FullStateDownloadManager) onByteCodes(task *accountTask, bytecodes [][]byte, reqHashes []common.Hash) error {
+	var size common.StorageSize
+	for _, code := range bytecodes {
+		size += common.StorageSize(len(code))
+	}
+
+	utils.Logger().Trace().Int("bytecodes", len(bytecodes)).Interface("bytes", size).Msg("Delivering set of bytecodes")
+
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	if err := s.processBytecodeResponse(task, hashes, bytecodes); err != nil {
+	// Response is valid, but check if peer is signalling that it does not have
+	// the requested data. For bytecode range queries that means the peer is not
+	// yet synced.
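+	// An empty response is benign here: log it and return without penalizing
+	// the remote peer.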
+	if len(bytecodes) == 0 {
+		utils.Logger().Debug().Msg("Peer rejected bytecode request")
+		return nil
+	}
+
+	// Cross reference the requested bytecodes with the response to find gaps
+	// that the serving node is missing
+	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+	hash := make([]byte, 32)
+
+	codes := make([][]byte, len(reqHashes))
+	for i, j := 0, 0; i < len(bytecodes); i++ {
+		// Find the next hash that we've been served, leaving misses with nils
+		hasher.Reset()
+		hasher.Write(bytecodes[i])
+		hasher.Read(hash)
+
+		for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
+			j++
+		}
+		if j < len(reqHashes) {
+			codes[j] = bytecodes[i]
+			j++
+			continue
+		}
+		// We've either run out of hashes, or got unrequested data
+		utils.Logger().Warn().Int("count", len(bytecodes)-i).Msg("Unexpected bytecodes")
+		// Signal this request as failed, and ready for rescheduling
+		return errors.New("unexpected bytecode")
+	}
+	// Response validated, send it to the scheduler for filling
+	if err := s.processBytecodeResponse(task, reqHashes, codes); err != nil {
 		return err
 	}
 
@@ -1574,21 +1727,143 @@ func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
 	return space.Uint64() - uint64(hashes), nil
 }
 
-// HandleStorageRequestResult handles get storages result
-func (s *FullStateDownloadManager) HandleStorageRequestResult(mainTask *accountTask, // Task which this response belongs to
-	subTask *storageTask, // Task which this response is filling
-	accounts []common.Hash, // Account hashes requested, may be only partially filled
-	roots []common.Hash, // Storage roots requested, may be only partially filled
-	hashes [][]common.Hash, // Storage slot hashes in the returned range
-	storageSlots [][][]byte, // Storage slot values in the returned range
-	cont bool, // Whether the last storage range has a continuation
+// UnpackStorages retrieves the storage slots from the range packet and returns them in
+// a split flat format that's more consistent with the internal data structures.
+func (s *FullStateDownloadManager) UnpackStorages(slots [][]*message.StorageData) ([][]common.Hash, [][][]byte) {
+	var (
+		hashset = make([][]common.Hash, len(slots))
+		slotset = make([][][]byte, len(slots))
+	)
+	for i, slots := range slots {
+		hashset[i] = make([]common.Hash, len(slots))
+		slotset[i] = make([][]byte, len(slots))
+		for j, slot := range slots {
+			hashset[i][j] = common.BytesToHash(slot.Hash)
+			slotset[i][j] = slot.Body
+		}
+	}
+	return hashset, slotset
+}
+
+// HandleStorageRequestResult handles get storages result when ranges of storage slots
+// are received from a remote peer.
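+// The delivered hash/slot sets are cross-checked against the request and
+// verified with range proofs before being handed to processStorageResponse.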
+func (s *FullStateDownloadManager) HandleStorageRequestResult(mainTask *accountTask,
+	subTask *storageTask,
+	reqAccounts []common.Hash,
+	roots []common.Hash,
+	origin common.Hash,
+	limit common.Hash,
+	receivedSlots [][]*message.StorageData,
+	proof [][]byte,
 	loopID int,
 	streamID sttypes.StreamID) error {
 
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	if err := s.processStorageResponse(mainTask, subTask, accounts, roots, hashes, storageSlots, cont); err != nil {
+	hashes, slots := s.UnpackStorages(receivedSlots)
+
+	// Gather some trace stats to aid in debugging issues
+	var (
+		hashCount int
+		slotCount int
+		size      common.StorageSize
+	)
+	for _, hashset := range hashes {
+		size += common.StorageSize(common.HashLength * len(hashset))
+		hashCount += len(hashset)
+	}
+	for _, slotset := range slots {
+		for _, slot := range slotset {
+			size += common.StorageSize(len(slot))
+		}
+		slotCount += len(slotset)
+	}
+	for _, node := range proof {
+		size += common.StorageSize(len(node))
+	}
+
+	utils.Logger().Trace().
+		Int("accounts", len(hashes)).
+		Int("hashes", hashCount).
+		Int("slots", slotCount).
+		Int("proofs", len(proof)).
+		Interface("size", size).
+		Msg("Delivering ranges of storage slots")
+
+	// (the lock acquired at the top of the function is still held here)
+
+	// Reject the response if the hash sets and slot sets don't match, or if the
+	// peer sent more data than requested.
+	if len(hashes) != len(slots) {
+		utils.Logger().Warn().
+			Int("hashset", len(hashes)).
+			Int("slotset", len(slots)).
+			Msg("Hash and slot set size mismatch")
+		return errors.New("hash and slot set size mismatch")
+	}
+	if len(hashes) > len(reqAccounts) {
+		utils.Logger().Warn().
+			Int("hashset", len(hashes)).
+			Int("requested", len(reqAccounts)).
+			Msg("Hash set larger than requested")
+		return errors.New("hash set larger than requested")
+	}
+	// Response is valid, but check if peer is signalling that it does not have
+	// the requested data. For storage range queries that means the state being
+	// retrieved was either already pruned remotely, or the peer is not yet
+	// synced to our head.
+	if len(hashes) == 0 && len(proof) == 0 {
+		utils.Logger().Debug().Msg("Peer rejected storage request")
+		return nil
+	}
+
+	// Reconstruct the partial tries from the response and verify them
+	var cont bool
+
+	// If a proof was attached while the response is empty, it indicates that the
+	// requested range specified with 'origin' is empty. Construct an empty state
+	// response locally to finalize the range.
+	if len(hashes) == 0 && len(proof) > 0 {
+		hashes = append(hashes, []common.Hash{})
+		slots = append(slots, [][]byte{})
+	}
+	for i := 0; i < len(hashes); i++ {
+		// Convert the keys and proofs into an internal format
+		keys := make([][]byte, len(hashes[i]))
+		for j, key := range hashes[i] {
+			keys[j] = common.CopyBytes(key[:])
+		}
+		nodes := make(ProofList, 0, len(proof))
+		if i == len(hashes)-1 {
+			for _, node := range proof {
+				nodes = append(nodes, node)
+			}
+		}
+		var err error
+		if len(nodes) == 0 {
+			// No proof has been attached, the response must cover the entire key
+			// space and hash to the origin root.
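+			// (with nil bounds and a nil proof, VerifyRangeProof requires the
+			// response to be the complete trie hashing to roots[i])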
+			_, err = trie.VerifyRangeProof(roots[i], nil, nil, keys, slots[i], nil)
+			if err != nil {
+				utils.Logger().Warn().Err(err).Msg("Storage slots failed proof")
+				return err
+			}
+		} else {
+			// A proof was attached, the response is only partial, check that the
+			// returned data is indeed part of the storage trie
+			proofdb := nodes.Set()
+
+			cont, err = trie.VerifyRangeProof(roots[i], origin[:], limit[:], keys, slots[i], proofdb)
+			if err != nil {
+				utils.Logger().Warn().Err(err).Msg("Storage range failed proof")
+				return err
+			}
+		}
+	}
+
+	if err := s.processStorageResponse(mainTask, subTask, reqAccounts, roots, hashes, slots, cont); err != nil {
 		return err
 	}
 
@@ -1835,18 +2110,72 @@ func (s *FullStateDownloadManager) processStorageResponse(mainTask *accountTask,
 	return nil
 }
 
-// HandleTrieNodeHealRequestResult handles get trie nodes heal result
+// HandleTrieNodeHealRequestResult handles get trie nodes heal result when a batch of trie nodes
+// are received from a remote peer.
 func (s *FullStateDownloadManager) HandleTrieNodeHealRequestResult(task *healTask, // Task which this request is filling
-	paths []string, // Paths of the trie nodes
-	hashes []common.Hash, // Hashes of the trie nodes to avoid double hashing
-	nodes [][]byte, // Actual trie nodes to store into the database (nil = missing)
+	reqPaths []string,
+	reqHashes []common.Hash,
+	trienodes [][]byte,
 	loopID int,
 	streamID sttypes.StreamID) error {
 
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
-	if err := s.processTrienodeHealResponse(task, paths, hashes, nodes); err != nil {
+	var size common.StorageSize
+	for _, node := range trienodes {
+		size += common.StorageSize(len(node))
+	}
+
+	utils.Logger().Trace().
+		Int("trienodes", len(trienodes)).
+		Interface("bytes", size).
+		Msg("Delivering set of healing trienodes")
+
+	// Response is valid, but check if peer is signalling that it does not have
+	// the requested data. For bytecode range queries that means the peer is not
+	// yet synced.
+	if len(trienodes) == 0 {
+		utils.Logger().Debug().Msg("Peer rejected trienode heal request")
+		return nil
+	}
+
+	// Cross reference the requested trienodes with the response to find gaps
+	// that the serving node is missing
+	var (
+		hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
+		hash   = make([]byte, 32)
+		nodes  = make([][]byte, len(reqHashes))
+		fills  uint64
+	)
+	for i, j := 0, 0; i < len(trienodes); i++ {
+		// Find the next hash that we've been served, leaving misses with nils
+		hasher.Reset()
+		hasher.Write(trienodes[i])
+		hasher.Read(hash)
+
+		for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
+			j++
+		}
+		if j < len(reqHashes) {
+			nodes[j] = trienodes[i]
+			fills++
+			j++
+			continue
+		}
+		// We've either run out of hashes, or got unrequested data
+		utils.Logger().Warn().Int("count", len(trienodes)-i).Msg("Unexpected healing trienodes")
+
+		// Signal this request as failed, and ready for rescheduling
+		return errors.New("unexpected healing trienode")
+	}
+	// Response validated, send it to the scheduler for filling
+	s.trienodeHealPend.Add(fills)
+	defer func() {
+		s.trienodeHealPend.Add(^(fills - 1))
+	}()
+
+	if err := s.processTrienodeHealResponse(task, reqPaths, reqHashes, nodes); err != nil {
 		return err
 	}
 
@@ -1959,6 +2288,67 @@ func (s *FullStateDownloadManager) HandleByteCodeHealRequestResult(task *healTas
 	return nil
 }
 
+// onHealByteCodes is a callback method to invoke when a batch of contract
+// byte codes are received from a remote peer in the healing phase.
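+// Responses may be partial, so delivered codes are matched back to the
+// requested hashes by re-hashing them.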
+func (s *FullStateDownloadManager) onHealByteCodes(task *healTask,
+	reqHashes []common.Hash,
+	bytecodes [][]byte) error {
+
+	var size common.StorageSize
+	for _, code := range bytecodes {
+		size += common.StorageSize(len(code))
+	}
+
+	utils.Logger().Trace().
+		Int("bytecodes", len(bytecodes)).
+		Interface("bytes", size).
+		Msg("Delivering set of healing bytecodes")
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	// Response is valid, but check if peer is signalling that it does not have
+	// the requested data. For bytecode range queries that means the peer is not
+	// yet synced.
+	if len(bytecodes) == 0 {
+		utils.Logger().Debug().Msg("Peer rejected bytecode heal request")
+		return nil
+	}
+
+	// Cross reference the requested bytecodes with the response to find gaps
+	// that the serving node is missing
+	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+	hash := make([]byte, 32)
+
+	codes := make([][]byte, len(reqHashes))
+	for i, j := 0, 0; i < len(bytecodes); i++ {
+		// Find the next hash that we've been served, leaving misses with nils
+		hasher.Reset()
+		hasher.Write(bytecodes[i])
+		hasher.Read(hash)
+
+		for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
+			j++
+		}
+		if j < len(reqHashes) {
+			codes[j] = bytecodes[i]
+			j++
+			continue
+		}
+		// We've either run out of hashes, or got unrequested data
+		utils.Logger().Warn().Int("count", len(bytecodes)-i).Msg("Unexpected healing bytecodes")
+
+		// Signal this request as failed, and ready for rescheduling
+		return errors.New("unexpected healing bytecode")
+	}
+
+	if err := s.processBytecodeHealResponse(task, reqHashes, codes); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // processBytecodeHealResponse integrates an already validated bytecode response
 // into the healer tasks.
 func (s *FullStateDownloadManager) processBytecodeHealResponse(task *healTask, // Task which this request is filling
@@ -1992,3 +2382,30 @@ func (s *FullStateDownloadManager) processBytecodeHealResponse(task *healTask, /
 
 	return nil
 }
+
+// onHealState is a callback method to invoke when a flat state(account
+// or storage slot) is downloaded during the healing stage. The flat states
+// can be persisted blindly and can be fixed later in the generation stage.
+// Note it's not concurrent safe, please handle the concurrent issue outside.
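+// Writes are buffered in stateWriter and flushed once the batch grows past
+// ethdb.IdealBatchSize.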
+func (s *FullStateDownloadManager) onHealState(paths [][]byte, value []byte) error {
+	if len(paths) == 1 {
+		var account types.StateAccount
+		if err := rlp.DecodeBytes(value, &account); err != nil {
+			return nil // Returning the error here would drop the remote peer
+		}
+		blob := s.SlimAccountRLP(account)
+		rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
+		s.accountHealed += 1
+		s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
+	}
+	if len(paths) == 2 {
+		rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
+		s.storageHealed += 1
+		s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
+	}
+	if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
+		s.stateWriter.Write() // It's fine to ignore the error here
+		s.stateWriter.Reset()
+	}
+	return nil
+}
diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go
index 73f050080b..e6879a5239 100644
--- a/api/service/stagedstreamsync/syncing.go
+++ b/api/service/stagedstreamsync/syncing.go
@@ -367,7 +367,7 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo
 	}
 
 	// add consensus last mile blocks
-	if s.consensus != nil {
+	if s.consensus != nil && s.isBeaconNode {
 		if hashes, err := s.addConsensusLastMile(s.Blockchain(), s.consensus); err != nil {
 			utils.Logger().Error().Err(err).
 				Msg("[STAGED_STREAM_SYNC] Add consensus last mile failed")
diff --git a/p2p/stream/protocols/sync/chain.go b/p2p/stream/protocols/sync/chain.go
index aa4dced3f5..3c147c91a8 100644
--- a/p2p/stream/protocols/sync/chain.go
+++ b/p2p/stream/protocols/sync/chain.go
@@ -199,7 +199,7 @@ func (ch *chainHelperImpl) getReceipts(hs []common.Hash) ([]types.Receipts, erro
 	return receipts, nil
 }
 
-// getAccountRangeRequest
+// getAccountRange
 func (ch *chainHelperImpl) getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) {
 	if bytes > softResponseLimit {
 		bytes = softResponseLimit

From 02e2fee4db8f9ec1ad3b813d7824cbf99def5894 Mon Sep 17 00:00:00 2001
From: Diego Nava <8563843+diego1q2w@users.noreply.github.com>
Date: Sun, 10 Dec 2023 19:33:08 +0100
Subject: [PATCH 096/128] Fix: max rate issue (#4580)

* fix: max-rate below the era min-rate

* fix comments

* add localnet epoch config

* update config

* update config

* update config

* update config

* add log

* remove hip30 from localnet

* disable localnet config

---
 core/state/statedb.go            |  4 +++-
 internal/chain/engine.go         |  8 ++++++++
 internal/params/config.go        | 17 ++++++++++++++++-
 staking/availability/measure.go  | 24 ++++++++++++++++++++++++
 test/build-localnet-validator.sh |  4 ++--
 5 files changed, 53 insertions(+), 4 deletions(-)

diff --git a/core/state/statedb.go b/core/state/statedb.go
index 96bd4d26ec..fce6f750b5 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -894,7 +894,9 @@ func (db *DB) Finalise(deleteEmptyObjects bool) {
 	// Commit validator changes in cache to stateObjects
 	// TODO: remove validator cache after commit
 	for addr, wrapper := range db.stateValidators {
-		db.UpdateValidatorWrapper(addr, wrapper)
+		if err := db.UpdateValidatorWrapper(addr, wrapper); err != nil {
+			utils.Logger().Warn().Err(err).Msg("Unable to update the validator wrapper during Finalise")
+		}
 	}
 	addressesToPrefetch := make([][]byte, 0, len(db.journal.dirties))
 	for addr := range db.journal.dirties {
diff --git a/internal/chain/engine.go
b/internal/chain/engine.go
index 4f3aac9ff4..c77d86487d 100644
--- a/internal/chain/engine.go
+++ b/internal/chain/engine.go
@@ -448,6 +448,14 @@ func setElectionEpochAndMinFee(chain engine.ChainReader, header *block.Header, s
 	}
 		isElected[addr] = struct{}{}
 	}
+
+	if config.IsMaxRate(newShardState.Epoch) {
+		for _, addr := range chain.ValidatorCandidates() {
+			if _, err := availability.UpdateMaxCommissionFee(state, addr, minRate); err != nil {
+				return err
+			}
+		}
+	}
 	// due to a bug in the old implementation of the minimum fee,
 	// unelected validators did not have their fee updated even
 	// when the protocol required them to do so. here we fix it,
diff --git a/internal/params/config.go b/internal/params/config.go
index 44bd5cbd7d..017624ffe0 100644
--- a/internal/params/config.go
+++ b/internal/params/config.go
@@ -75,6 +75,7 @@ var (
 	ValidatorCodeFixEpoch: big.NewInt(1535), // 2023-07-20 05:51:07+00:00
 	HIP30Epoch:            big.NewInt(1673), // 2023-11-02 17:30:00+00:00
 	BlockGas30MEpoch:      big.NewInt(1673), // 2023-11-02 17:30:00+00:00
+	MaxRateEpoch:          EpochTBD,
 }

 	// TestnetChainConfig contains the chain parameters to run a node on the harmony test network.
@@ -118,6 +119,7 @@ var (
 	ValidatorCodeFixEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00
 	HIP30Epoch:            big.NewInt(2176), // 2023-10-12 10:00:00+00:00
 	BlockGas30MEpoch:      big.NewInt(2176), // 2023-10-12 10:00:00+00:00
+	MaxRateEpoch:          EpochTBD,
 }
 	// PangaeaChainConfig contains the chain parameters for the Pangaea network.
 	// All features except for CrossLink are enabled at launch.
@@ -161,6 +163,7 @@ var (
 	ValidatorCodeFixEpoch: EpochTBD,
 	HIP30Epoch:            EpochTBD,
 	BlockGas30MEpoch:      big.NewInt(0),
+	MaxRateEpoch:          EpochTBD,
 }

 	// PartnerChainConfig contains the chain parameters for the Partner network.
@@ -205,6 +208,7 @@ var (
 	ValidatorCodeFixEpoch: big.NewInt(5),
 	HIP30Epoch:            big.NewInt(7),
 	BlockGas30MEpoch:      big.NewInt(7),
+	MaxRateEpoch:          EpochTBD,
 }

 	// StressnetChainConfig contains the chain parameters for the Stress test network.
@@ -249,6 +253,7 @@ var (
 	ValidatorCodeFixEpoch: EpochTBD,
 	HIP30Epoch:            EpochTBD,
 	BlockGas30MEpoch:      big.NewInt(0),
+	MaxRateEpoch:          EpochTBD,
 }

 	// LocalnetChainConfig contains the chain parameters to run for local development.
@@ -292,6 +297,7 @@ var (
 	ValidatorCodeFixEpoch: big.NewInt(2),
 	HIP30Epoch:            EpochTBD,
 	BlockGas30MEpoch:      big.NewInt(0),
+	MaxRateEpoch:          EpochTBD,
 }

 	// AllProtocolChanges ...
@@ -336,7 +342,8 @@ var (
 	big.NewInt(0), // FeeCollectEpoch
 	big.NewInt(0), // ValidatorCodeFixEpoch
 	big.NewInt(0), // BlockGas30M
-	big.NewInt(0), // HIP30Epoch
+	big.NewInt(0), // BlockGas30M
+	big.NewInt(0), // MaxRateEpoch
 }

 	// TestChainConfig ...
@@ -382,6 +389,7 @@ var (
 	big.NewInt(0), // ValidatorCodeFixEpoch
 	big.NewInt(0), // HIP30Epoch
 	big.NewInt(0), // BlockGas30M
+	big.NewInt(0), // MaxRateEpoch
 }

 	// TestRules ...
@@ -547,6 +555,9 @@ type ChainConfig struct {
 	HIP30Epoch *big.Int `json:"hip30-epoch,omitempty"`

 	BlockGas30MEpoch *big.Int `json:"block-gas-30m-epoch,omitempty"`
+
+	// MaxRateEpoch will make sure the validator max-rate is at least equal to the era's minRate + the validator's max-change-rate
+	MaxRateEpoch *big.Int `json:"max-rate-epoch,omitempty"`
 }

 // String implements the fmt.Stringer interface.
@@ -803,6 +814,10 @@ func (c *ChainConfig) IsHIP30(epoch *big.Int) bool {
 	return isForked(c.HIP30Epoch, epoch)
 }

+// IsMaxRate determines whether the MaxRate fork is active in the given epoch.
+func (c *ChainConfig) IsMaxRate(epoch *big.Int) bool {
+	return isForked(c.MaxRateEpoch, epoch)
+}
+
 // During this epoch, shards 2 and 3 will start sending
 // their balances over to shard 0 or 1.
func (c *ChainConfig) IsOneEpochBeforeHIP30(epoch *big.Int) bool {
diff --git a/staking/availability/measure.go b/staking/availability/measure.go
index 881baa8553..6bf36bfb05 100644
--- a/staking/availability/measure.go
+++ b/staking/availability/measure.go
@@ -267,3 +267,27 @@ func UpdateMinimumCommissionFee(
 	}
 	return false, nil
 }
+
+// UpdateMaxCommissionFee makes sure the validator's max-rate is no less than minRate + the validator's max-change-rate.
+func UpdateMaxCommissionFee(state *state.DB, addr common.Address, minRate numeric.Dec) (bool, error) {
+	utils.Logger().Info().Msg("begin update max commission fee")
+
+	wrapper, err := state.ValidatorWrapper(addr, true, false)
+	if err != nil {
+		return false, err
+	}
+
+	minMaxRate := minRate.Add(wrapper.MaxChangeRate)
+
+	if wrapper.MaxRate.LT(minMaxRate) {
+		utils.Logger().Info().
+			Str("addr", addr.Hex()).
+			Str("old max-rate", wrapper.MaxRate.String()).
+			Str("new max-rate", minMaxRate.String()).
+			Msg("updating max commission rate")
+		wrapper.MaxRate.SetBytes(minMaxRate.Bytes())
+		return true, nil
+	}
+
+	return false, nil
+}
diff --git a/test/build-localnet-validator.sh b/test/build-localnet-validator.sh
index 08d9877779..70501c8d67 100644
--- a/test/build-localnet-validator.sh
+++ b/test/build-localnet-validator.sh
@@ -32,7 +32,7 @@ hmy --node="http://localhost:9500" staking create-validator \
 	--bls-pubkeys 4f41a37a3a8d0695dd6edcc58142c6b7d98e74da5c90e79b587b3b960b6a4f5e048e6d8b8a000d77a478d44cd640270c,7dcc035a943e29e17959dabe636efad7303d2c6f273ace457ba9dcc2fd19d3f37e70ba1cd8d082cf8ff7be2f861db48c \
 	--name "s0-localnet-validator1" --identity "validator1" --details "validator1" \
 	--security-contact "localnet" --website "localnet.one" \
-	--max-change-rate 0.1 --max-rate 0.1 --rate 0.1 \
+	--max-change-rate 0.01 --max-rate 0.01 --rate 0.01 \
 	--max-total-delegation 100000000 --min-self-delegation 10000 --bls-pubkeys-dir .hmy/extbls/
 
 hmy --node="http://localhost:9500" staking create-validator \
@@ -40,7 +40,7 @@ hmy --node="http://localhost:9500" staking create-validator \
 	--bls-pubkeys b0917378b179a519a5055259c4f8980cce37d58af300b00dd98b07076d3d9a3b16c4a55f84522f553872225a7b1efc0c \
 	--name "s0-localnet-validator2" --identity "validator2" --details "validator2" \
 	--security-contact "localnet" --website "localnet.one" \
-	--max-change-rate 0.1 --max-rate 0.1 --rate 0.1 \
+	--max-change-rate 0.1 --max-rate 0.1 --rate 0.05 \
 	--max-total-delegation 100000000 --min-self-delegation 10000 --bls-pubkeys-dir .hmy/extbls/
 
 hmy --node="http://localhost:9500" staking create-validator \

From f3ce9f3ac927268465a8afe81c41c6a9c88c5dc2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com>
Date: Mon, 11 Dec 2023 22:30:19 +0800
Subject: [PATCH 097/128] return back deleted codes, fix rebase issues,
 goimports
---
 api/service/stagedstreamsync/range.go         |   2 +-
 .../stagedstreamsync/stage_statesync.go       |   4 +-
 core/blockchain.go                            |   2 +
 core/blockchain_impl.go                       | 185 ++++++------------
 4 files changed, 67 insertions(+), 126 deletions(-)

diff --git a/api/service/stagedstreamsync/range.go b/api/service/stagedstreamsync/range.go
index de18b02ab3..d05a92ed40 100644
--- a/api/service/stagedstreamsync/range.go
+++ b/api/service/stagedstreamsync/range.go
@@ -81,4 +81,4 @@ func incHash(h common.Hash) common.Hash {
 	a.SetBytes32(h[:])
 	a.AddUint64(&a, 1)
 	return common.Hash(a.Bytes32())
-}
\ No newline at end of file
+}
diff --git a/api/service/stagedstreamsync/stage_statesync.go
b/api/service/stagedstreamsync/stage_statesync.go index 086d0fb418..4928b71b04 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -58,8 +58,8 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo // for short range sync, skip this step if !s.state.initSync { return nil - } // only execute this stage in fast/snap sync mode and once we reach to pivot - + } // only execute this stage in fast/snap sync mode and once we reach to pivot + if s.state.status.pivotBlock == nil || s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() || s.state.status.statesSynced { diff --git a/core/blockchain.go b/core/blockchain.go index 1f7233f42e..f47133bad8 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -105,6 +105,8 @@ type BlockChain interface { // Rollback is designed to remove a chain of links from the database that aren't // certain enough to be valid. Rollback(chain []common.Hash) error + // writeHeadBlock writes a new head block + WriteHeadBlock(block *types.Block) error // WriteBlockWithoutState writes only the block and its metadata to the database, // but does not write any state. This is used to construct competing side forks // up to the point where they exceed the canonical total difficulty. diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index 15527c3fe0..c7f01d4137 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/metrics" @@ -69,8 +70,9 @@ import ( ) var ( - headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) - headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) + headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil) + headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil) + headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil) accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil) accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil) @@ -185,7 +187,8 @@ type BlockChainImpl struct { pendingCrossLinksMutex sync.RWMutex // pending crosslinks lock pendingSlashingCandidatesMU sync.RWMutex // pending slashing candidates - currentBlock atomic.Value // Current head of the block chain + currentBlock atomic.Value // Current head of the block chain + currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) 
stateCache state.Database // State database to reuse between imports (contains state cache) bodyCache *lru.Cache // Cache for the most recent block bodies @@ -319,6 +322,7 @@ func newBlockChainWithOptions( } var nilBlock *types.Block bc.currentBlock.Store(nilBlock) + bc.currentFastBlock.Store(nilBlock) if err := bc.loadLastState(); err != nil { return nil, err } @@ -612,8 +616,22 @@ func (bc *BlockChainImpl) loadLastState() error { return errors.Wrap(err, "headerChain SetCurrentHeader") } + // Restore the last known head fast block + bc.currentFastBlock.Store(currentBlock) + headFastBlockGauge.Update(int64(currentBlock.NumberU64())) + if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { + if block := bc.GetBlockByHash(head); block != nil { + bc.currentFastBlock.Store(block) + headFastBlockGauge.Update(int64(block.NumberU64())) + } + } + + // Issue a status log for the user + currentFastBlock := bc.CurrentFastBlock() + headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number().Uint64()) blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) + fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) utils.Logger().Info(). Str("number", currentHeader.Number().String()). @@ -627,6 +645,12 @@ func (bc *BlockChainImpl) loadLastState() error { Str("td", blockTd.String()). Str("age", common.PrettyAge(time.Unix(currentBlock.Time().Int64(), 0)).String()). Msg("Loaded most recent local full block") + utils.Logger().Info(). + Str("number", currentFastBlock.Number().String()). + Str("hash", currentFastBlock.Hash().Hex()). + Str("td", fastTd.String()). + Str("age", common.PrettyAge(time.Unix(currentFastBlock.Time().Int64(), 0)).String()). + Msg("Loaded most recent local fast block") return nil } @@ -663,16 +687,30 @@ func (bc *BlockChainImpl) setHead(head uint64) error { headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) } } + // Rewind the fast block in a simpleton way to the target head + if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number().Uint64() < currentFastBlock.NumberU64() { + newHeadFastBlock := bc.GetBlock(currentHeader.Hash(), currentHeader.Number().Uint64()) + bc.currentFastBlock.Store(newHeadFastBlock) + headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) + } // If either blocks reached nil, reset to the genesis state if currentBlock := bc.CurrentBlock(); currentBlock == nil { bc.currentBlock.Store(bc.genesisBlock) headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) } + if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil { + bc.currentFastBlock.Store(bc.genesisBlock) + headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) + } currentBlock := bc.CurrentBlock() + currentFastBlock := bc.CurrentFastBlock() if err := rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil { return err } + if err := rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil { + return err + } return bc.loadLastState() } @@ -738,6 +776,8 @@ func (bc *BlockChainImpl) resetWithGenesisBlock(genesis *types.Block) error { } bc.currentBlock.Store(bc.genesisBlock) headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) + bc.currentFastBlock.Store(bc.genesisBlock) + headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) return nil } @@ -839,6 +879,10 @@ func (bc *BlockChainImpl) ExportN(w io.Writer, first uint64, last uint64) error return nil } +func (bc *BlockChainImpl) WriteHeadBlock(block *types.Block) error { + return 
bc.writeHeadBlock(block) +} + // writeHeadBlock writes a new head block func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error { // If the block is on a side chain or an unknown one, force other heads onto it too @@ -881,6 +925,9 @@ func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error { if err := rawdb.WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil { return err } + + bc.currentFastBlock.Store(block) + headFastBlockGauge.Update(int64(block.NumberU64())) } return nil } @@ -894,6 +941,9 @@ func (bc *BlockChainImpl) tikvFastForward(block *types.Block, logs []*types.Log) return errors.Wrap(err, "HeaderChain SetCurrentHeader") } + bc.currentFastBlock.Store(block) + headFastBlockGauge.Update(int64(block.NumberU64())) + var events []interface{} events = append(events, ChainEvent{block, block.Hash(), logs}) events = append(events, ChainHeadEvent{block}) @@ -1195,6 +1245,14 @@ func (bc *BlockChainImpl) Rollback(chain []common.Hash) error { } } } + if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentFastBlock.Hash() == hash { + newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1) + if newFastBlock != nil { + bc.currentFastBlock.Store(newFastBlock) + headFastBlockGauge.Update(int64(newFastBlock.NumberU64())) + rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash()) + } + } if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentBlock.Hash() == hash { newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1) if newBlock != nil { @@ -1792,7 +1850,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Write the block to the chain and get the status. substart = time.Now() - status, err := bc.writeBlockWithState( + status, err := bc.WriteBlockWithState( block, receipts, cxReceipts, stakeMsgs, payout, state, ) if err != nil { @@ -1848,125 +1906,6 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i return 0, events, coalescedLogs, nil } -// insertChainWithoutBlockExecution adds a set of blocks to blockchain without adding states -func (bc *BlockChainImpl) insertChainWithoutBlockExecution(chain types.Blocks, verifyHeaders bool) (int, []interface{}, []*types.Log, error) { - // Sanity check that we have something meaningful to import - if len(chain) == 0 { - return 0, nil, nil, nil - } - // Do a sanity check that the provided chain is actually ordered and linked - for i := 1; i < len(chain); i++ { - if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { - // Chain broke ancestry, log a message (programming error) and skip insertion - utils.Logger().Error(). - Str("number", chain[i].Number().String()). - Str("hash", chain[i].Hash().Hex()). - Str("parent", chain[i].ParentHash().Hex()). - Str("prevnumber", chain[i-1].Number().String()). - Str("prevhash", chain[i-1].Hash().Hex()). - Msg("insertChain: non contiguous block insert") - - return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), - chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) - } - } - - bc.chainmu.Lock() - defer bc.chainmu.Unlock() - - var verifyHeadersResults <-chan error - - // If the block header chain has not been verified, conduct header verification here. 
- if verifyHeaders { - headers := make([]*block.Header, len(chain)) - seals := make([]bool, len(chain)) - - for i, block := range chain { - headers[i] = block.Header() - seals[i] = true - } - // Note that VerifyHeaders verifies headers in the chain in parallel - abort, results := bc.Engine().VerifyHeaders(bc, headers, seals) - verifyHeadersResults = results - defer close(abort) - } - - // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) - //senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) - - // Iterate over the blocks and insert when the verifier permits - for i, block := range chain { - // If the chain is terminating, stop processing blocks - if atomic.LoadInt32(&bc.procInterrupt) == 1 { - utils.Logger().Debug().Msg("Premature abort during blocks processing") - break - } - - var err error - if verifyHeaders { - err = <-verifyHeadersResults - } - if err == nil { - err = bc.Validator().ValidateBody(block) - } - switch { - case err == ErrKnownBlock: - // Block and state both already known. However if the current block is below - // this number we did a rollback and we should reimport it nonetheless. - if bc.CurrentBlock().NumberU64() >= block.NumberU64() { - continue - } - - case err == consensus_engine.ErrFutureBlock: - // Allow up to MaxFuture second in the future blocks. If this limit is exceeded - // the chain is discarded and processed at a later time if given. - max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) - if block.Time().Cmp(max) > 0 { - return i, nil, nil, fmt.Errorf("future block: %v > %v", block.Time(), max) - } - bc.futureBlocks.Add(block.Hash(), block) - continue - - case err == consensus_engine.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): - bc.futureBlocks.Add(block.Hash(), block) - continue - - case err == consensus_engine.ErrPrunedAncestor: - var winner []*types.Block - parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1) - for parent != nil && !bc.HasState(parent.Root()) { - winner = append(winner, parent) - parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) - } - for j := 0; j < len(winner)/2; j++ { - winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j] - } - // Prune in case non-empty winner chain - if len(winner) > 0 { - // Import all the pruned blocks to make the state available - bc.chainmu.Unlock() - _, _, _, err := bc.insertChainWithoutBlockExecution(winner, true /* verifyHeaders */) - bc.chainmu.Lock() - if err != nil { - return i, nil, nil, err - } - } - - case err != nil: - bc.reportBlock(block, nil, err) - return i, nil, nil, err - } - - // Create a new statedb using the parent block and report an - // error if it fails. - if err = bc.WriteBlockWithoutState(block); err != nil { - return i, nil, nil, err - } - } - - return 0, nil, nil, nil -} - // insertStats tracks and reports on block insertion. 
type insertStats struct {
	queued, processed, ignored int

From 191c55b403eaf64d91f3eda049da6a92202c1c11 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com>
Date: Tue, 12 Dec 2023 22:08:50 +0800
Subject: [PATCH 098/128] fix full state sync requests cap, add error handling
 to stage state sync, goimports
---
 .../stagedstreamsync/stage_statesync_full.go  | 206 +++++++++++-------
 .../stagedstreamsync/state_sync_full.go       | 137 ++++++------
 p2p/stream/protocols/sync/chain.go            |   2 +-
 3 files changed, 197 insertions(+), 148 deletions(-)

diff --git a/api/service/stagedstreamsync/stage_statesync_full.go b/api/service/stagedstreamsync/stage_statesync_full.go
index 3e190bdc9e..5a8bcd11d2 100644
--- a/api/service/stagedstreamsync/stage_statesync_full.go
+++ b/api/service/stagedstreamsync/stage_statesync_full.go
@@ -206,84 +206,92 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full
 			cap := maxRequestSize
 			retAccounts, proof, stid, err := sss.configs.protocol.GetAccountRange(ctx, root, origin, limit, uint64(cap))
 			if err != nil {
+				if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+					sss.configs.protocol.StreamFailed(stid, "GetAccountRange failed")
+				}
+				utils.Logger().Error().
+					Err(err).
+					Str("stream", string(stid)).
+					Msg(WrapStagedSyncMsg("GetAccountRange failed"))
+				err = errors.Wrap(err, "request error")
+				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+				return
+			} else if retAccounts == nil || len(retAccounts) == 0 {
+				s.state.Debug("runStateWorkerLoop/GetAccountRange/data", "nil array")
+				utils.Logger().Warn().
+					Str("stream", string(stid)).
+					Msg(WrapStagedSyncMsg("GetAccountRange failed, received empty accounts"))
+				err := errors.New("GetAccountRange received empty accounts")
+				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
 				return
 			}
 			if err := sdm.HandleAccountRequestResult(task, retAccounts, proof, origin[:], limit[:], loopID, stid); err != nil {
+				utils.Logger().Error().
+					Err(err).
+					Str("stream", string(stid)).
+					Msg(WrapStagedSyncMsg("GetAccountRange handle result failed"))
+				err = errors.Wrap(err, "handle result error")
+				sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
 				return
 			}
-		} else if len(codes)+len(storages.accounts) > 0 {
+		} else if len(codes) > 0 {
 
-			if len(codes) > 0 {
-				stid, err := sss.downloadByteCodes(ctx, sdm, codes, loopID)
-				if err != nil {
-					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
-						sss.configs.protocol.StreamFailed(stid, "downloadByteCodes failed")
-					}
-					utils.Logger().Error().
-						Err(err).
-						Str("stream", string(stid)).
-						Msg(WrapStagedSyncMsg("downloadByteCodes failed"))
-					err = errors.Wrap(err, "request error")
-					sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
-					return
+			stid, err := sss.downloadByteCodes(ctx, sdm, codes, loopID)
+			if err != nil {
+				if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+					sss.configs.protocol.StreamFailed(stid, "downloadByteCodes failed")
 				}
+				utils.Logger().Error().
+					Err(err).
+					Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("downloadByteCodes failed")) + err = errors.Wrap(err, "request error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return } - if len(storages.accounts) > 0 { - root := sdm.root - roots := storages.roots - accounts := storages.accounts - cap := maxRequestSize - origin := storages.origin - limit := storages.limit - mainTask := storages.mainTask - subTask := storages.subtask - - slots, proof, stid, err := sss.configs.protocol.GetStorageRanges(ctx, root, accounts, origin, limit, uint64(cap)) - if err != nil { - return - } - if err := sdm.HandleStorageRequestResult(mainTask, subTask, accounts, roots, origin, limit, slots, proof, loopID, stid); err != nil { - return + } else if len(storages.accounts) > 0 { + + root := sdm.root + roots := storages.roots + accounts := storages.accounts + cap := maxRequestSize + origin := storages.origin + limit := storages.limit + mainTask := storages.mainTask + subTask := storages.subtask + + slots, proof, stid, err := sss.configs.protocol.GetStorageRanges(ctx, root, accounts, origin, limit, uint64(cap)) + if err != nil { + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + sss.configs.protocol.StreamFailed(stid, "GetStorageRanges failed") } + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetStorageRanges failed")) + err = errors.Wrap(err, "request error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return + } else if slots == nil || len(slots) == 0 { + s.state.Debug("runStateWorkerLoop/GetStorageRanges/data", "nil array") + utils.Logger().Warn(). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetStorageRanges failed, received empty slots")) + err := errors.New("GetStorageRanges received empty slots") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return + } + if err := sdm.HandleStorageRequestResult(mainTask, subTask, accounts, roots, origin, limit, slots, proof, loopID, stid); err != nil { + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetStorageRanges handle result failed")) + err = errors.Wrap(err, "handle result error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return } - - // data, stid, err := sss.downloadStates(ctx, accounts, codes, storages) - // if err != nil { - // s.state.Debug("runStateWorkerLoop/downloadStates/error", err) - // if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - // sss.configs.protocol.StreamFailed(stid, "downloadStates failed") - // } - // utils.Logger().Error(). - // Err(err). - // Str("stream", string(stid)). - // Msg(WrapStagedSyncMsg("downloadStates failed")) - // err = errors.Wrap(err, "request error") - // sdm.HandleRequestError(codes, paths, stid, err) - // } else if data == nil || len(data) == 0 { - // s.state.Debug("runStateWorkerLoop/downloadStates/data", "nil array") - // utils.Logger().Warn(). - // Str("stream", string(stid)). 
- // Msg(WrapStagedSyncMsg("downloadStates failed, received empty data bytes")) - // err := errors.New("downloadStates received empty data bytes") - // sdm.HandleRequestError(codes, paths, stid, err) - // } else { - // s.state.Debug("runStateWorkerLoop/downloadStates/data/len", len(data)) - // sdm.HandleRequestResult(nodes, paths, data, loopID, stid) - // if sss.configs.logProgress { - // //calculating block download speed - // dt := time.Now().Sub(startTime).Seconds() - // speed := float64(0) - // if dt > 0 { - // speed = float64(len(data)) / dt - // } - // stateDownloadSpeed := fmt.Sprintf("%.2f", speed) - - // fmt.Print("\033[u\033[K") // restore the cursor position and clear the line - // fmt.Println("state download speed:", stateDownloadSpeed, "states/s") - // } - // } } else { // assign trie node Heal Tasks @@ -296,9 +304,32 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full nodes, stid, err := sss.configs.protocol.GetTrieNodes(ctx, root, pathsets, maxRequestSize) if err != nil { + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + sss.configs.protocol.StreamFailed(stid, "GetTrieNodes failed") + } + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetTrieNodes failed")) + err = errors.Wrap(err, "request error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return + } else if nodes == nil || len(nodes) == 0 { + s.state.Debug("runStateWorkerLoop/GetTrieNodes/data", "nil array") + utils.Logger().Warn(). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetTrieNodes failed, received empty nodes")) + err := errors.New("GetTrieNodes received empty nodes") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } if err := sdm.HandleTrieNodeHealRequestResult(task, paths, hashes, nodes, loopID, stid); err != nil { + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetTrieNodes handle result failed")) + err = errors.Wrap(err, "handle result error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } } @@ -306,11 +337,34 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full if len(codetask.hashes) > 0 { task := codetask.task hashes := codetask.hashes - codes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, maxRequestSize) + retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, maxRequestSize) if err != nil { + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + sss.configs.protocol.StreamFailed(stid, "GetByteCodes failed") + } + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetByteCodes failed")) + err = errors.Wrap(err, "request error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + return + } else if retCodes == nil || len(retCodes) == 0 { + s.state.Debug("runStateWorkerLoop/GetByteCodes/data", "nil array") + utils.Logger().Warn(). + Str("stream", string(stid)). 
+ Msg(WrapStagedSyncMsg("GetByteCodes failed, received empty codes")) + err := errors.New("GetByteCodes received empty codes") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } - if err := sdm.HandleBytecodeRequestResult(task, hashes, codes, loopID, stid); err != nil { + if err := sdm.HandleBytecodeRequestResult(task, hashes, retCodes, loopID, stid); err != nil { + utils.Logger().Error(). + Err(err). + Str("stream", string(stid)). + Msg(WrapStagedSyncMsg("GetByteCodes handle result failed")) + err = errors.Wrap(err, "handle result error") + sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } } @@ -326,20 +380,8 @@ func (sss *StageFullStateSync) downloadByteCodes(ctx context.Context, sdm *FullS if err != nil { return stid, err } - if err = sdm.HandleBytecodeRequestResult(codeTask.task, codeTask.hashes, retCodes, loopID, stid); err != nil { - return stid, err - } - } - return -} - -func (sss *StageFullStateSync) downloadStorages(ctx context.Context, sdm *FullStateDownloadManager, codeTasks []*byteCodeTasksBundle, loopID int) (stid sttypes.StreamID, err error) { - for _, codeTask := range codeTasks { - // try to get byte codes from remote peer - // if any of them failed, the stid will be the id of failed stream - retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, maxRequestSize) - if err != nil { - return stid, err + if len(retCodes) == 0 { + return stid, errors.New("empty codes array") } if err = sdm.HandleBytecodeRequestResult(codeTask.task, codeTask.hashes, retCodes, loopID, stid); err != nil { return stid, err diff --git a/api/service/stagedstreamsync/state_sync_full.go b/api/service/stagedstreamsync/state_sync_full.go index daf0f4869b..c98dcbafdc 100644 --- a/api/service/stagedstreamsync/state_sync_full.go +++ b/api/service/stagedstreamsync/state_sync_full.go @@ -571,13 +571,10 @@ func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, s.lock.Lock() defer s.lock.Unlock() - cap := StatesPerRequest - - accounts, codes, storages, healtask, codetask = s.getBatchFromRetries(cap) + accounts, codes, storages, healtask, codetask = s.getBatchFromRetries() nItems := len(accounts) + len(codes) + len(storages.roots) + len(healtask.hashes) + len(codetask.hashes) - cap -= nItems - if cap == 0 { + if nItems > 0 { return } @@ -593,7 +590,7 @@ func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask, if healtask != nil || codetask != nil { withHealTasks = false } - newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask := s.getBatchFromUnprocessed(cap, withHealTasks) + newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask := s.getBatchFromUnprocessed(withHealTasks) accounts = append(accounts, newAccounts...) codes = append(codes, newCodes...) storages = newStorageTaskBundle @@ -924,7 +921,7 @@ func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected in // getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download // tasks to send to the remote peer. 
-func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks bool) (
+func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 	accounts []*accountTask,
 	codes []*byteCodeTasksBundle,
 	storages *storageTaskBundle,
@@ -932,31 +929,43 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 	codetask *healTask) {
 
 	// over trie nodes as those can be written to disk and forgotten about.
-	codes = make([]*byteCodeTasksBundle, 0, n)
-	accounts = make([]*accountTask, 0, n)
+	codes = make([]*byteCodeTasksBundle, 0)
+	accounts = make([]*accountTask, 0)
 
 	for i, task := range s.tasks.accountTasks {
 		// Stop when we've gathered enough requests
-		if len(accounts) == n {
-			return
-		}
+		// if len(accounts) == n {
+		// 	return
+		// }
+
+		// if already requested
 		if task.requested {
 			continue
 		}
-		if task.id == 0 {
-			continue
+
+		// create a unique id for the account task
+		var taskID uint64
+		for {
+			taskID = uint64(rand.Int63())
+			if taskID == 0 {
+				continue
+			}
+			if _, ok := s.tasks.accountTasks[taskID]; ok {
+				continue
+			}
+			break
 		}
+
 		s.tasks.accountTasks[i].requested = true
 		accounts = append(accounts, task)
 		s.requesting.addAccountTask(task.id, task)
-		// s.tasks.deleteAccountTask(task)
+		s.tasks.addAccountTask(task.id, task)
 
+		// one account task is enough for a stream
 		return
 	}
 
-	cap := n // - len(accounts)
+	totalHashes := int(0)
 
 	for _, task := range s.tasks.accountTasks {
 		// Skip tasks that are already retrieving (or done with) all codes
@@ -969,6 +978,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 			delete(task.codeTasks, hash)
 			hashes = append(hashes, hash)
 		}
+		totalHashes += len(hashes)
 
 		// create a unique id for task bundle
 		var taskID uint64
@@ -991,15 +1001,18 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 
 		codes = append(codes, bytecodeTask)
 
 		s.requesting.addCodeTask(taskID, bytecodeTask)
-		//s.tasks.deleteCodeTask(taskID)
+		s.tasks.addCodeTask(taskID, bytecodeTask)
 
 		// Stop when we've gathered enough requests
-		if len(codes) >= cap {
+		if totalHashes >= maxCodeRequestCount {
 			return
 		}
 	}
 
-	cap = n - len(codes) // - len(accounts)
+	// if we found some codes, we can assign them to the node and return
+	if totalHashes > 0 {
+		return
+	}
 
	for accTaskID, task := range s.tasks.accountTasks {
 		// Skip tasks that are already retrieving (or done with) all small states
@@ -1008,13 +1021,13 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 		}
 
 		// TODO: check cap calculations (shouldn't give us big chunk)
-		if cap > maxRequestSize {
-			cap = maxRequestSize
-		}
-		if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
-			cap = minRequestSize
-		}
-		storageSets := cap / 1024
+		// if cap > maxRequestSize {
+		// 	cap = maxRequestSize
+		// }
+		// if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
+		// 	cap = minRequestSize
+		// }
+		storageSets := maxRequestSize / 1024
 
 		storages = &storageTaskBundle{
 			accounts: make([]common.Hash, 0, storageSets),
@@ -1079,14 +1092,10 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 		s.tasks.addStorageTaskBundle(taskID, storages)
 		s.requesting.addStorageTaskBundle(taskID, storages)
 
-		cap -= len(storages.accounts)
-
-		if cap <= 0 {
-			break
-		}
+		return
 	}
 
-	if len(accounts)+len(codes)+len(storages.accounts) > 0 {
+	if len(storages.accounts) > 0 {
 		return
 	}
 
@@ -1095,7 +1104,6 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks
 	}
 
 	// Sync
phase done, run heal phase - cap = n // Iterate over pending tasks and try to find a peer to retrieve with for (len(s.tasks.healer) > 0 && len(s.tasks.healer[0].hashes) > 0) || s.scheduler.Pending() > 0 { @@ -1111,7 +1119,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks } } - mPaths, mHashes, mCodes := s.scheduler.Missing(n) + mPaths, mHashes, mCodes := s.scheduler.Missing(maxTrieRequestCount) for i, path := range mPaths { s.tasks.healer[0].trieTasks[path] = mHashes[i] } @@ -1124,10 +1132,10 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks return } // Generate the network query and send it to the peer - if cap > maxTrieRequestCount { - cap = maxTrieRequestCount - } - cap = int(float64(cap) / s.trienodeHealThrottle) + // if cap > maxTrieRequestCount { + // cap = maxTrieRequestCount + // } + cap := int(float64(maxTrieRequestCount) / s.trienodeHealThrottle) if cap <= 0 { cap = 1 } @@ -1175,7 +1183,9 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks s.tasks.healer[taskID] = healtask s.requesting.addHealerTask(taskID, healtask) - cap = n - len(hashes) + if len(hashes) > 0 { + return + } } // trying to get bytecodes @@ -1185,7 +1195,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks // queue from the state sync scheduler. The trie synced schedules these // together with trie nodes, so we need to queue them combined. - mPaths, mHashes, mCodes := s.scheduler.Missing(cap) + mPaths, mHashes, mCodes := s.scheduler.Missing(maxTrieRequestCount) for i, path := range mPaths { s.tasks.healer[0].trieTasks[path] = mHashes[i] } @@ -1202,9 +1212,10 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(n int, withHealTasks // Abort the entire assignment mechanism. // Generate the network query and send it to the peer - if cap > maxCodeRequestCount { - cap = maxCodeRequestCount - } + // if cap > maxCodeRequestCount { + // cap = maxCodeRequestCount + // } + cap := maxCodeRequestCount hashes := make([]common.Hash, 0, cap) for hash := range s.tasks.healer[0].codeTasks { delete(s.tasks.healer[0].codeTasks, hash) @@ -1256,7 +1267,7 @@ func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common } // getBatchFromRetries get the block number batch to be requested from retries. 
-func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( +func (s *FullStateDownloadManager) getBatchFromRetries() ( accounts []*accountTask, codes []*byteCodeTasksBundle, storages *storageTaskBundle, @@ -1269,27 +1280,29 @@ func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( for _, task := range s.retries.accountTasks { // Stop when we've gathered enough requests - if len(accounts) == n { - return - } + // if len(accounts) == n { + // return + // } accounts = append(accounts, task) s.requesting.addAccountTask(task.id, task) s.retries.deleteAccountTask(task.id) + return } - cap := n - len(accounts) + if len(accounts) > 0 { + return + } for _, code := range s.retries.codeTasks { - // Stop when we've gathered enough requests - if len(codes) >= cap { - return - } codes = append(codes, code) s.requesting.addCodeTask(code.id, code) s.retries.deleteCodeTask(code.id) + return } - cap = n - len(accounts) - len(codes) + if len(codes) > 0 { + return + } if s.retries.storageTasks != nil && len(s.retries.storageTasks) > 0 { storages = &storageTaskBundle{ @@ -1303,20 +1316,17 @@ func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( } s.requesting.addStorageTaskBundle(storages.id, storages) s.retries.deleteStorageTaskBundle(storages.id) + return } - if len(accounts)+len(codes)+len(storages.accounts) > 0 { + if len(storages.accounts) > 0 { return } - cap = n - if s.retries.healer != nil && len(s.retries.healer) > 0 { - foundHealTask := false - foundByteCodeTask := false for id, task := range s.retries.healer { - if !foundHealTask && !task.byteCodeReq { + if !task.byteCodeReq { healtask = &healTask{ id: id, hashes: task.hashes, @@ -1328,9 +1338,9 @@ func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( } s.requesting.addHealerTask(id, task) s.retries.deleteHealerTask(id) - foundHealTask = true + return } - if !foundByteCodeTask && task.byteCodeReq { + if task.byteCodeReq { codetask = &healTask{ id: id, hashes: task.hashes, @@ -1342,10 +1352,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries(n int) ( } s.requesting.addHealerTask(id, task) s.retries.deleteHealerTask(id) - foundByteCodeTask = true - } - if foundHealTask && foundByteCodeTask { - break + return } } } diff --git a/p2p/stream/protocols/sync/chain.go b/p2p/stream/protocols/sync/chain.go index 3c147c91a8..451952bcce 100644 --- a/p2p/stream/protocols/sync/chain.go +++ b/p2p/stream/protocols/sync/chain.go @@ -199,7 +199,7 @@ func (ch *chainHelperImpl) getReceipts(hs []common.Hash) ([]types.Receipts, erro return receipts, nil } -// getAccountRange +// getAccountRange func (ch *chainHelperImpl) getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) { if bytes > softResponseLimit { bytes = softResponseLimit From 419aad1fb7d4d7e503dad91940507fce6834bee6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Tue, 12 Dec 2023 22:15:11 +0800 Subject: [PATCH 099/128] remove state debug logs --- .../stagedstreamsync/stage_statesync.go | 1 - .../stagedstreamsync/stage_statesync_full.go | 22 ------------------- 2 files changed, 23 deletions(-) diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go index 4928b71b04..c4e66e10ef 100644 --- a/api/service/stagedstreamsync/stage_statesync.go +++ b/api/service/stagedstreamsync/stage_statesync.go @@ -120,7 +120,6 @@ func (sss *StageStateSync) Exec(ctx 
context.Context, bool, invalidBlockRevert bo sss.configs.logger.Warn().Err(err). Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()). Msg(WrapStagedSyncMsg("insert pivot block failed")) - s.state.Debug("StateSync/pivot/insert/error", err) // TODO: panic("pivot block is failed to insert in chain.") return err } diff --git a/api/service/stagedstreamsync/stage_statesync_full.go b/api/service/stagedstreamsync/stage_statesync_full.go index 5a8bcd11d2..d304ca1c3b 100644 --- a/api/service/stagedstreamsync/stage_statesync_full.go +++ b/api/service/stagedstreamsync/stage_statesync_full.go @@ -67,7 +67,6 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever return nil } - s.state.Debug("STATE SYNC ======================================================>", "started") // maxHeight := s.state.status.targetBN // currentHead := s.state.CurrentBlockNumber() // if currentHead >= maxHeight { @@ -106,15 +105,10 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever } // Fetch states from neighbors - pivotRootHash := s.state.status.pivotBlock.Root() currentBlockRootHash := s.state.bc.CurrentFastBlock().Root() scheme := sss.configs.bc.TrieDB().Scheme() sdm := newFullStateDownloadManager(sss.configs.bc.ChainDb(), scheme, tx, sss.configs.bc, sss.configs.concurrency, s.state.logger) sdm.setRootHash(currentBlockRootHash) - s.state.Debug("StateSync/setRootHash", pivotRootHash) - s.state.Debug("StateSync/currentFastBlockRoot", currentBlockRootHash) - s.state.Debug("StateSync/pivotBlockNumber", s.state.status.pivotBlock.NumberU64()) - s.state.Debug("StateSync/currentFastBlockNumber", s.state.bc.CurrentFastBlock().NumberU64()) var wg sync.WaitGroup for i := 0; i < s.state.config.Concurrency; i++ { wg.Add(1) @@ -127,7 +121,6 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever sss.configs.logger.Warn().Err(err). Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()). 
Msg(WrapStagedSyncMsg("insert pivot block failed")) - s.state.Debug("StateSync/pivot/insert/error", err) // TODO: panic("pivot block is failed to insert in chain.") return err } @@ -135,9 +128,6 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever // states should be fully synced in this stage s.state.status.statesSynced = true - s.state.Debug("StateSync/pivot/num", s.state.status.pivotBlock.NumberU64()) - s.state.Debug("StateSync/pivot/insert", "done") - /* gbm := s.state.gbm @@ -171,21 +161,15 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever // runStateWorkerLoop creates a work loop for download states func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *FullStateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) { - s.state.Debug("runStateWorkerLoop/info", "started") - defer wg.Done() for { select { case <-ctx.Done(): - s.state.Debug("runStateWorkerLoop/ctx/done", "Finished") return default: } accountTasks, codes, storages, healtask, codetask, err := sdm.GetNextBatch() - s.state.Debug("runStateWorkerLoop/batch/len", len(accountTasks)+len(codes)+len(storages.accounts)) - s.state.Debug("runStateWorkerLoop/batch/heals/len", len(healtask.hashes)+len(codetask.hashes)) - s.state.Debug("runStateWorkerLoop/batch/err", err) if len(accountTasks)+len(codes)+len(storages.accounts)+len(healtask.hashes)+len(codetask.hashes) == 0 || err != nil { select { case <-ctx.Done(): @@ -194,8 +178,6 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full return } } - s.state.Debug("runStateWorkerLoop/batch/accounts", accountTasks) - s.state.Debug("runStateWorkerLoop/batch/codes", codes) if len(accountTasks) > 0 { @@ -217,7 +199,6 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } else if retAccounts == nil || len(retAccounts) == 0 { - s.state.Debug("runStateWorkerLoop/GetAccountRange/data", "nil array") utils.Logger().Warn(). Str("stream", string(stid)). Msg(WrapStagedSyncMsg("GetAccountRange failed, received empty accounts")) @@ -275,7 +256,6 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } else if slots == nil || len(slots) == 0 { - s.state.Debug("runStateWorkerLoop/GetStorageRanges/data", "nil array") utils.Logger().Warn(). Str("stream", string(stid)). Msg(WrapStagedSyncMsg("GetStorageRanges failed, received empty slots")) @@ -315,7 +295,6 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } else if nodes == nil || len(nodes) == 0 { - s.state.Debug("runStateWorkerLoop/GetTrieNodes/data", "nil array") utils.Logger().Warn(). Str("stream", string(stid)). Msg(WrapStagedSyncMsg("GetTrieNodes failed, received empty nodes")) @@ -350,7 +329,6 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } else if retCodes == nil || len(retCodes) == 0 { - s.state.Debug("runStateWorkerLoop/GetByteCodes/data", "nil array") utils.Logger().Warn(). Str("stream", string(stid)). 
Msg(WrapStagedSyncMsg("GetByteCodes failed, received empty codes")) From 9a5ba3cc0b22a1f8175847310351d478b3428255 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 20 Dec 2023 16:14:43 -0400 Subject: [PATCH 100/128] blocksCountAliveness can't be bigger than minimumBlocksForLeaderInRow (#4589) --- consensus/consensus_v2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index 0eb6e338d2..0e1c407057 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -712,7 +712,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper { } numBlocksProducedByLeader := blocksPerEpoch / uint64(slotsCount) rest := blocksPerEpoch % uint64(slotsCount) - const minimumBlocksForLeaderInRow = 3 + const minimumBlocksForLeaderInRow = blocksCountAliveness if numBlocksProducedByLeader < minimumBlocksForLeaderInRow { // mine no less than 3 blocks in a row numBlocksProducedByLeader = minimumBlocksForLeaderInRow From 556444cea551be43c4453fcf6c2f142b8698f0cd Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 20 Dec 2023 16:16:46 -0400 Subject: [PATCH 101/128] Feature: last signing power. (#4584) * Signing power without leader. * Fixed shard id. * Removed debug info. --- consensus/quorum/one-node-staked-vote.go | 40 ++++++++---------------- hmy/hmy.go | 1 + node/api.go | 28 +++++++++++++++++ rpc/private_debug.go | 7 +++++ 4 files changed, 49 insertions(+), 27 deletions(-) diff --git a/consensus/quorum/one-node-staked-vote.go b/consensus/quorum/one-node-staked-vote.go index e3a45540a1..2532f9691e 100644 --- a/consensus/quorum/one-node-staked-vote.go +++ b/consensus/quorum/one-node-staked-vote.go @@ -98,19 +98,19 @@ func (v *stakedVoteWeight) AddNewVote( additionalVotePower = additionalVotePower.Add(votingPower) } - tallyQuorum := func() *tallyAndQuorum { - switch p { - case Prepare: - return v.voteTally.Prepare - case Commit: - return v.voteTally.Commit - case ViewChange: - return v.voteTally.ViewChange - default: - // Should not happen - return nil - } - }() + var tallyQuorum *tallyAndQuorum + switch p { + case Prepare: + tallyQuorum = v.voteTally.Prepare + case Commit: + tallyQuorum = v.voteTally.Commit + case ViewChange: + tallyQuorum = v.voteTally.ViewChange + default: + // Should not happen + return nil, errors.New("stakedVoteWeight not cache this phase") + } + tallyQuorum.tally = tallyQuorum.tally.Add(additionalVotePower) t := v.QuorumThreshold() @@ -163,20 +163,6 @@ func (v *stakedVoteWeight) IsQuorumAchievedByMask(mask *bls_cosi.Mask) bool { return (*currentTotalPower).GT(threshold) } -func (v *stakedVoteWeight) currentTotalPower(p Phase) (*numeric.Dec, error) { - switch p { - case Prepare: - return &v.voteTally.Prepare.tally, nil - case Commit: - return &v.voteTally.Commit.tally, nil - case ViewChange: - return &v.voteTally.ViewChange.tally, nil - default: - // Should not happen - return nil, errors.New("wrong phase is provided") - } -} - // ComputeTotalPowerByMask computes the total power indicated by bitmap mask func (v *stakedVoteWeight) computeTotalPowerByMask(mask *bls_cosi.Mask) *numeric.Dec { currentTotal := numeric.ZeroDec() diff --git a/hmy/hmy.go b/hmy/hmy.go index 24f0caa127..097e597d02 100644 --- a/hmy/hmy.go +++ b/hmy/hmy.go @@ -120,6 +120,7 @@ type NodeAPI interface { GetConfig() commonRPC.Config ShutDown() GetLastSigningPower() (float64, error) + GetLastSigningPower2() (float64, error) } // New 
creates a new Harmony object (including the
diff --git a/node/api.go b/node/api.go
index ceda968084..ef76079f1e 100644
--- a/node/api.go
+++ b/node/api.go
@@ -2,7 +2,9 @@ package node
 
 import (
 	"github.com/harmony-one/harmony/consensus/quorum"
+	"github.com/harmony-one/harmony/consensus/votepower"
 	"github.com/harmony-one/harmony/core/types"
+	"github.com/harmony-one/harmony/crypto/bls"
 	"github.com/harmony-one/harmony/eth/rpc"
 	"github.com/harmony-one/harmony/hmy"
 	"github.com/harmony-one/harmony/internal/tikv"
@@ -183,3 +185,29 @@ func (node *Node) GetLastSigningPower() (float64, error) {
 	round := float64(power.MulInt64(10000).RoundInt64()) / 10000
 	return round, nil
 }
+
+func (node *Node) GetLastSigningPower2() (float64, error) {
+	bc := node.Consensus.Blockchain()
+	cur := bc.CurrentBlock()
+	ss, err := bc.ReadShardState(cur.Epoch())
+	if err != nil {
+		return 0, err
+	}
+	roster, err := votepower.Compute(&ss.Shards[bc.ShardID()], cur.Epoch())
+	if err != nil {
+		return 0, err
+	}
+	blsPubKeys, err := ss.Shards[bc.ShardID()].BLSPublicKeys()
+	if err != nil {
+		return 0, err
+	}
+
+	mask := bls.NewMask(blsPubKeys)
+	err = mask.SetMask(cur.Header().LastCommitBitmap())
+	if err != nil {
+		return 0, err
+	}
+	power := roster.VotePowerByMask(mask)
+	round := float64(power.MulInt64(10000).RoundInt64()) / 10000
+	return round, nil
+}
diff --git a/rpc/private_debug.go b/rpc/private_debug.go
index 921d6645d5..97ade82dd2 100644
--- a/rpc/private_debug.go
+++ b/rpc/private_debug.go
@@ -65,3 +65,10 @@ func (s *PrivateDebugService) GetLastSigningPower(
 ) (float64, error) {
 	return s.hmy.NodeAPI.GetLastSigningPower()
 }
+
+// GetLastSigningPower2 gets the last signing power
+func (s *PrivateDebugService) GetLastSigningPower2(
+	ctx context.Context,
+) (float64, error) {
+	return s.hmy.NodeAPI.GetLastSigningPower2()
+}

From d6690ed7b5f5811aab3d58388b47a9bdb0831fb9 Mon Sep 17 00:00:00 2001
From: Konstantin <355847+Frozen@users.noreply.github.com>
Date: Fri, 22 Dec 2023 15:46:44 -0400
Subject: [PATCH 102/128] Activate external rotation devnet.
(#4596) --- internal/params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/params/config.go b/internal/params/config.go index 017624ffe0..d8cc5812df 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -203,7 +203,7 @@ var ( CrossShardXferPrecompileEpoch: big.NewInt(5), AllowlistEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: big.NewInt(2379), - LeaderRotationExternalValidatorsEpoch: EpochTBD, + LeaderRotationExternalValidatorsEpoch: big.NewInt(3153), FeeCollectEpoch: big.NewInt(5), ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), From 718286f622efb21b95c3489decf6d8200b3f0d01 Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Sun, 24 Dec 2023 20:20:46 +0100 Subject: [PATCH 103/128] delay for a few epochs the leader rotations devnet (#4597) --- internal/params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/params/config.go b/internal/params/config.go index d8cc5812df..15ca7d2879 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -203,7 +203,7 @@ var ( CrossShardXferPrecompileEpoch: big.NewInt(5), AllowlistEpoch: EpochTBD, LeaderRotationInternalValidatorsEpoch: big.NewInt(2379), - LeaderRotationExternalValidatorsEpoch: big.NewInt(3153), + LeaderRotationExternalValidatorsEpoch: big.NewInt(3173), FeeCollectEpoch: big.NewInt(5), ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), From e68b44fd9814af389cc36617e261dd53ea2d2cc9 Mon Sep 17 00:00:00 2001 From: Gheis Mohammadi Date: Tue, 9 Jan 2024 10:49:53 +0800 Subject: [PATCH 104/128] Complete Fast Sync codes (#4594) * adjust full state sync request parameters, rename stage_state * add full state stage to the list of stages in fast sync * add RangeMode and ChainExecutionMode to handle execution of the stream sync stage * fix block exists issue on stage_states in stream sync * fix double insertion in stage states * add count for state downloader to return number of tasks * fix travis build issue by goimports * switch to Full Sync on pivot block, fix checking nil length in HandleRequestError --- .../stagedstreamsync/default_stages.go | 87 +++++++---- api/service/stagedstreamsync/downloader.go | 1 + api/service/stagedstreamsync/sig_verify.go | 12 +- api/service/stagedstreamsync/stage.go | 23 +++ api/service/stagedstreamsync/stage_finish.go | 5 + .../stagedstreamsync/stage_receipts.go | 6 + .../{stage_state.go => stage_states.go} | 4 + .../stagedstreamsync/stage_statesync.go | 9 +- .../stagedstreamsync/stage_statesync_full.go | 70 ++++++--- .../stagedstreamsync/staged_stream_sync.go | 55 +++++++ api/service/stagedstreamsync/stages.go | 19 +-- .../stagedstreamsync/state_sync_full.go | 147 ++++++++++-------- api/service/stagedstreamsync/syncing.go | 2 + 13 files changed, 305 insertions(+), 135 deletions(-) rename api/service/stagedstreamsync/{stage_state.go => stage_states.go} (98%) diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go index f869ee5feb..fe64e26d4c 100644 --- a/api/service/stagedstreamsync/default_stages.go +++ b/api/service/stagedstreamsync/default_stages.go @@ -64,7 +64,7 @@ func initFastSyncStagesOrder() { ShortRange, BlockBodies, Receipts, - StateSync, + FullStateSync, States, LastMile, Finish, @@ -74,7 +74,7 @@ func initFastSyncStagesOrder() { Finish, LastMile, States, - StateSync, + FullStateSync, Receipts, BlockBodies, ShortRange, @@ -86,7 +86,7 @@ func initFastSyncStagesOrder() { 
Finish, LastMile, States, - StateSync, + FullStateSync, Receipts, BlockBodies, ShortRange, @@ -101,6 +101,7 @@ func DefaultStages(ctx context.Context, srCfg StageShortRangeCfg, bodiesCfg StageBodiesCfg, stateSyncCfg StageStateSyncCfg, + fullStateSyncCfg StageFullStateSyncCfg, statesCfg StageStatesCfg, receiptsCfg StageReceiptsCfg, lastMileCfg StageLastMileCfg, @@ -113,55 +114,81 @@ func DefaultStages(ctx context.Context, handlerStageBodies := NewStageBodies(bodiesCfg) handlerStageStates := NewStageStates(statesCfg) handlerStageStateSync := NewStageStateSync(stateSyncCfg) + handlerStageFullStateSync := NewStageFullStateSync(fullStateSyncCfg) handlerStageReceipts := NewStageReceipts(receiptsCfg) handlerStageLastMile := NewStageLastMile(lastMileCfg) handlerStageFinish := NewStageFinish(finishCfg) return []*Stage{ { - ID: Heads, - Description: "Retrieve Chain Heads", - Handler: handlerStageHeads, + ID: Heads, + Description: "Retrieve Chain Heads", + Handler: handlerStageHeads, + RangeMode: OnlyLongRange, + ChainExecutionMode: AllChains, }, { - ID: SyncEpoch, - Description: "Sync only Last Block of Epoch", - Handler: handlerStageEpochSync, + ID: SyncEpoch, + Description: "Sync only Last Block of Epoch", + Handler: handlerStageEpochSync, + RangeMode: OnlyShortRange, + ChainExecutionMode: OnlyEpochChain, }, { - ID: ShortRange, - Description: "Short Range Sync", - Handler: handlerStageShortRange, + ID: ShortRange, + Description: "Short Range Sync", + Handler: handlerStageShortRange, + RangeMode: OnlyShortRange, + ChainExecutionMode: AllChainsExceptEpochChain, }, { - ID: BlockBodies, - Description: "Retrieve Block Bodies", - Handler: handlerStageBodies, + ID: BlockBodies, + Description: "Retrieve Block Bodies", + Handler: handlerStageBodies, + RangeMode: OnlyLongRange, + ChainExecutionMode: AllChainsExceptEpochChain, }, { - ID: States, - Description: "Update Blockchain State", - Handler: handlerStageStates, + ID: States, + Description: "Update Blockchain State", + Handler: handlerStageStates, + RangeMode: OnlyLongRange, + ChainExecutionMode: AllChainsExceptEpochChain, }, { - ID: StateSync, - Description: "Retrieve States", - Handler: handlerStageStateSync, + ID: StateSync, + Description: "Retrieve States", + Handler: handlerStageStateSync, + RangeMode: OnlyLongRange, + ChainExecutionMode: AllChainsExceptEpochChain, }, { - ID: Receipts, - Description: "Retrieve Receipts", - Handler: handlerStageReceipts, + ID: FullStateSync, + Description: "Retrieve Full States", + Handler: handlerStageFullStateSync, + RangeMode: OnlyLongRange, + ChainExecutionMode: AllChainsExceptEpochChain, }, { - ID: LastMile, - Description: "update status for blocks after sync and update last mile blocks as well", - Handler: handlerStageLastMile, + ID: Receipts, + Description: "Retrieve Receipts", + Handler: handlerStageReceipts, + RangeMode: OnlyLongRange, + ChainExecutionMode: AllChainsExceptEpochChain, }, { - ID: Finish, - Description: "Finalize Changes", - Handler: handlerStageFinish, + ID: LastMile, + Description: "update status for blocks after sync and update last mile blocks as well", + Handler: handlerStageLastMile, + RangeMode: LongRangeAndShortRange, + ChainExecutionMode: AllChainsExceptEpochChain, + }, + { + ID: Finish, + Description: "Finalize Changes", + Handler: handlerStageFinish, + RangeMode: LongRangeAndShortRange, + ChainExecutionMode: AllChains, }, } } diff --git a/api/service/stagedstreamsync/downloader.go b/api/service/stagedstreamsync/downloader.go index 3711048955..9d564b016c 100644 --- 
a/api/service/stagedstreamsync/downloader.go
+++ b/api/service/stagedstreamsync/downloader.go
@@ -285,4 +285,5 @@ func (d *Downloader) loop() {
 			return
 		}
 	}
+
 }
diff --git a/api/service/stagedstreamsync/sig_verify.go b/api/service/stagedstreamsync/sig_verify.go
index bdf5a21077..cd7fc4f913 100644
--- a/api/service/stagedstreamsync/sig_verify.go
+++ b/api/service/stagedstreamsync/sig_verify.go
@@ -54,14 +54,7 @@ func verifyBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block)
 	if err := bc.Engine().VerifyHeader(bc, block.Header(), true); err != nil {
 		return errors.Wrap(err, "[VerifyHeader]")
 	}
-	_, err = bc.InsertChain(types.Blocks{block}, false)
-	switch {
-	case errors.Is(err, core.ErrKnownBlock):
-		return nil
-	case err != nil:
-		return errors.Wrap(err, "[InsertChain]")
-	default:
-	}
+
 	return nil
 }
 
@@ -72,6 +65,9 @@ func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*type
 	}
 	// insert block
 	if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil {
+		if errors.Is(err, core.ErrKnownBlock) {
+			return nil
+		}
 		return errors.Wrap(err, "[InsertChain]")
 	}
 	return nil
diff --git a/api/service/stagedstreamsync/stage.go b/api/service/stagedstreamsync/stage.go
index 48334a5e52..59602fe818 100644
--- a/api/service/stagedstreamsync/stage.go
+++ b/api/service/stagedstreamsync/stage.go
@@ -30,6 +30,25 @@ type StageHandler interface {
 	CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) error
 }
 
+type RangeExecution uint32
+
+const (
+	LongRangeAndShortRange RangeExecution = iota // Both short range and long range
+	OnlyShortRange                               // only short range
+	OnlyLongRange                                // only long range
+	//OnlyEpochSync // only epoch sync
+)
+
+type ChainExecution uint32
+
+const (
+	AllChains ChainExecution = iota // Can execute for any shard
+	AllChainsExceptEpochChain       // Can execute for any shard except epoch chain
+	OnlyBeaconNode                  // only for beacon node
+	OnlyEpochChain                  // only for epoch chain
+	OnlyShardChain                  // only for shard node (exclude beacon node and epoch chain)
+)
+
 // Stage is a single sync stage in staged sync.
 type Stage struct {
 	// ID of the sync stage. Should not be empty and should be unique. It is recommended to prefix it with reverse domain to avoid clashes (`com.example.my-stage`).
@@ -42,6 +61,10 @@ type Stage struct {
 	DisabledDescription string
 	// Disabled defines if the stage is disabled. It sets up when the stage is build by its `StageBuilder`.
 	Disabled bool
+	// RangeMode defines whether the stage is executed for long range sync, short range sync, or both
+	RangeMode RangeExecution
+	// ChainExecutionMode defines which chains this stage is executed for
+	ChainExecutionMode ChainExecution
 }
 
 // StageState is the state of the stage.
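To make the two new Stage fields concrete, here is a minimal sketch — not code from this series; the helper name and the three boolean flags are invented, and it assumes the stagedstreamsync package context where Stage, RangeExecution and ChainExecution are declared. A sync cycle would consult both fields before executing a stage and skip it on any mismatch.

// shouldRunStage is a hypothetical helper (sketch only).
func shouldRunStage(st *Stage, longRange, isBeaconNode, isEpochChain bool) bool {
	switch st.RangeMode {
	case OnlyShortRange:
		if longRange {
			return false // stage is restricted to short range sync
		}
	case OnlyLongRange:
		if !longRange {
			return false // stage is restricted to long range sync
		}
	}
	switch st.ChainExecutionMode {
	case OnlyEpochChain:
		return isEpochChain
	case AllChainsExceptEpochChain:
		return !isEpochChain
	case OnlyBeaconNode:
		return isBeaconNode
	case OnlyShardChain:
		return !isBeaconNode && !isEpochChain
	}
	return true // AllChains and LongRangeAndShortRange impose no restriction
}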
diff --git a/api/service/stagedstreamsync/stage_finish.go b/api/service/stagedstreamsync/stage_finish.go
index 0dfae53ae2..c94aa692bf 100644
--- a/api/service/stagedstreamsync/stage_finish.go
+++ b/api/service/stagedstreamsync/stage_finish.go
@@ -39,6 +39,11 @@ func (finish *StageFinish) Exec(ctx context.Context, firstCycle bool, invalidBlo

 	// TODO: prepare indices (useful for RPC) and finalize

+	// switch to Full Sync Mode if the states are synced
+	if s.state.status.statesSynced {
+		s.state.status.cycleSyncMode = FullSync
+	}
+
 	if useInternalTx {
 		if err := tx.Commit(); err != nil {
 			return err
diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go
index 4445eb6ba2..78e8e089cd 100644
--- a/api/service/stagedstreamsync/stage_receipts.go
+++ b/api/service/stagedstreamsync/stage_receipts.go
@@ -12,6 +12,7 @@ import (
 	"github.com/harmony-one/harmony/core/types"
 	"github.com/harmony-one/harmony/internal/utils"
 	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+	"github.com/harmony-one/harmony/shard"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/pkg/errors"
 )
@@ -56,6 +57,11 @@ func (r *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockR
 		return nil
 	}

+	// shouldn't execute for epoch chain
+	if r.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+		return nil
+	}
+
 	useInternalTx := tx == nil

 	if invalidBlockRevert {
diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_states.go
similarity index 98%
rename from api/service/stagedstreamsync/stage_state.go
rename to api/service/stagedstreamsync/stage_states.go
index df864d63ff..1b668786cf 100644
--- a/api/service/stagedstreamsync/stage_state.go
+++ b/api/service/stagedstreamsync/stage_states.go
@@ -165,6 +165,10 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR
 			return ErrInvalidBlockNumber
 		}

+		if stg.configs.bc.HasBlock(block.Hash(), block.NumberU64()) {
+			continue
+		}
+
 		if err := verifyAndInsertBlock(stg.configs.bc, block); err != nil {
 			stg.configs.logger.Warn().Err(err).Uint64("cycle target block", targetHeight).
 				Uint64("block number", block.NumberU64()).
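Taken together, the HasBlock skip above and the ErrKnownBlock tolerance moved into verifyAndInsertBlock make block insertion idempotent: a cycle replayed after an interruption silently passes over blocks that already landed. A minimal standalone sketch of that pattern (insertTolerant and errKnownBlock are illustrative stand-ins for the chain API):

```go
package main

import (
	"errors"
	"fmt"
)

// errKnownBlock stands in for core.ErrKnownBlock.
var errKnownBlock = errors.New("block already known")

// insertTolerant treats "already imported" as success, so a sync cycle
// that is replayed after a crash or restart does not abort.
func insertTolerant(insert func() error) error {
	if err := insert(); err != nil {
		if errors.Is(err, errKnownBlock) {
			return nil // block is already on chain: nothing to do
		}
		return fmt.Errorf("[InsertChain] %w", err)
	}
	return nil
}

func main() {
	fmt.Println(insertTolerant(func() error { return nil }))           // <nil>: fresh insert
	fmt.Println(insertTolerant(func() error { return errKnownBlock })) // <nil>: replay is a no-op
}
```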
diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go
index c4e66e10ef..3ce733f41f 100644
--- a/api/service/stagedstreamsync/stage_statesync.go
+++ b/api/service/stagedstreamsync/stage_statesync.go
@@ -10,6 +10,7 @@ import (
 	"github.com/harmony-one/harmony/core"
 	"github.com/harmony-one/harmony/internal/utils"
 	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+	"github.com/harmony-one/harmony/shard"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
@@ -58,8 +59,14 @@ func (sss *StageStateSync) Exec(ctx context.Context, bool, invalidBlockRevert bo
 	// for short range sync, skip this step
 	if !s.state.initSync {
 		return nil
-	} // only execute this stage in fast/snap sync mode and once we reach to pivot
+	}
+
+	// shouldn't execute for epoch chain
+	if sss.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+		return nil
+	}
+
+	// only execute this stage in fast/snap sync mode and once we reach the pivot
 	if s.state.status.pivotBlock == nil ||
 		s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
 		s.state.status.statesSynced {
diff --git a/api/service/stagedstreamsync/stage_statesync_full.go b/api/service/stagedstreamsync/stage_statesync_full.go
index d304ca1c3b..c1579114b2 100644
--- a/api/service/stagedstreamsync/stage_statesync_full.go
+++ b/api/service/stagedstreamsync/stage_statesync_full.go
@@ -9,6 +9,7 @@ import (
 	"github.com/harmony-one/harmony/core"
 	"github.com/harmony-one/harmony/internal/utils"
 	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+	"github.com/harmony-one/harmony/shard"
 	"github.com/pkg/errors"

 	//sttypes "github.com/harmony-one/harmony/p2p/stream/types"
@@ -59,8 +60,19 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever
 	// for short range sync, skip this step
 	if !s.state.initSync {
 		return nil
-	} // only execute this stage in fast/snap sync mode and once we reach to pivot
+	}
+
+	// shouldn't execute for epoch chain
+	if sss.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+		return nil
+	}
+
+	// if states are already synced, don't execute this stage
+	if s.state.status.statesSynced {
+		return
+	}
+
+	// only execute this stage in fast/snap sync mode and once we reach the pivot
 	if s.state.status.pivotBlock == nil ||
 		s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
 		s.state.status.statesSynced {
@@ -72,21 +84,21 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever
 	// if currentHead >= maxHeight {
 	// 	return nil
 	// }
-	// currProgress := s.state.CurrentBlockNumber()
 	// targetHeight := s.state.currentCycle.TargetHeight
-	// if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
-	// 	if currProgress, err = s.CurrentStageProgress(etx); err != nil {
-	// 		return err
-	// 	}
-	// 	return nil
-	// }); errV != nil {
-	// 	return errV
-	// }
+	currProgress := uint64(0)
+	if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
+		if currProgress, err = s.CurrentStageProgress(etx); err != nil {
+			return err
+		}
+		return nil
+	}); errV != nil {
+		return errV
+	}
+	if currProgress >= s.state.status.pivotBlock.NumberU64() {
+		return nil
+	}

-	// if currProgress >= targetHeight {
-	// 	return nil
-	// }
 	useInternalTx := tx == nil
 	if useInternalTx {
 		var err error
@@ -109,6 +121,8 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever
 	scheme := sss.configs.bc.TrieDB().Scheme()
 	sdm := newFullStateDownloadManager(sss.configs.bc.ChainDb(), scheme, tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
 	sdm.setRootHash(currentBlockRootHash)
+
+	sdm.SyncStarted()
 	var wg sync.WaitGroup
 	for i := 0; i < s.state.config.Concurrency; i++ {
 		wg.Add(1)
@@ -128,6 +142,12 @@ func (sss *StageFullStateSync) Exec(ctx context.Context, bool, invalidBlockRever
 	// states should be fully synced in this stage
 	s.state.status.statesSynced = true

+	if err := sss.saveProgress(s, tx); err != nil {
+		sss.configs.logger.Warn().Err(err).
+			Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
+			Msg(WrapStagedSyncMsg("save progress for statesync stage failed"))
+	}
+
 	/*
 		gbm := s.state.gbm
@@ -169,8 +189,8 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full
 			return
 		default:
 		}
-		accountTasks, codes, storages, healtask, codetask, err := sdm.GetNextBatch()
-		if len(accountTasks)+len(codes)+len(storages.accounts)+len(healtask.hashes)+len(codetask.hashes) == 0 || err != nil {
+		accountTasks, codes, storages, healtask, codetask, nTasks, err := sdm.GetNextBatch()
+		if nTasks == 0 || err != nil {
 			select {
 			case <-ctx.Done():
 				return
@@ -184,8 +204,8 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full
 			task := accountTasks[0]
 			origin := task.Next
 			limit := task.Last
-			root := sdm.root
-			cap := maxRequestSize
+			root := task.root
+			cap := task.cap
 			retAccounts, proof, stid, err := sss.configs.protocol.GetAccountRange(ctx, root, origin, limit, uint64(cap))
 			if err != nil {
 				if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
@@ -234,10 +254,10 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full

 		} else if len(storages.accounts) > 0 {

-			root := sdm.root
+			root := storages.root
 			roots := storages.roots
 			accounts := storages.accounts
-			cap := maxRequestSize
+			cap := storages.cap
 			origin := storages.origin
 			limit := storages.limit
 			mainTask := storages.mainTask
@@ -276,13 +296,14 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full
 		} else {
 			// assign trie node Heal Tasks
 			if len(healtask.hashes) > 0 {
-				root := sdm.root
+				root := healtask.root
 				task := healtask.task
 				hashes := healtask.hashes
 				pathsets := healtask.pathsets
 				paths := healtask.paths
+				bytes := healtask.bytes

-				nodes, stid, err := sss.configs.protocol.GetTrieNodes(ctx, root, pathsets, maxRequestSize)
+				nodes, stid, err := sss.configs.protocol.GetTrieNodes(ctx, root, pathsets, uint64(bytes))
 				if err != nil {
 					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
 						sss.configs.protocol.StreamFailed(stid, "GetTrieNodes failed")
@@ -316,7 +337,8 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full
 			if len(codetask.hashes) > 0 {
 				task := codetask.task
 				hashes := codetask.hashes
-				retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, maxRequestSize)
+				bytes := codetask.bytes
+				retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, uint64(bytes))
 				if err != nil {
 					if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
 						sss.configs.protocol.StreamFailed(stid, "GetByteCodes failed")
@@ -354,7 +376,7 @@ func (sss *StageFullStateSync) downloadByteCodes(ctx context.Context, sdm *FullS
 	for _, codeTask := range codeTasks {
 		// try to get byte codes from remote peer
 		// if any of them failed, the stid will be the id of the failed stream
-		retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, maxRequestSize)
+		retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, uint64(codeTask.cap))
 		if err != nil {
 			return stid, err
 		}
@@ -413,7 +435,7 @@ func (stg *StageFullStateSync) saveProgress(s *StageState, tx kv.RwTx) (err erro
 	}

 	// save progress
-	if err = s.Update(tx, s.state.CurrentBlockNumber()); err != nil {
+	if err = s.Update(tx, s.state.status.pivotBlock.NumberU64()); err != nil {
 		utils.Logger().Error().
 			Err(err).
 			Msgf("[STAGED_SYNC] saving progress for block States stage failed")
diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go
index 03340eb15f..1782068b29 100644
--- a/api/service/stagedstreamsync/staged_stream_sync.go
+++ b/api/service/stagedstreamsync/staged_stream_sync.go
@@ -16,6 +16,7 @@ import (
 	"github.com/harmony-one/harmony/internal/utils"
 	syncproto "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
 	sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+	"github.com/harmony-one/harmony/shard"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
@@ -405,6 +406,11 @@ func (s *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, firs
 			continue
 		}

+		// TODO: enable this part after making sure all works well
+		// if !s.canExecute(stage) {
+		// 	continue
+		// }
+
 		if err := s.runStage(ctx, stage, db, tx, firstCycle, s.invalidBlock.Active); err != nil {
 			utils.Logger().Error().
 				Err(err).
@@ -431,6 +437,55 @@ func (s *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, firs
 	return nil
 }

+func (s *StagedStreamSync) canExecute(stage *Stage) bool {
+	// check range mode
+	if stage.RangeMode != LongRangeAndShortRange {
+		isLongRange := s.initSync
+		switch stage.RangeMode {
+		case OnlyLongRange:
+			if !isLongRange {
+				return false
+			}
+		case OnlyShortRange:
+			if isLongRange {
+				return false
+			}
+		default:
+			return false
+		}
+	}
+
+	// check chain execution
+	if stage.ChainExecutionMode != AllChains {
+		shardID := s.bc.ShardID()
+		isBeaconNode := s.isBeaconNode
+		isShardChain := shardID != shard.BeaconChainShardID
+		isEpochChain := shardID == shard.BeaconChainShardID && !isBeaconNode
+		switch stage.ChainExecutionMode {
+		case AllChainsExceptEpochChain:
+			if isEpochChain {
+				return false
+			}
+		case OnlyBeaconNode:
+			if !isBeaconNode {
+				return false
+			}
+		case OnlyShardChain:
+			if !isShardChain {
+				return false
+			}
+		case OnlyEpochChain:
+			if !isEpochChain {
+				return false
+			}
+		default:
+			return false
+		}
+	}
+
+	return true
+}
+
 // CreateView creates a view for a given db
 func CreateView(ctx context.Context, db kv.RwDB, tx kv.Tx, f func(tx kv.Tx) error) error {
 	if tx != nil {
diff --git a/api/service/stagedstreamsync/stages.go b/api/service/stagedstreamsync/stages.go
index 6ad9e4519c..33f3b293b0 100644
--- a/api/service/stagedstreamsync/stages.go
+++ b/api/service/stagedstreamsync/stages.go
@@ -8,15 +8,16 @@ import (
 type SyncStageID string

 const (
-	Heads       SyncStageID = "Heads"       // Heads are downloaded
-	ShortRange  SyncStageID = "ShortRange"  // short range
-	SyncEpoch   SyncStageID = "SyncEpoch"   // epoch sync
-	BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
-	States      SyncStageID = "States"      // will construct most recent state from downloaded blocks
-	StateSync   SyncStageID = "StateSync"   // State sync
-	Receipts    SyncStageID = "Receipts"    // Receipts
-	LastMile    SyncStageID = "LastMile"    // update blocks after sync and update last mile blocks as well
-	Finish      SyncStageID = "Finish"      // Nominal stage after all other stages
+	Heads         SyncStageID = "Heads"         // Heads are downloaded
+	ShortRange    SyncStageID = "ShortRange"    // short range
+	SyncEpoch     SyncStageID = "SyncEpoch"     // epoch sync
+	BlockBodies   SyncStageID = "BlockBodies"   // Block bodies are downloaded, TxHash and UncleHash are getting verified
+	States        SyncStageID = "States"        // will construct most recent state from downloaded blocks
+	StateSync     SyncStageID = "StateSync"     // State sync
+	FullStateSync SyncStageID = "FullStateSync" // Full State Sync
+	Receipts      SyncStageID = "Receipts"      // Receipts
+	LastMile      SyncStageID = "LastMile"      // update blocks after sync and update last mile blocks as well
+	Finish        SyncStageID = "Finish"        // Nominal stage after all other stages
 )

 // GetStageName returns the stage name in string
diff --git a/api/service/stagedstreamsync/state_sync_full.go b/api/service/stagedstreamsync/state_sync_full.go
index c98dcbafdc..14cdb1f594 100644
--- a/api/service/stagedstreamsync/state_sync_full.go
+++ b/api/service/stagedstreamsync/state_sync_full.go
@@ -108,6 +108,11 @@ var (
 type accountTask struct {
 	id uint64 //unique id for account task

+	root   common.Hash
+	origin common.Hash
+	limit  common.Hash
+	cap    int
+
 	// These fields get serialized to leveldb on shutdown
 	Next common.Hash // Next account to sync in this interval
 	Last common.Hash // Last account to sync in this interval
@@ -229,16 +234,19 @@ type byteCodeTasksBundle struct {
 	id     uint64 //unique id for bytecode task bundle
 	task   *accountTask
 	hashes []common.Hash
+	cap    int
 }

 type storageTaskBundle struct {
 	id       uint64 //unique id for storage task bundle
+	root     common.Hash
 	accounts []common.Hash
 	roots    []common.Hash
 	mainTask *accountTask
 	subtask  *storageTask
 	origin   common.Hash
 	limit    common.Hash
+	cap      int
 }

 // healTask represents the sync task for healing the snap-synced chunk boundaries.
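Note the design choice in these new fields: each task bundle now records the root, range, and byte cap it was created with, so a request, and any retry of it, is fully described by the task itself rather than by the download manager's globals (sdm.root, maxRequestSize). A toy sketch of this self-describing request pattern (the type, field names, and sizes here are illustrative, not the actual sync API):

```go
package main

import "fmt"

// accountRangeRequest carries everything needed to (re)issue the request,
// so a retry replays with exactly the original parameters.
type accountRangeRequest struct {
	root   string // state root the request was built against
	origin string // first account hash in the range
	limit  string // last account hash in the range
	cap    int    // response size budget in bytes
}

// fetch simulates sending the request to a peer and failing.
func fetch(r accountRangeRequest) error {
	return fmt.Errorf("stream failed")
}

func main() {
	req := accountRangeRequest{root: "0xroot", origin: "0x00", limit: "0xff", cap: 512 * 1024}
	if err := fetch(req); err != nil {
		// The retry consults only the task itself; even if the manager has
		// moved on to a new root, this request stays internally consistent.
		fmt.Printf("retrying: root=%s origin=%s limit=%s cap=%d\n", req.root, req.origin, req.limit, req.cap)
	}
}
```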
@@ -251,6 +259,7 @@ type healTask struct {
 	pathsets    []*message.TrieNodePathSet
 	task        *healTask
 	root        common.Hash
+	bytes       int
 	byteCodeReq bool
 }

@@ -259,7 +268,6 @@ type tasks struct {
 	storageTasks map[uint64]*storageTaskBundle   // Set of trie node tasks currently queued for retrieval, indexed by path
 	codeTasks    map[uint64]*byteCodeTasksBundle // Set of byte code tasks currently queued for retrieval, indexed by hash
 	healer       map[uint64]*healTask
-	snapped      bool // Flag to signal that snap phase is done
 }

 func newTasks() *tasks {
@@ -268,7 +276,6 @@ func newTasks() *tasks {
 		storageTasks: make(map[uint64]*storageTaskBundle, 0),
 		codeTasks:    make(map[uint64]*byteCodeTasksBundle),
 		healer:       make(map[uint64]*healTask, 0),
-		snapped:      false,
 	}
 }

@@ -399,8 +406,6 @@ type FullStateDownloadManager struct {
 	storageSynced uint64             // Number of storage slots downloaded
 	storageBytes  common.StorageSize // Number of storage trie bytes persisted to disk

-	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
-
 	stateWriter        ethdb.Batch        // Shared batch writer used for persisting raw states
 	accountHealed      uint64             // Number of accounts downloaded during the healing stage
 	accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
@@ -420,6 +425,9 @@ type FullStateDownloadManager struct {
 	bytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk
 	bytecodeHealDups  uint64             // Number of bytecodes already processed
 	bytecodeHealNops  uint64             // Number of bytecodes not requested
+
+	startTime time.Time // Time instance when snapshot sync started
+	logTime   time.Time // Time instance when status was last reported
 }

 func newFullStateDownloadManager(db ethdb.KeyValueStore,
@@ -430,18 +438,19 @@ func newFullStateDownloadManager(db ethdb.KeyValueStore,
 	logger zerolog.Logger) *FullStateDownloadManager {

 	return &FullStateDownloadManager{
-		db:          db,
-		scheme:      scheme,
-		bc:          bc,
-		stateWriter: db.NewBatch(),
-		tx:          tx,
-		keccak:      sha3.NewLegacyKeccak256().(crypto.KeccakState),
-		concurrency: concurrency,
-		logger:      logger,
-		tasks:       newTasks(),
-		requesting:  newTasks(),
-		processing:  newTasks(),
-		retries:     newTasks(),
+		db:                   db,
+		scheme:               scheme,
+		bc:                   bc,
+		stateWriter:          db.NewBatch(),
+		tx:                   tx,
+		keccak:               sha3.NewLegacyKeccak256().(crypto.KeccakState),
+		concurrency:          concurrency,
+		logger:               logger,
+		tasks:                newTasks(),
+		requesting:           newTasks(),
+		processing:           newTasks(),
+		retries:              newTasks(),
+		trienodeHealThrottle: maxTrienodeHealThrottle, // Tune downward instead of insta-filling with junk
 	}
 }

@@ -531,6 +540,12 @@ func (s *FullStateDownloadManager) commitHealer(force bool) {
 	utils.Logger().Debug().Str("type", "trienodes").Interface("bytes", common.StorageSize(batch.ValueSize())).Msg("Persisted set of healing data")
 }

+func (s *FullStateDownloadManager) SyncStarted() {
+	if s.startTime == (time.Time{}) {
+		s.startTime = time.Now()
+	}
+}
+
 func (s *FullStateDownloadManager) SyncCompleted() {
 	defer func() { // Persist any progress, independent of failure
 		for _, task := range s.tasks.accountTasks {
@@ -556,7 +571,8 @@ func (s *FullStateDownloadManager) SyncCompleted() {
 		utils.Logger().Debug().Interface("root", s.root).Msg("Terminating snapshot sync cycle")
 	}()

-	utils.Logger().Debug().Msg("Snapshot sync already completed")
+	elapsed := time.Since(s.startTime)
+	utils.Logger().Debug().Interface("elapsed", elapsed).Msg("Snapshot sync already completed")
 }

 // getNextBatch returns objects with a maximum of n state download
@@ -566,38 +582,30 @@ func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask,
 	codes []*byteCodeTasksBundle,
 	storages *storageTaskBundle,
 	healtask *healTask,
 	codetask *healTask,
+	nItems int,
 	err error) {
 	s.lock.Lock()
 	defer s.lock.Unlock()

-	accounts, codes, storages, healtask, codetask = s.getBatchFromRetries()
-	nItems := len(accounts) + len(codes) + len(storages.roots) + len(healtask.hashes) + len(codetask.hashes)
+	accounts, codes, storages, healtask, codetask, nItems = s.getBatchFromRetries()
 	if nItems > 0 {
 		return
 	}

 	if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 {
-		if nItems == 0 {
-			s.SyncCompleted()
-		}
+		s.SyncCompleted()
 		return
 	}

 	// Refill available tasks from the scheduler.
-	withHealTasks := true
-	if healtask != nil || codetask != nil {
-		withHealTasks = false
-	}
-	newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask := s.getBatchFromUnprocessed(withHealTasks)
+	newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask, nItems := s.getBatchFromUnprocessed()
 	accounts = append(accounts, newAccounts...)
 	codes = append(codes, newCodes...)
 	storages = newStorageTaskBundle
-	if withHealTasks {
-		healtask = newHealTask
-		codetask = newCodeTask
-	}
+	healtask = newHealTask
+	codetask = newCodeTask
 	return
 }

@@ -714,7 +722,7 @@ func (s *FullStateDownloadManager) loadSyncStatus() {
 	// Either we've failed to decode the previous state, or there was none.
 	// Start a fresh sync by chunking up the account range and scheduling
 	// them for retrieval.
-	s.tasks.accountTasks = nil
+	s.tasks = newTasks()
 	s.accountSynced, s.accountBytes = 0, 0
 	s.bytecodeSynced, s.bytecodeBytes = 0, 0
 	s.storageSynced, s.storageBytes = 0, 0
@@ -921,16 +929,18 @@ func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected in

 // getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download
 // tasks to send to the remote peer.
-func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
+func (s *FullStateDownloadManager) getBatchFromUnprocessed() (
 	accounts []*accountTask,
 	codes []*byteCodeTasksBundle,
 	storages *storageTaskBundle,
 	healtask *healTask,
-	codetask *healTask) {
+	codetask *healTask,
+	count int) {

 	// over trie nodes as those can be written to disk and forgotten about.
 	codes = make([]*byteCodeTasksBundle, 0)
 	accounts = make([]*accountTask, 0)
+	count = 0

 	for i, task := range s.tasks.accountTasks {
 		// Stop when we've gathered enough requests
@@ -956,12 +966,18 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 			break
 		}

+		task.root = s.root
+		task.origin = task.Next
+		task.limit = task.Last
+		task.cap = maxRequestSize
+		task.requested = true
 		s.tasks.accountTasks[i].requested = true
 		accounts = append(accounts, task)
 		s.requesting.addAccountTask(task.id, task)
 		s.tasks.addAccountTask(task.id, task)

 		// one task account is enough for an stream
+		count = len(accounts)
 		return
 	}
@@ -997,6 +1013,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 			id:     taskID,
 			hashes: hashes,
 			task:   task,
+			cap:    maxRequestSize,
 		}
 		codes = append(codes, bytecodeTask)

@@ -1005,12 +1022,14 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (

 		// Stop when we've gathered enough requests
 		if totalHashes >= maxCodeRequestCount {
+			count = totalHashes
 			return
 		}
 	}

 	// if we found some codes, can assign it to node
 	if totalHashes > 0 {
+		count = totalHashes
 		return
 	}

@@ -1020,14 +1039,8 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 			continue
 		}

-		// TODO: check cap calculations (shouldn't give us big chunk)
-		// if cap > maxRequestSize {
-		// 	cap = maxRequestSize
-		// }
-		// if cap < minRequestSize { // Don't bother with peers below a bare minimum performance
-		// 	cap = minRequestSize
-		// }
-		storageSets := maxRequestSize / 1024
+		cap := maxRequestSize
+		storageSets := cap / 1024

 		storages = &storageTaskBundle{
 			accounts: make([]common.Hash, 0, storageSets),
@@ -1089,23 +1102,21 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 			storages.origin = storages.subtask.Next
 			storages.limit = storages.subtask.Last
 		}
+		storages.root = s.root
+		storages.cap = cap
 		s.tasks.addStorageTaskBundle(taskID, storages)
 		s.requesting.addStorageTaskBundle(taskID, storages)
-
+		count = len(storages.accounts)
 		return
 	}

 	if len(storages.accounts) > 0 {
-		return
-	}
-
-	if !withHealTasks {
+		count = len(storages.accounts)
 		return
 	}

 	// Sync phase done, run heal phase
-
-	// Iterate over pending tasks and try to find a peer to retrieve with
+	// Iterate over pending tasks
 	for (len(s.tasks.healer) > 0 && len(s.tasks.healer[0].hashes) > 0) || s.scheduler.Pending() > 0 {
 		// If there are not enough trie tasks queued to fully assign, fill the
 		// queue from the state sync scheduler. The trie synced schedules these
@@ -1129,7 +1140,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (

 		// If all the heal tasks are bytecodes or already downloading, bail
 		if len(s.tasks.healer[0].trieTasks) == 0 {
-			return
+			break
 		}
 		// Generate the network query and send it to the peer
 		// if cap > maxTrieRequestCount {
@@ -1177,6 +1188,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 			pathsets:    pathsets,
 			root:        s.root,
 			task:        s.tasks.healer[0],
+			bytes:       maxRequestSize,
 			byteCodeReq: false,
 		}

@@ -1184,6 +1196,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 		s.requesting.addHealerTask(taskID, healtask)

 		if len(hashes) > 0 {
+			count = len(hashes)
 			return
 		}
 	}
@@ -1205,7 +1218,7 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (

 		// If all the heal tasks are trienodes or already downloading, bail
 		if len(s.tasks.healer[0].codeTasks) == 0 {
-			return
+			break
 		}
 		// Task pending retrieval, try to find an idle peer. If no such peer
 		// exists, we probably assigned tasks for all (or they are stateless).
@@ -1243,9 +1256,10 @@ func (s *FullStateDownloadManager) getBatchFromUnprocessed(withHealTasks bool) (
 			id:          taskID,
 			hashes:      hashes,
 			task:        s.tasks.healer[0],
+			bytes:       maxRequestSize,
 			byteCodeReq: true,
 		}
-
+		count = len(hashes)
 		s.tasks.healer[taskID] = codetask
 		s.requesting.addHealerTask(taskID, healtask)
 	}
@@ -1272,7 +1286,8 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
 	codes []*byteCodeTasksBundle,
 	storages *storageTaskBundle,
 	healtask *healTask,
-	codetask *healTask) {
+	codetask *healTask,
+	count int) {

 	// over trie nodes as those can be written to disk and forgotten about.
 	accounts = make([]*accountTask, 0)
@@ -1290,6 +1305,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
 	}

 	if len(accounts) > 0 {
+		count = len(accounts)
 		return
 	}

@@ -1301,6 +1317,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
 	}

 	if len(codes) > 0 {
+		count = len(codes)
 		return
 	}

@@ -1316,10 +1333,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
 		}
 		s.requesting.addStorageTaskBundle(storages.id, storages)
 		s.retries.deleteStorageTaskBundle(storages.id)
-		return
-	}
-
-	if len(storages.accounts) > 0 {
+		count = len(storages.accounts)
 		return
 	}

@@ -1338,6 +1352,7 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
 			}
 			s.requesting.addHealerTask(id, task)
 			s.retries.deleteHealerTask(id)
+			count = len(task.hashes)
 			return
 		}
 		if task.byteCodeReq {
@@ -1352,11 +1367,13 @@ func (s *FullStateDownloadManager) getBatchFromRetries() (
 			}
 			s.requesting.addHealerTask(id, task)
 			s.retries.deleteHealerTask(id)
+			count = len(task.hashes)
 			return
 		}
 	}
 	}

+	count = 0
 	return
 }

@@ -1371,14 +1388,18 @@ func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask,
 	s.lock.Lock()
 	defer s.lock.Unlock()

-	for _, task := range accounts {
-		s.requesting.deleteAccountTask(task.id)
-		s.retries.addAccountTask(task.id, task)
+	if accounts != nil && len(accounts) > 0 {
+		for _, task := range accounts {
+			s.requesting.deleteAccountTask(task.id)
+			s.retries.addAccountTask(task.id, task)
+		}
 	}

-	for _, code := range codes {
-		s.requesting.deleteCodeTask(code.id)
-		s.retries.addCodeTask(code.id, code)
+	if codes != nil && len(codes) > 0 {
+		for _, code := range codes {
+			s.requesting.deleteCodeTask(code.id)
+			s.retries.addCodeTask(code.id, code)
+		}
 	}

 	if storages != nil {
diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go
index e6879a5239..c3bc585f21 100644
--- a/api/service/stagedstreamsync/syncing.go
+++ b/api/service/stagedstreamsync/syncing.go
@@ -90,6 +90,7 @@ func CreateStagedSync(ctx context.Context,
 	stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, extractReceiptHashes, config.LogProgress)
 	stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress)
 	stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress)
+	stageFullStateSyncCfg := NewStageFullStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress)
 	stageReceiptsCfg := NewStageReceiptsCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress)
 	lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB)
 	stageFinishCfg := NewStageFinishCfg(mainDB)
@@ -103,6 +104,7 @@ func CreateStagedSync(ctx context.Context,
 		stageShortRangeCfg,
 		stageBodiesCfg,
 		stageStateSyncCfg,
+		stageFullStateSyncCfg,
 		stageStatesCfg,
 		stageReceiptsCfg,
 		lastMileCfg,

From 5443bf7d94ed92efd69a39c999d0f9a88033d35a Mon Sep 17 00:00:00 2001
From: Diego Nava <8563843+diego1q2w@users.noreply.github.com>
Date: Mon, 8 Jan 2024 20:50:35 -0600
Subject: [PATCH 105/128] reduce internal voting power to 10% devnet (#4599)

---
 internal/configs/sharding/partner.go | 10 ++++++++++
 internal/params/config.go            | 14 ++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/internal/configs/sharding/partner.go b/internal/configs/sharding/partner.go
index 99ea96141b..93cc4139de 100644
--- a/internal/configs/sharding/partner.go
+++ b/internal/configs/sharding/partner.go
@@ -40,6 +40,8 @@ const (

 func (ps partnerSchedule) InstanceForEpoch(epoch *big.Int) Instance {
 	switch {
+	case params.PartnerChainConfig.IsDevnetExternalEpoch(epoch):
+		return partnerV3
 	case params.PartnerChainConfig.IsHIP30(epoch):
 		return partnerV2
 	case epoch.Cmp(params.PartnerChainConfig.StakingEpoch) >= 0:
@@ -111,3 +113,11 @@ var partnerV2 = MustNewInstance(
 	hip30CollectionAddressTestnet, partnerReshardingEpoch,
 	PartnerSchedule.BlocksPerEpoch(),
 )
+var partnerV3 = MustNewInstance(
+	2, 5, 1, 0,
+	numeric.MustNewDecFromStr("0.1"), genesis.TNHarmonyAccounts,
+	genesis.TNFoundationalAccounts, emptyAllowlist,
+	feeCollectorsDevnet[1], numeric.MustNewDecFromStr("0.25"),
+	hip30CollectionAddressTestnet, partnerReshardingEpoch,
+	PartnerSchedule.BlocksPerEpoch(),
+)
diff --git a/internal/params/config.go b/internal/params/config.go
index 15ca7d2879..703dea062b 100644
--- a/internal/params/config.go
+++ b/internal/params/config.go
@@ -76,6 +76,7 @@ var (
 		HIP30Epoch:          big.NewInt(1673), // 2023-11-02 17:30:00+00:00
 		BlockGas30MEpoch:    big.NewInt(1673), // 2023-11-02 17:30:00+00:00
 		MaxRateEpoch:        EpochTBD,
+		DevnetExternalEpoch: EpochTBD,
 	}

 	// TestnetChainConfig contains the chain parameters to run a node on the harmony test network.
@@ -120,6 +121,7 @@ var (
 		HIP30Epoch:          big.NewInt(2176), // 2023-10-12 10:00:00+00:00
 		BlockGas30MEpoch:    big.NewInt(2176), // 2023-10-12 10:00:00+00:00
 		MaxRateEpoch:        EpochTBD,
+		DevnetExternalEpoch: EpochTBD,
 	}
 	// PangaeaChainConfig contains the chain parameters for the Pangaea network.
 	// All features except for CrossLink are enabled at launch.
@@ -164,6 +166,7 @@ var (
 		HIP30Epoch:          EpochTBD,
 		BlockGas30MEpoch:    big.NewInt(0),
 		MaxRateEpoch:        EpochTBD,
+		DevnetExternalEpoch: EpochTBD,
 	}

 	// PartnerChainConfig contains the chain parameters for the Partner network.
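DevnetExternalEpoch follows the same epoch-gating pattern as the other *Epoch fields: the fork is considered active from its configured epoch onward, while EpochTBD keeps it disabled. A small sketch of the predicate, assuming isForked here has the conventional go-ethereum-style semantics (a nil or unreached schedule means not active):

```go
package main

import (
	"fmt"
	"math/big"
)

// isForked reports whether a fork scheduled at epoch s is active at epoch head.
// A nil schedule means the fork is never activated.
func isForked(s, head *big.Int) bool {
	if s == nil || head == nil {
		return false
	}
	return s.Cmp(head) <= 0
}

func main() {
	devnetExternalEpoch := big.NewInt(7)
	fmt.Println(isForked(devnetExternalEpoch, big.NewInt(6))) // false: before the fork
	fmt.Println(isForked(devnetExternalEpoch, big.NewInt(7))) // true: at activation
	fmt.Println(isForked(nil, big.NewInt(100)))               // false: not scheduled
}
```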
@@ -209,6 +212,7 @@ var (
 		HIP30Epoch:          big.NewInt(7),
 		BlockGas30MEpoch:    big.NewInt(7),
 		MaxRateEpoch:        EpochTBD,
+		DevnetExternalEpoch: EpochTBD,
 	}

 	// StressnetChainConfig contains the chain parameters for the Stress test network.
@@ -254,6 +258,7 @@ var (
 		HIP30Epoch:          EpochTBD,
 		BlockGas30MEpoch:    big.NewInt(0),
 		MaxRateEpoch:        EpochTBD,
+		DevnetExternalEpoch: EpochTBD,
 	}

 	// LocalnetChainConfig contains the chain parameters to run for local development.
@@ -298,6 +303,7 @@ var (
 		HIP30Epoch:          EpochTBD,
 		BlockGas30MEpoch:    big.NewInt(0),
 		MaxRateEpoch:        EpochTBD,
+		DevnetExternalEpoch: EpochTBD,
 	}

 	// AllProtocolChanges ...
@@ -344,6 +350,7 @@ var (
 		big.NewInt(0), // BlockGas30M
 		big.NewInt(0), // BlockGas30M
 		big.NewInt(0), // MaxRateEpoch
+		big.NewInt(0), // DevnetExternalEpoch
 	}

 	// TestChainConfig ...
@@ -390,6 +397,7 @@ var (
 		big.NewInt(0), // HIP30Epoch
 		big.NewInt(0), // BlockGas30M
 		big.NewInt(0), // MaxRateEpoch
+		big.NewInt(0), // DevnetExternalEpoch
 	}

 	// TestRules ...
@@ -554,6 +562,8 @@ type ChainConfig struct {
 	// 4. Change the minimum validator commission from 5 to 7% (all nets)
 	HIP30Epoch *big.Int `json:"hip30-epoch,omitempty"`

+	DevnetExternalEpoch *big.Int `json:"devnet-external-epoch,omitempty"`
+
 	BlockGas30MEpoch *big.Int `json:"block-gas-30m-epoch,omitempty"`

 	// MaxRateEpoch will make sure the validator max-rate is at least equal to the minRate + the validator max-rate-increase
@@ -814,6 +824,10 @@ func (c *ChainConfig) IsHIP30(epoch *big.Int) bool {
 	return isForked(c.HIP30Epoch, epoch)
 }

+func (c *ChainConfig) IsDevnetExternalEpoch(epoch *big.Int) bool {
+	return isForked(c.DevnetExternalEpoch, epoch)
+}
+
 func (c *ChainConfig) IsMaxRate(epoch *big.Int) bool {
 	return isForked(c.MaxRateEpoch, epoch)
 }

From 50a1a75cd5525f32a4b222bb54a28199b8b3d644 Mon Sep 17 00:00:00 2001
From: Gheis Mohammadi
Date: Wed, 10 Jan 2024 04:38:41 +0800
Subject: [PATCH 106/128] ignore known blocks for processing state sync (#4602)

---
 api/service/legacysync/epoch_syncing.go | 3 +++
 api/service/legacysync/syncing.go       | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/api/service/legacysync/epoch_syncing.go b/api/service/legacysync/epoch_syncing.go
index 5d9b4dab10..7719c8a819 100644
--- a/api/service/legacysync/epoch_syncing.go
+++ b/api/service/legacysync/epoch_syncing.go
@@ -138,6 +138,9 @@ func syncLoop(bc core.BlockChain, syncConfig *SyncConfig) (timeout int) {

 		err := ProcessStateSync(syncConfig, heights, bc)
 		if err != nil {
+			if errors.Is(err, core.ErrKnownBlock) {
+				return 10
+			}
 			utils.Logger().Error().Err(err).
 				Msgf("[EPOCHSYNC] ProcessStateSync failed (isBeacon: %t, ShardID: %d, otherEpoch: %d, currentEpoch: %d)",
 					isBeacon, bc.ShardID(), otherEpoch, curEpoch)
diff --git a/api/service/legacysync/syncing.go b/api/service/legacysync/syncing.go
index 92c8a457f0..dc1880506e 100644
--- a/api/service/legacysync/syncing.go
+++ b/api/service/legacysync/syncing.go
@@ -1121,6 +1121,9 @@ func (ss *StateSync) SyncLoop(bc core.BlockChain, isBeacon bool, consensus *cons
 		}
 		err := ss.ProcessStateSync(startHash[:], size, bc)
 		if err != nil {
+			if errors.Is(err, core.ErrKnownBlock) {
+				continue
+			}
 			utils.Logger().Error().Err(err).
 				Msgf("[SYNC] ProcessStateSync failed (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
 					isBeacon, bc.ShardID(), otherHeight, currentHeight)

From f57691047b6305ae9977467c7619c5484e9ebc1f Mon Sep 17 00:00:00 2001
From: Konstantin <355847+Frozen@users.noreply.github.com>
Date: Wed, 10 Jan 2024 13:25:13 -0400
Subject: [PATCH 107/128] Force verify all sign. (#4601)

---
 api/service/legacysync/syncing.go        | 11 ++++++-----
 api/service/stagedsync/stage_lastmile.go |  4 ++--
 api/service/stagedsync/stagedsync.go     |  3 ++-
 node/node.go                             |  2 +-
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/api/service/legacysync/syncing.go b/api/service/legacysync/syncing.go
index dc1880506e..830eb1d734 100644
--- a/api/service/legacysync/syncing.go
+++ b/api/service/legacysync/syncing.go
@@ -860,11 +860,12 @@ func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Ha
 }

 // UpdateBlockAndStatus ...
-func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error {
+func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain) error {
 	if block.NumberU64() != bc.CurrentBlock().NumberU64()+1 {
 		utils.Logger().Debug().Uint64("curBlockNum", bc.CurrentBlock().NumberU64()).Uint64("receivedBlockNum", block.NumberU64()).Msg("[SYNC] Inappropriate block number, ignore!")
 		return nil
 	}
+	verifyAllSig := true

 	haveCurrentSig := len(block.GetCurrentCommitSig()) != 0
 	// Verify block signatures
@@ -954,8 +955,8 @@ func (ss *StateSync) generateNewState(bc core.BlockChain) error {
 			break
 		}
 		// Enforce sig check for the last block in a batch
-		enforceSigCheck := !commonIter.HasNext()
-		err = ss.UpdateBlockAndStatus(block, bc, enforceSigCheck)
+		_ = !commonIter.HasNext()
+		err = ss.UpdateBlockAndStatus(block, bc)
 		if err != nil {
 			break
 		}
@@ -972,7 +973,7 @@ func (ss *StateSync) generateNewState(bc core.BlockChain) error {
 		if block == nil {
 			break
 		}
-		err = ss.UpdateBlockAndStatus(block, bc, true)
+		err = ss.UpdateBlockAndStatus(block, bc)
 		if err != nil {
 			break
 		}
@@ -993,7 +994,7 @@ func (ss *StateSync) generateNewState(bc core.BlockChain) error {
 		if block == nil {
 			break
 		}
-		err = ss.UpdateBlockAndStatus(block, bc, false)
+		err = ss.UpdateBlockAndStatus(block, bc)
 		if err != nil {
 			break
 		}
diff --git a/api/service/stagedsync/stage_lastmile.go b/api/service/stagedsync/stage_lastmile.go
index df6079bd03..13fece8eec 100644
--- a/api/service/stagedsync/stage_lastmile.go
+++ b/api/service/stagedsync/stage_lastmile.go
@@ -49,7 +49,7 @@ func (lm *StageLastMile) Exec(firstCycle bool, invalidBlockRevert bool, s *Stage
 		if block == nil {
 			break
 		}
-		err = s.state.UpdateBlockAndStatus(block, bc, true)
+		err = s.state.UpdateBlockAndStatus(block, bc)
 		if err != nil {
 			break
 		}
@@ -70,7 +70,7 @@ func (lm *StageLastMile) Exec(firstCycle bool, invalidBlockRevert bool, s *Stage
 		if block == nil {
 			break
 		}
-		err = s.state.UpdateBlockAndStatus(block, bc, false)
+		err = s.state.UpdateBlockAndStatus(block, bc)
 		if err != nil {
 			break
 		}
diff --git a/api/service/stagedsync/stagedsync.go b/api/service/stagedsync/stagedsync.go
index 83be4bae46..7959a05d29 100644
--- a/api/service/stagedsync/stagedsync.go
+++ b/api/service/stagedsync/stagedsync.go
@@ -1035,7 +1035,7 @@ func (ss *StagedSync) getBlockFromLastMileBlocksByParentHash(parentHash common.H
 }

 // UpdateBlockAndStatus updates block and its status in db
-func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error {
+func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain) error {
 	if block.NumberU64() != bc.CurrentBlock().NumberU64()+1 {
 		utils.Logger().Debug().
 			Uint64("curBlockNum", bc.CurrentBlock().NumberU64()).
@@ -1043,6 +1043,7 @@ func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChai
 			Msg("[STAGED_SYNC] Inappropriate block number, ignore!")
 		return nil
 	}
+	verifyAllSig := true

 	haveCurrentSig := len(block.GetCurrentCommitSig()) != 0
 	// Verify block signatures
diff --git a/node/node.go b/node/node.go
index 573786c009..df766e52ee 100644
--- a/node/node.go
+++ b/node/node.go
@@ -81,7 +81,7 @@ type syncConfig struct {
 }

 type ISync interface {
-	UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error
+	UpdateBlockAndStatus(block *types.Block, bc core.BlockChain) error
 	AddLastMileBlock(block *types.Block)
 	GetActivePeerNumber() int
 	CreateSyncConfig(peers []p2p.Peer, shardID uint32, selfPeerID libp2p_peer.ID, waitForEachPeerToConnect bool) error

From 00734e2f8dbe0f36df0f134c613d9d3bf366c597 Mon Sep 17 00:00:00 2001
From: Gheis Mohammadi
Date: Fri, 12 Jan 2024 02:47:06 +0800
Subject: [PATCH 108/128] closing stream removes it from stream manager as
 well, so it doesn't need to remove it on caller function (#4606)

---
 p2p/stream/protocols/sync/protocol.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/p2p/stream/protocols/sync/protocol.go b/p2p/stream/protocols/sync/protocol.go
index 0cb48bfff3..b4e84592ae 100644
--- a/p2p/stream/protocols/sync/protocol.go
+++ b/p2p/stream/protocols/sync/protocol.go
@@ -271,8 +271,6 @@ func (p *Protocol) RemoveStream(stID sttypes.StreamID) {
 	if exist && st != nil {
 		//TODO: log this incident with reason
 		st.Close()
-		// stream manager removes this stream from the list and triggers discovery if number of streams are not enough
-		p.sm.RemoveStream(stID) //TODO: double check to see if this part is needed
 		p.logger.Info().
 			Str("stream ID", string(stID)).
 			Msg("stream removed")
@@ -290,8 +288,6 @@ func (p *Protocol) StreamFailed(stID sttypes.StreamID, reason string) {
 			Msg("stream failed")
 		if st.FailedTimes() >= MaxStreamFailures {
 			st.Close()
-			// stream manager removes this stream from the list and triggers discovery if number of streams are not enough
-			p.sm.RemoveStream(stID) //TODO: double check to see if this part is needed
 			p.logger.Warn().
 				Str("stream ID", string(st.ID())).
 				Msg("stream removed")

From 1dd67a801c326fa7bef93632f9165ddac31a25d6 Mon Sep 17 00:00:00 2001
From: Konstantin <355847+Frozen@users.noreply.github.com>
Date: Thu, 11 Jan 2024 14:47:24 -0400
Subject: [PATCH 109/128] 15 nodes per shard for devnet (#4607)

---
 internal/configs/sharding/partner.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/configs/sharding/partner.go b/internal/configs/sharding/partner.go
index 93cc4139de..5834b5ae31 100644
--- a/internal/configs/sharding/partner.go
+++ b/internal/configs/sharding/partner.go
@@ -99,7 +99,7 @@ var partnerV0 = MustNewInstance(
 	partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch(),
 )
 var partnerV1 = MustNewInstance(
-	2, 5, 4, 0,
+	2, 15, 4, 0,
 	numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts,
 	genesis.TNFoundationalAccounts, emptyAllowlist,
 	nil, numeric.ZeroDec(), ethCommon.Address{},

From a31b4f56409f8ea67cd6e96b07e134c94a2f2659 Mon Sep 17 00:00:00 2001
From: Konstantin <355847+Frozen@users.noreply.github.com>
Date: Tue, 16 Jan 2024 17:07:05 -0400
Subject: [PATCH 110/128] Thread safe Decider. (#4610)

---
 consensus/consensus.go                  |  12 +-
 consensus/consensus_service.go          |  26 ++--
 consensus/consensus_test.go             |   3 +-
 consensus/consensus_v2.go               |  12 +-
 consensus/construct.go                  |   4 +-
 consensus/construct_test.go             |   4 +-
 consensus/double_sign.go                |   2 +-
 consensus/leader.go                     |  26 ++--
 consensus/quorum/thread_safe_decider.go | 179 ++++++++++++++++++++++++
 consensus/threshold.go                  |   2 +-
 consensus/validator.go                  |   6 +-
 consensus/view_change.go                |  28 ++--
 consensus/view_change_test.go           |   6 +-
 node/api.go                             |   2 +-
 node/node.go                            |   2 +-
 node/node_explorer.go                   |   2 +-
 16 files changed, 250 insertions(+), 66 deletions(-)
 create mode 100644 consensus/quorum/thread_safe_decider.go

diff --git a/consensus/consensus.go b/consensus/consensus.go
index 18b53e682d..6f019b2a9b 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -46,7 +46,7 @@ type DownloadAsync interface {

 // Consensus is the main struct with all states and data related to consensus process.
 type Consensus struct {
-	Decider quorum.Decider
+	decider quorum.Decider
 	// FBFTLog stores the pbft messages and blocks during FBFT process
 	fBFTLog *FBFTLog
 	// phase: different phase of FBFT protocol: pre-prepare, prepare, commit, finish etc
@@ -200,7 +200,9 @@ func (consensus *Consensus) BlocksNotSynchronized(reason string) {

 // VdfSeedSize returns the number of VRFs for VDF computation
 func (consensus *Consensus) VdfSeedSize() int {
-	return int(consensus.Decider.ParticipantsCount()) * 2 / 3
+	consensus.mutex.RLock()
+	defer consensus.mutex.RUnlock()
+	return int(consensus.decider.ParticipantsCount()) * 2 / 3
 }

 // GetPublicKeys returns the public keys
@@ -275,7 +277,7 @@ func New(
 		fBFTLog:      NewFBFTLog(),
 		phase:        FBFTAnnounce,
 		current:      State{mode: Normal},
-		Decider:      Decider,
+		decider:      Decider,
 		registry:     registry,
 		MinPeers:     minPeers,
 		AggregateSig: aggregateSig,
@@ -322,6 +324,10 @@ func (consensus *Consensus) Registry() *registry.Registry {
 	return consensus.registry
 }

+func (consensus *Consensus) Decider() quorum.Decider {
+	return quorum.NewThreadSafeDecider(consensus.decider, consensus.mutex)
+}
+
 // InitConsensusWithValidators initialize shard state
 // from latest epoch and update committee pub
 // keys for consensus
diff --git a/consensus/consensus_service.go b/consensus/consensus_service.go
index 40f0bc23d8..48324c4788 100644
--- a/consensus/consensus_service.go
+++ b/consensus/consensus_service.go
@@ -82,7 +82,7 @@ func (consensus *Consensus) UpdatePublicKeys(pubKeys, allowlist []bls_cosi.Publi
 }

 func (consensus *Consensus) updatePublicKeys(pubKeys, allowlist []bls_cosi.PublicKeyWrapper) int64 {
-	consensus.Decider.UpdateParticipants(pubKeys, allowlist)
+	consensus.decider.UpdateParticipants(pubKeys, allowlist)
 	consensus.getLogger().Info().Msg("My Committee updated")
 	for i := range pubKeys {
 		consensus.getLogger().Info().
@@ -91,7 +91,7 @@ func (consensus *Consensus) updatePublicKeys(pubKeys, allowlist []bls_cosi.Publi
 			Msg("Member")
 	}

-	allKeys := consensus.Decider.Participants()
+	allKeys := consensus.decider.Participants()
 	if len(allKeys) != 0 {
 		consensus.LeaderPubKey = &allKeys[0]
 		consensus.getLogger().Info().
@@ -115,7 +115,7 @@ func (consensus *Consensus) updatePublicKeys(pubKeys, allowlist []bls_cosi.Publi
 	if !consensus.isViewChangingMode() {
 		consensus.resetViewChangeState()
 	}
-	return consensus.Decider.ParticipantsCount()
+	return consensus.decider.ParticipantsCount()
 }

 // Sign on the hash of the message
@@ -144,7 +144,7 @@ func (consensus *Consensus) updateBitmaps() {
 	consensus.getLogger().Debug().
 		Str("MessageType", consensus.phase.String()).
 		Msg("[UpdateBitmaps] Updating consensus bitmaps")
-	members := consensus.Decider.Participants()
+	members := consensus.decider.Participants()
 	prepareBitmap := bls_cosi.NewMask(members)
 	commitBitmap := bls_cosi.NewMask(members)
 	multiSigBitmap := bls_cosi.NewMask(members)
@@ -160,7 +160,7 @@ func (consensus *Consensus) resetState() {
 	consensus.blockHash = [32]byte{}
 	consensus.block = []byte{}

-	consensus.Decider.ResetPrepareAndCommitVotes()
+	consensus.decider.ResetPrepareAndCommitVotes()
 	if consensus.prepareBitmap != nil {
 		consensus.prepareBitmap.Clear()
 	}
@@ -179,7 +179,7 @@ func (consensus *Consensus) IsValidatorInCommittee(pubKey bls.SerializedPublicKe
 }

 func (consensus *Consensus) isValidatorInCommittee(pubKey bls.SerializedPublicKey) bool {
-	return consensus.Decider.IndexOf(pubKey) != -1
+	return consensus.decider.IndexOf(pubKey) != -1
 }

 // SetMode sets the mode of consensus
@@ -271,7 +271,7 @@ func (consensus *Consensus) setBlockNum(blockNum uint64) {
 // ReadSignatureBitmapPayload read the payload for signature and bitmap; offset is the beginning position of reading
 func (consensus *Consensus) ReadSignatureBitmapPayload(recvPayload []byte, offset int) (*bls_core.Sign, *bls_cosi.Mask, error) {
 	consensus.mutex.RLock()
-	members := consensus.Decider.Participants()
+	members := consensus.decider.Participants()
 	consensus.mutex.RUnlock()
 	return consensus.readSignatureBitmapPayload(recvPayload, offset, members)
 }
@@ -334,12 +334,12 @@ func (consensus *Consensus) updateConsensusInformation() Mode {
 	isFirstTimeStaking := consensus.Blockchain().Config().IsStaking(nextEpoch) &&
 		curHeader.IsLastBlockInEpoch() && !consensus.Blockchain().Config().IsStaking(curEpoch)
 	haventUpdatedDecider := consensus.Blockchain().Config().IsStaking(curEpoch) &&
-		consensus.Decider.Policy() != quorum.SuperMajorityStake
+		consensus.decider.Policy() != quorum.SuperMajorityStake

 	// Only happens once, the flip-over to a new Decider policy
 	if isFirstTimeStaking || haventUpdatedDecider {
 		decider := quorum.NewDecider(quorum.SuperMajorityStake, consensus.ShardID)
-		consensus.Decider = decider
+		consensus.decider = decider
 	}

 	var committeeToSet *shard.Committee
@@ -412,7 +412,7 @@ func (consensus *Consensus) updateConsensusInformation() Mode {
 		consensus.updatePublicKeys(pubKeys, shard.Schedule.InstanceForEpoch(nextEpoch).ExternalAllowlist())

 	// Update voters in the committee
-	if _, err := consensus.Decider.SetVoters(
+	if _, err := consensus.decider.SetVoters(
 		committeeToSet, epochToSet,
 	); err != nil {
 		consensus.getLogger().Error().
@@ -582,7 +582,7 @@ func (consensus *Consensus) selfCommit(payload []byte) error {
 		return errGetPreparedBlock
 	}

-	aggSig, mask, err := consensus.readSignatureBitmapPayload(payload, 32, consensus.Decider.Participants())
+	aggSig, mask, err := consensus.readSignatureBitmapPayload(payload, 32, consensus.decider.Participants())
 	if err != nil {
 		return errReadBitmapPayload
 	}
@@ -606,7 +606,7 @@ func (consensus *Consensus) selfCommit(payload []byte) error {
 			continue
 		}

-		if _, err := consensus.Decider.AddNewVote(
+		if _, err := consensus.decider.AddNewVote(
 			quorum.Commit,
 			[]*bls_cosi.PublicKeyWrapper{key.Pub},
 			key.Pri.SignHash(commitPayload),
@@ -628,7 +628,7 @@ func (consensus *Consensus) selfCommit(payload []byte) error {
 func (consensus *Consensus) NumSignaturesIncludedInBlock(block *types.Block) uint32 {
 	count := uint32(0)
 	consensus.mutex.Lock()
-	members := consensus.Decider.Participants()
+	members := consensus.decider.Participants()
 	pubKeys := consensus.getPublicKeys()
 	consensus.mutex.Unlock()
diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go
index 992e725e75..2fe524fdf8 100644
--- a/consensus/consensus_test.go
+++ b/consensus/consensus_test.go
@@ -18,7 +18,7 @@ import (
 )

 func TestConsensusInitialization(t *testing.T) {
-	host, multiBLSPrivateKey, consensus, decider, err := GenerateConsensusForTesting()
+	host, multiBLSPrivateKey, consensus, _, err := GenerateConsensusForTesting()
 	assert.NoError(t, err)

 	messageSender := &MessageSender{host: host, retryTimes: int(phaseDuration.Seconds()) / RetryIntervalInSec}
@@ -30,7 +30,6 @@ func TestConsensusInitialization(t *testing.T) {
 	expectedTimeouts[timeoutViewChange] = viewChangeDuration
 	expectedTimeouts[timeoutBootstrap] = bootstrapDuration

-	assert.Equal(t, decider, consensus.Decider)
 	assert.Equal(t, host, consensus.host)
 	assert.Equal(t, messageSender, consensus.msgSender)
diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go
index 0e1c407057..f4b8c56f09 100644
--- a/consensus/consensus_v2.go
+++ b/consensus/consensus_v2.go
@@ -91,7 +91,7 @@ func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, peer libp2p
 	case t == msg_pb.MessageType_VIEWCHANGE:
 		fbftMsg, err = ParseViewChangeMessage(msg)
 	case t == msg_pb.MessageType_NEWVIEW:
-		members := consensus.Decider.Participants()
+		members := consensus.decider.Participants()
 		fbftMsg, err = ParseNewViewMessage(msg, members)
 	default:
 		fbftMsg, err = consensus.parseFBFTMessage(msg)
@@ -138,7 +138,7 @@ func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, peer libp2p
 }

 func (consensus *Consensus) finalCommit() {
-	numCommits := consensus.Decider.SignersCount(quorum.Commit)
+	numCommits := consensus.decider.SignersCount(quorum.Commit)

 	consensus.getLogger().Info().
 		Int64("NumCommits", numCommits).
@@ -441,7 +441,7 @@ func (consensus *Consensus) BlockChannel(newBlock *types.Block) {
 			Int("numTxs", len(newBlock.Transactions())).
 			Int("numStakingTxs", len(newBlock.StakingTransactions())).
 			Time("startTime", startTime).
-			Int64("publicKeys", consensus.Decider.ParticipantsCount()).
+			Int64("publicKeys", consensus.decider.ParticipantsCount()).
 			Msg("[ConsensusMainLoop] STARTING CONSENSUS")
 		consensus.announce(newBlock)
 	})
@@ -741,16 +741,16 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper {

 	for i := 0; i < len(committee.Slots); i++ {
 		if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
-			wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, offset)
+			wasFound, next = consensus.decider.NthNextValidator(committee.Slots, leader, offset)
 		} else {
-			wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, offset)
+			wasFound, next = consensus.decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, offset)
 		}
 		if !wasFound {
 			utils.Logger().Error().Msg("Failed to get next leader")
 			// Seems like nothing we can do here.
 			return nil
 		}
-		members := consensus.Decider.Participants()
+		members := consensus.decider.Participants()
 		mask := bls.NewMask(members)
 		skipped := 0
 		for i := 0; i < blocksCountAliveness; i++ {
diff --git a/consensus/construct.go b/consensus/construct.go
index 10488816c7..48291a3644 100644
--- a/consensus/construct.go
+++ b/consensus/construct.go
@@ -82,7 +82,7 @@ func (consensus *Consensus) construct(
 		)
 	} else {
 		// TODO: use a persistent bitmap to report bitmap
-		mask := bls.NewMask(consensus.Decider.Participants())
+		mask := bls.NewMask(consensus.decider.Participants())
 		for _, key := range priKeys {
 			mask.SetKey(key.Pub.Bytes, true)
 		}
@@ -161,7 +161,7 @@ func (consensus *Consensus) construct(
 func (consensus *Consensus) constructQuorumSigAndBitmap(p quorum.Phase) []byte {
 	buffer := bytes.Buffer{}
 	// 96 bytes aggregated signature
-	aggSig := consensus.Decider.AggregateVotes(p)
+	aggSig := consensus.decider.AggregateVotes(p)
 	buffer.Write(aggSig.Serialize())
 	// Bitmap
 	if p == quorum.Prepare {
diff --git a/consensus/construct_test.go b/consensus/construct_test.go
index 7188ebea68..c836e78224 100644
--- a/consensus/construct_test.go
+++ b/consensus/construct_test.go
@@ -81,7 +81,7 @@ func TestConstructPreparedMessage(test *testing.T) {
 	validatorKey := bls.SerializedPublicKey{}
 	validatorKey.FromLibBLSPublicKey(validatorPubKey)
 	validatorKeyWrapper := bls.PublicKeyWrapper{Object: validatorPubKey, Bytes: validatorKey}
-	consensus.Decider.AddNewVote(
+	consensus.Decider().AddNewVote(
 		quorum.Prepare,
 		[]*bls.PublicKeyWrapper{&leaderKeyWrapper},
 		leaderPriKey.Sign(message),
@@ -89,7 +89,7 @@ func TestConstructPreparedMessage(test *testing.T) {
 		consensus.BlockNum(),
 		consensus.GetCurBlockViewID(),
 	)
-	if _, err := consensus.Decider.AddNewVote(
+	if _, err := consensus.Decider().AddNewVote(
 		quorum.Prepare,
 		[]*bls.PublicKeyWrapper{&validatorKeyWrapper},
 		validatorPriKey.Sign(message),
diff --git a/consensus/double_sign.go b/consensus/double_sign.go
index 3a8d559fd6..144c67bff7 100644
--- a/consensus/double_sign.go
+++ b/consensus/double_sign.go
@@ -17,7 +17,7 @@ func (consensus *Consensus) checkDoubleSign(recvMsg *FBFTMessage) bool {
 	if consensus.couldThisBeADoubleSigner(recvMsg) {
 		addrSet := map[common.Address]struct{}{}
 		for _, pubKey2 := range recvMsg.SenderPubkeys {
-			if alreadyCastBallot := consensus.Decider.ReadBallot(
+			if alreadyCastBallot := consensus.decider.ReadBallot(
 				quorum.Commit, pubKey2.Bytes,
 			); alreadyCastBallot != nil {
 				for _, pubKey1 := range alreadyCastBallot.SignerPubKeys {
diff --git a/consensus/leader.go b/consensus/leader.go
index 0bd934cb7f..747be1eb70 100644
--- a/consensus/leader.go
+++ b/consensus/leader.go
@@ -62,7 +62,7 @@ func (consensus *Consensus) announce(block *types.Block) {
 			continue
 		}

-		if _, err := consensus.Decider.AddNewVote(
+		if _, err := consensus.decider.AddNewVote(
 			quorum.Prepare,
 			[]*bls.PublicKeyWrapper{key.Pub},
 			key.Pri.SignHash(consensus.blockHash[:]),
@@ -112,7 +112,7 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
 	prepareBitmap := consensus.prepareBitmap
 	// proceed only when the message is not received before
 	for _, signer := range recvMsg.SenderPubkeys {
-		signed := consensus.Decider.ReadBallot(quorum.Prepare, signer.Bytes)
+		signed := consensus.decider.ReadBallot(quorum.Prepare, signer.Bytes)
 		if signed != nil {
 			consensus.getLogger().Debug().
 				Str("validatorPubKey", signer.Bytes.Hex()).
@@ -121,14 +121,14 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
 		}
 	}

-	if consensus.Decider.IsQuorumAchieved(quorum.Prepare) {
+	if consensus.decider.IsQuorumAchieved(quorum.Prepare) {
 		// already have enough signatures
 		consensus.getLogger().Debug().
 			Interface("validatorPubKeys", recvMsg.SenderPubkeys).
 			Msg("[OnPrepare] Received Additional Prepare Message")
 		return
 	}
-	signerCount := consensus.Decider.SignersCount(quorum.Prepare)
+	signerCount := consensus.decider.SignersCount(quorum.Prepare)
 	//// Read - End

 	// Check BLS signature for the multi-sig
@@ -161,11 +161,11 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {

 	consensus.getLogger().Debug().
 		Int64("NumReceivedSoFar", signerCount).
-		Int64("PublicKeys", consensus.Decider.ParticipantsCount()).
+		Int64("PublicKeys", consensus.decider.ParticipantsCount()).
 		Msg("[OnPrepare] Received New Prepare Signature")

 	//// Write - Start
-	if _, err := consensus.Decider.AddNewVote(
+	if _, err := consensus.decider.AddNewVote(
 		quorum.Prepare, recvMsg.SenderPubkeys,
 		&sign, recvMsg.BlockHash,
 		recvMsg.BlockNum, recvMsg.ViewID,
@@ -181,7 +181,7 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
 	//// Write - End

 	//// Read - Start
-	if consensus.Decider.IsQuorumAchieved(quorum.Prepare) {
+	if consensus.decider.IsQuorumAchieved(quorum.Prepare) {
 		// NOTE Let it handle its own logs
 		if err := consensus.didReachPrepareQuorum(); err != nil {
 			return
@@ -199,7 +199,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 	}
 	// proceed only when the message is not received before
 	for _, signer := range recvMsg.SenderPubkeys {
-		signed := consensus.Decider.ReadBallot(quorum.Commit, signer.Bytes)
+		signed := consensus.decider.ReadBallot(quorum.Commit, signer.Bytes)
 		if signed != nil {
 			consensus.getLogger().Debug().
 				Str("validatorPubKey", signer.Bytes.Hex()).
@@ -211,9 +211,9 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 	commitBitmap := consensus.commitBitmap

 	// has to be called before verifying signature
-	quorumWasMet := consensus.Decider.IsQuorumAchieved(quorum.Commit)
+	quorumWasMet := consensus.decider.IsQuorumAchieved(quorum.Commit)

-	signerCount := consensus.Decider.SignersCount(quorum.Commit)
+	signerCount := consensus.decider.SignersCount(quorum.Commit)
 	//// Read - End

 	// Verify the signature on commitPayload is correct
@@ -267,7 +267,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 			return
 		}
 	*/
-	if _, err := consensus.Decider.AddNewVote(
+	if _, err := consensus.decider.AddNewVote(
 		quorum.Commit, recvMsg.SenderPubkeys,
 		&sign, recvMsg.BlockHash,
 		recvMsg.BlockNum, recvMsg.ViewID,
@@ -285,7 +285,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 	//// Read - Start
 	viewID := consensus.getCurBlockViewID()

-	if consensus.Decider.IsAllSigsCollected() {
+	if consensus.decider.IsAllSigsCollected() {
 		logger.Info().Msg("[OnCommit] 100% Enough commits received")
 		consensus.finalCommit()

@@ -293,7 +293,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 		return
 	}

-	quorumIsMet := consensus.Decider.IsQuorumAchieved(quorum.Commit)
+	quorumIsMet := consensus.decider.IsQuorumAchieved(quorum.Commit)
 	//// Read - End

 	if !quorumWasMet && quorumIsMet {
diff --git a/consensus/quorum/thread_safe_decider.go b/consensus/quorum/thread_safe_decider.go
new file mode 100644
index 0000000000..9999325f67
--- /dev/null
+++ b/consensus/quorum/thread_safe_decider.go
@@ -0,0 +1,179 @@
+package quorum
+
+import (
+	"math/big"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common"
+	bls_core "github.com/harmony-one/bls/ffi/go/bls"
+	"github.com/harmony-one/harmony/consensus/votepower"
+	"github.com/harmony-one/harmony/crypto/bls"
+	shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
+	"github.com/harmony-one/harmony/multibls"
+	"github.com/harmony-one/harmony/numeric"
+	"github.com/harmony-one/harmony/shard"
+)
+
+var _ Decider = threadSafeDeciderImpl{}
+
+type threadSafeDeciderImpl struct {
+	mu      *sync.RWMutex
+	decider Decider
+}
+
+func NewThreadSafeDecider(decider Decider, mu *sync.RWMutex) Decider {
+	return threadSafeDeciderImpl{
+		mu:      mu,
+		decider: decider,
+	}
+}
+
+func (a threadSafeDeciderImpl) String() string {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.String()
+}
+
+func (a threadSafeDeciderImpl) Participants() multibls.PublicKeys {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.Participants()
+}
+
+func (a threadSafeDeciderImpl) IndexOf(key bls.SerializedPublicKey) int {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.IndexOf(key)
+}
+
+func (a threadSafeDeciderImpl) ParticipantsCount() int64 {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.ParticipantsCount()
+}
+
+func (a threadSafeDeciderImpl) NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.NthNextValidator(slotList, pubKey, next)
+}
+
+func (a threadSafeDeciderImpl) NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.NthNextHmy(instance, pubkey, next)
+}
+
+func (a threadSafeDeciderImpl) NthNextHmyExt(instance shardingconfig.Instance, wrapper *bls.PublicKeyWrapper, i int) (bool, *bls.PublicKeyWrapper) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.NthNextHmyExt(instance, wrapper, i)
+}
+
+func (a threadSafeDeciderImpl) FirstParticipant(instance shardingconfig.Instance) *bls.PublicKeyWrapper {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.FirstParticipant(instance)
+}
+
+func (a threadSafeDeciderImpl) UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	a.decider.UpdateParticipants(pubKeys, allowlist)
+}
+
+func (a threadSafeDeciderImpl) submitVote(p Phase, pubkeys []bls.SerializedPublicKey, sig *bls_core.Sign, headerHash common.Hash, height, viewID uint64) (*votepower.Ballot, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.submitVote(p, pubkeys, sig, headerHash, height, viewID)
+}
+
+func (a threadSafeDeciderImpl) SignersCount(phase Phase) int64 {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.SignersCount(phase)
+}
+
+func (a threadSafeDeciderImpl) reset(phases []Phase) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	a.decider.reset(phases)
+}
+
+func (a threadSafeDeciderImpl) ReadBallot(p Phase, pubkey bls.SerializedPublicKey) *votepower.Ballot {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.ReadBallot(p, pubkey)
+}
+
+func (a threadSafeDeciderImpl) TwoThirdsSignersCount() int64 {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.TwoThirdsSignersCount()
+}
+
+func (a threadSafeDeciderImpl) AggregateVotes(p Phase) *bls_core.Sign {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.AggregateVotes(p)
+}
+
+func (a threadSafeDeciderImpl) SetVoters(subCommittee *shard.Committee, epoch *big.Int) (*TallyResult, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.SetVoters(subCommittee, epoch)
+}
+
+func (a threadSafeDeciderImpl) Policy() Policy {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.Policy()
+}
+
+func (a threadSafeDeciderImpl) AddNewVote(p Phase, pubkeys []*bls.PublicKeyWrapper, sig *bls_core.Sign, headerHash common.Hash, height, viewID uint64) (*votepower.Ballot, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.AddNewVote(p, pubkeys, sig, headerHash, height, viewID)
+}
+
+func (a threadSafeDeciderImpl) IsQuorumAchievedByMask(mask *bls.Mask) bool {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.IsQuorumAchievedByMask(mask)
+}
+
+func (a threadSafeDeciderImpl) QuorumThreshold() numeric.Dec {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.QuorumThreshold()
+}
+
+func (a threadSafeDeciderImpl) IsAllSigsCollected() bool {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.IsAllSigsCollected()
+}
+
+func (a threadSafeDeciderImpl) ResetPrepareAndCommitVotes() {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	a.decider.ResetPrepareAndCommitVotes()
+}
+
+func (a threadSafeDeciderImpl) ResetViewChangeVotes() {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	a.decider.ResetViewChangeVotes()
+}
+
+func (a threadSafeDeciderImpl) CurrentTotalPower(p Phase) (*numeric.Dec, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.CurrentTotalPower(p)
+}
+
+func (a threadSafeDeciderImpl) IsQuorumAchieved(p Phase) bool {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+	return a.decider.IsQuorumAchieved(p)
+}
diff --git a/consensus/threshold.go b/consensus/threshold.go
index e611eaedcd..339f6d2a7e 100644
--- a/consensus/threshold.go
+++ b/consensus/threshold.go
@@ -57,7 +57,7 @@ func (consensus *Consensus) didReachPrepareQuorum() error {
 			continue
 		}

-		if _, err := consensus.Decider.AddNewVote(
+		if _, err := consensus.decider.AddNewVote(
 			quorum.Commit,
 			[]*bls.PublicKeyWrapper{key.Pub},
 			key.Pri.SignHash(commitPayload),
diff --git a/consensus/validator.go b/consensus/validator.go
index 891fe0c035..fa5cdac921 100644
--- a/consensus/validator.go
+++ b/consensus/validator.go
@@ -199,12 +199,12 @@ func (consensus *Consensus) onPrepared(recvMsg *FBFTMessage) {

 	// check validity of prepared signature
 	blockHash := recvMsg.BlockHash
-	aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 0, consensus.Decider.Participants())
+	aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 0, consensus.decider.Participants())
 	if err != nil {
 		consensus.getLogger().Error().Err(err).Msg("ReadSignatureBitmapPayload failed!")
 		return
 	}
-	if !consensus.Decider.IsQuorumAchievedByMask(mask) {
+	if !consensus.decider.IsQuorumAchievedByMask(mask) {
 		consensus.getLogger().Warn().Msgf("[OnPrepared] Quorum Not achieved.")
 		return
 	}
@@ -335,7 +335,7 @@ func (consensus *Consensus) onCommitted(recvMsg *FBFTMessage) {
 		return
 	}

-	aggSig, mask, err := chain.DecodeSigBitmap(sigBytes, bitmap, consensus.Decider.Participants())
+	aggSig, mask, err := chain.DecodeSigBitmap(sigBytes, bitmap, consensus.decider.Participants())
 	if err != nil {
 		consensus.getLogger().Error().Err(err).Msg("[OnCommitted] readSignatureBitmapPayload failed")
 		return
diff --git a/consensus/view_change.go b/consensus/view_change.go
index d03ae5a13a..1171b073e0 100644
--- a/consensus/view_change.go
+++ b/consensus/view_change.go
@@ -187,7 +187,7 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
 			// it can still sync with other validators.
 			if curHeader.IsLastBlockInEpoch() {
 				consensus.getLogger().Info().Msg("[getNextLeaderKey] view change in the first block of new epoch")
-				lastLeaderPubKey = consensus.Decider.FirstParticipant(shard.Schedule.InstanceForEpoch(epoch))
+				lastLeaderPubKey = consensus.decider.FirstParticipant(shard.Schedule.InstanceForEpoch(epoch))
 			}
 		}
 	}
@@ -204,18 +204,18 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
 	var next *bls.PublicKeyWrapper
 	if blockchain != nil && blockchain.Config().IsLeaderRotationInternalValidators(epoch) {
 		if blockchain.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
-			wasFound, next = consensus.Decider.NthNextValidator(
+			wasFound, next = consensus.decider.NthNextValidator(
 				committee.Slots,
 				lastLeaderPubKey,
 				gap)
 		} else {
-			wasFound, next = consensus.Decider.NthNextHmy(
+			wasFound, next = consensus.decider.NthNextHmy(
 				shard.Schedule.InstanceForEpoch(epoch),
 				lastLeaderPubKey,
 				gap)
 		}
 	} else {
-		wasFound, next = consensus.Decider.NthNextHmy(
+		wasFound, next = consensus.decider.NthNextHmy(
 			shard.Schedule.InstanceForEpoch(epoch),
 			lastLeaderPubKey,
 			gap)
@@ -281,7 +281,7 @@ func (consensus *Consensus) startViewChange() {
 	defer consensus.consensusTimeout[timeoutViewChange].Start()

 	// update the dictionary key if the viewID is first time received
-	members := consensus.Decider.Participants()
+	members := consensus.decider.Participants()
 	consensus.vc.AddViewIDKeyIfNotExist(nextViewID, members)

 	// init my own payload
@@ -386,10 +386,10 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) {
 		return
 	}

-	if consensus.Decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) {
+	if consensus.decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) {
 		consensus.getLogger().Info().
-			Int64("have", consensus.Decider.SignersCount(quorum.ViewChange)).
-			Int64("need", consensus.Decider.TwoThirdsSignersCount()).
+ Int64("have", consensus.decider.SignersCount(quorum.ViewChange)). + Int64("need", consensus.decider.TwoThirdsSignersCount()). Interface("SenderPubkeys", recvMsg.SenderPubkeys). Str("newLeaderKey", newLeaderKey.Bytes.Hex()). Msg("[onViewChange] Received Enough View Change Messages") @@ -404,7 +404,7 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) { senderKey := recvMsg.SenderPubkeys[0] // update the dictionary key if the viewID is first time received - members := consensus.Decider.Participants() + members := consensus.decider.Participants() consensus.vc.AddViewIDKeyIfNotExist(recvMsg.ViewID, members) // do it once only per viewID/Leader @@ -420,7 +420,7 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) { return } - err = consensus.vc.ProcessViewChangeMsg(consensus.fBFTLog, consensus.Decider, recvMsg, consensus.verifyBlock) + err = consensus.vc.ProcessViewChangeMsg(consensus.fBFTLog, consensus.decider, recvMsg, consensus.verifyBlock) if err != nil { consensus.getLogger().Error().Err(err). Uint64("viewID", recvMsg.ViewID). @@ -431,7 +431,7 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) { } // received enough view change messages, change state to normal consensus - if consensus.Decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) && consensus.isViewChangingMode() { + if consensus.decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) && consensus.isViewChangingMode() { // no previous prepared message, go straight to normal mode // and start proposing new block if consensus.vc.IsM1PayloadEmpty() { @@ -495,7 +495,7 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) { } m3Mask := recvMsg.M3Bitmap - if !consensus.Decider.IsQuorumAchievedByMask(m3Mask) { + if !consensus.decider.IsQuorumAchievedByMask(m3Mask) { consensus.getLogger().Warn(). Msgf("[onNewView] Quorum Not achieved") return @@ -507,7 +507,7 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) { utils.CountOneBits(m3Mask.Bitmap) > utils.CountOneBits(m2Mask.Bitmap)) { // m1 is not empty, check it's valid blockHash := recvMsg.Payload[:32] - aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 32, consensus.Decider.Participants()) + aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 32, consensus.decider.Participants()) if err != nil { consensus.getLogger().Error().Err(err). 
Msg("[onNewView] ReadSignatureBitmapPayload Failed") @@ -584,5 +584,5 @@ func (consensus *Consensus) resetViewChangeState() { Msg("[ResetViewChangeState] Resetting view change state") consensus.current.SetMode(Normal) consensus.vc.Reset() - consensus.Decider.ResetViewChangeVotes() + consensus.decider.ResetViewChangeVotes() } diff --git a/consensus/view_change_test.go b/consensus/view_change_test.go index 96d8fbc865..bbc6999445 100644 --- a/consensus/view_change_test.go +++ b/consensus/view_change_test.go @@ -94,7 +94,7 @@ func TestGetNextLeaderKeyShouldSucceed(t *testing.T) { _, _, consensus, _, err := GenerateConsensusForTesting() assert.NoError(t, err) - assert.Equal(t, int64(0), consensus.Decider.ParticipantsCount()) + assert.Equal(t, int64(0), consensus.Decider().ParticipantsCount()) blsKeys := []*bls_core.PublicKey{} wrappedBLSKeys := []bls.PublicKeyWrapper{} @@ -111,8 +111,8 @@ func TestGetNextLeaderKeyShouldSucceed(t *testing.T) { wrappedBLSKeys = append(wrappedBLSKeys, wrapped) } - consensus.Decider.UpdateParticipants(wrappedBLSKeys, []bls.PublicKeyWrapper{}) - assert.Equal(t, keyCount, consensus.Decider.ParticipantsCount()) + consensus.Decider().UpdateParticipants(wrappedBLSKeys, []bls.PublicKeyWrapper{}) + assert.Equal(t, keyCount, consensus.Decider().ParticipantsCount()) consensus.LeaderPubKey = &wrappedBLSKeys[0] nextKey := consensus.getNextLeaderKey(uint64(1), nil) diff --git a/node/api.go b/node/api.go index ef76079f1e..e3862f510c 100644 --- a/node/api.go +++ b/node/api.go @@ -177,7 +177,7 @@ func (node *Node) GetConfig() rpc_common.Config { // GetLastSigningPower get last signed power func (node *Node) GetLastSigningPower() (float64, error) { - power, err := node.Consensus.Decider.CurrentTotalPower(quorum.Commit) + power, err := node.Consensus.Decider().CurrentTotalPower(quorum.Commit) if err != nil { return 0, err } diff --git a/node/node.go b/node/node.go index df766e52ee..d815f86ece 100644 --- a/node/node.go +++ b/node/node.go @@ -655,7 +655,7 @@ func validateShardBoundMessage(consensus *consensus.Consensus, peer libp2p_peer. return nil, nil, true, errors.WithStack(shard.ErrValidNotInCommittee) } } else { - count := consensus.Decider.ParticipantsCount() + count := consensus.Decider().ParticipantsCount() if (count+7)>>3 != int64(len(senderBitmap)) { nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_participant_count"}).Inc() return nil, nil, true, errors.WithStack(errWrongSizeOfBitmap) diff --git a/node/node_explorer.go b/node/node_explorer.go index ce1b0a2445..1e4a4010a0 100644 --- a/node/node_explorer.go +++ b/node/node_explorer.go @@ -53,7 +53,7 @@ func (node *Node) explorerMessageHandler(ctx context.Context, msg *msg_pb.Messag return err } - if !node.Consensus.Decider.IsQuorumAchievedByMask(mask) { + if !node.Consensus.Decider().IsQuorumAchievedByMask(mask) { utils.Logger().Error().Msg("[Explorer] not have enough signature power") return nil } From cdbc79e01f2f95807764570b78f20194ea18aa05 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 17 Jan 2024 02:23:58 -0400 Subject: [PATCH 111/128] Devnet: activate leader rotation at epoch 4 . (#4611) * LeaderRotationInternal rotation epoch 4. * LeaderRotationInternal rotation epoch 4. 
--- internal/params/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/params/config.go b/internal/params/config.go index 67e9ff453d..ade35adbde 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -205,8 +205,8 @@ var ( SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16 CrossShardXferPrecompileEpoch: big.NewInt(5), AllowlistEpoch: EpochTBD, - LeaderRotationInternalValidatorsEpoch: big.NewInt(2379), - LeaderRotationExternalValidatorsEpoch: big.NewInt(3173), + LeaderRotationInternalValidatorsEpoch: big.NewInt(4), + LeaderRotationExternalValidatorsEpoch: big.NewInt(4), FeeCollectEpoch: big.NewInt(5), ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), From e15bae129b23c68dcef58385ed787ba882292abf Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 17 Jan 2024 21:50:27 -0400 Subject: [PATCH 112/128] Fix for panic "insertChain failed to update current block" (#4612) --- core/blockchain_impl.go | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index c7f01d4137..fff35bbdd5 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -31,6 +31,8 @@ import ( "sync/atomic" "time" + "github.com/pkg/errors" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/prque" @@ -66,7 +68,6 @@ import ( "github.com/harmony-one/harmony/staking/slash" staking "github.com/harmony-one/harmony/staking/types" lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" ) var ( @@ -1730,21 +1731,16 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i err = NewBlockValidator(bc).ValidateBody(block) } switch { - case err == ErrKnownBlock: - // Block and state both already known. However if the current block is below - // this number we did a rollback and we should reimport it nonetheless. - if bc.CurrentBlock().NumberU64() >= block.NumberU64() { - stats.ignored++ - continue - } + case errors.Is(err, ErrKnownBlock): + return i, events, coalescedLogs, err case err == consensus_engine.ErrFutureBlock: return i, events, coalescedLogs, err - case err == consensus_engine.ErrUnknownAncestor: + case errors.Is(err, consensus_engine.ErrUnknownAncestor): return i, events, coalescedLogs, err - case err == consensus_engine.ErrPrunedAncestor: + case errors.Is(err, consensus_engine.ErrPrunedAncestor): // TODO: add fork choice mechanism // Block competing with the canonical chain, store in the db, but don't process // until the competitor TD goes above the canonical TD @@ -1771,9 +1767,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // Prune in case non-empty winner chain if len(winner) > 0 { // Import all the pruned blocks to make the state available - bc.chainmu.Unlock() _, evs, logs, err := bc.insertChain(winner, true /* verifyHeaders */) - bc.chainmu.Lock() events, coalescedLogs = evs, logs if err != nil { @@ -1908,10 +1902,10 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i // insertStats tracks and reports on block insertion. 
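// With this patch, a known block makes insertChain return ErrKnownBlock to
// the caller instead of being counted and skipped, so nothing increments the
// 'ignored' counter any longer; that is why the field and its report output
// are deleted below.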
type insertStats struct { - queued, processed, ignored int - usedGas uint64 - lastIndex int - startTime mclock.AbsTime + queued, processed int + usedGas uint64 + lastIndex int + startTime mclock.AbsTime } // statsReportLimit is the time limit during import and export after which we @@ -1950,9 +1944,6 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor if st.queued > 0 { context = context.Int("queued", st.queued) } - if st.ignored > 0 { - context = context.Int("ignored", st.ignored) - } logger := context.Logger() logger.Info().Msg("Imported new chain segment") From 17a25223916e33de1b0c294ac2f3c5ae5afffe4c Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Wed, 17 Jan 2024 22:08:26 -0400 Subject: [PATCH 113/128] Recreate trie after revert. (#4608) --- api/service/legacysync/syncing.go | 1 + core/block_validator.go | 2 +- core/blockchain_impl.go | 24 ++++++++++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/api/service/legacysync/syncing.go b/api/service/legacysync/syncing.go index 830eb1d734..a85a5e9d51 100644 --- a/api/service/legacysync/syncing.go +++ b/api/service/legacysync/syncing.go @@ -912,6 +912,7 @@ func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain Uint64("blockEpoch", block.Epoch().Uint64()). Str("blockHex", block.Hash().Hex()). Uint32("ShardID", block.ShardID()). + Err(err). Msg("[SYNC] UpdateBlockAndStatus: Block exists") return nil case err != nil: diff --git a/core/block_validator.go b/core/block_validator.go index 4e097b94d0..7006068321 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -56,7 +56,7 @@ func NewBlockValidator(blockchain BlockChain) *BlockValidator { func (v *BlockValidator) ValidateBody(block *types.Block) error { // Check whether the block's known, and if not, that it's linkable if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) { - return ErrKnownBlock + return errors.WithMessage(ErrKnownBlock, "validate body: has block and state") } if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index fff35bbdd5..e1302e8468 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -782,6 +782,20 @@ func (bc *BlockChainImpl) resetWithGenesisBlock(genesis *types.Block) error { return nil } +func (bc *BlockChainImpl) repairRecreateStateTries(head **types.Block) error { + for { + blk := bc.GetBlockByNumber((*head).NumberU64() + 1) + if blk != nil { + _, _, _, err := bc.insertChain([]*types.Block{blk}, true) + if err != nil { + return err + } + *head = blk + continue + } + } +} + // repair tries to repair the current blockchain by rolling back the current block // until one with associated state is found. This is needed to fix incomplete db // writes caused either by crashes/power outages, or simply non-committed tries. @@ -789,6 +803,16 @@ func (bc *BlockChainImpl) resetWithGenesisBlock(genesis *types.Block) error { // This method only rolls back the current block. The current header and current // fast block are left intact. 
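// After this patch, repair runs in two phases: repairValidatorsAndCommitSigs
// (below) rewinds until a block whose state is intact is found, and
// repairRecreateStateTries (above) then replays the successor blocks through
// insertChain so their state tries are rebuilt.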
func (bc *BlockChainImpl) repair(head **types.Block) error { + if err := bc.repairValidatorsAndCommitSigs(head); err != nil { + return errors.WithMessage(err, "failed to repair validators and commit sigs") + } + if err := bc.repairRecreateStateTries(head); err != nil { + return errors.WithMessage(err, "failed to recreate state tries") + } + return nil +} + +func (bc *BlockChainImpl) repairValidatorsAndCommitSigs(head **types.Block) error { valsToRemove := map[common.Address]struct{}{} for { // Abort if we've rewound to a head block that does have associated state From 51a1ffe350a09f28c7186433e383f19c94e8da91 Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Tue, 23 Jan 2024 14:10:06 +0100 Subject: [PATCH 114/128] prepare devnet reset (#4615) --- internal/configs/sharding/partner.go | 6 +++--- internal/params/config.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/configs/sharding/partner.go b/internal/configs/sharding/partner.go index 5834b5ae31..363f77d8bf 100644 --- a/internal/configs/sharding/partner.go +++ b/internal/configs/sharding/partner.go @@ -106,7 +106,7 @@ var partnerV1 = MustNewInstance( partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch(), ) var partnerV2 = MustNewInstance( - 2, 5, 4, 0, + 2, 20, 4, 0, numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorsDevnet[1], numeric.MustNewDecFromStr("0.25"), @@ -114,8 +114,8 @@ var partnerV2 = MustNewInstance( PartnerSchedule.BlocksPerEpoch(), ) var partnerV3 = MustNewInstance( - 2, 5, 1, 0, - numeric.MustNewDecFromStr("0.1"), genesis.TNHarmonyAccounts, + 2, 20, 1, 0, + numeric.MustNewDecFromStr("0.0"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorsDevnet[1], numeric.MustNewDecFromStr("0.25"), hip30CollectionAddressTestnet, partnerReshardingEpoch, diff --git a/internal/params/config.go b/internal/params/config.go index ade35adbde..e24d753143 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -205,8 +205,8 @@ var ( SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16 CrossShardXferPrecompileEpoch: big.NewInt(5), AllowlistEpoch: EpochTBD, - LeaderRotationInternalValidatorsEpoch: big.NewInt(4), - LeaderRotationExternalValidatorsEpoch: big.NewInt(4), + LeaderRotationInternalValidatorsEpoch: big.NewInt(12), + LeaderRotationExternalValidatorsEpoch: big.NewInt(12), FeeCollectEpoch: big.NewInt(5), ValidatorCodeFixEpoch: big.NewInt(5), HIP30Epoch: big.NewInt(7), From 2441a0b5443102df944ca8a88256c456b392e54f Mon Sep 17 00:00:00 2001 From: Gheis Mohammadi Date: Thu, 25 Jan 2024 16:04:45 +0800 Subject: [PATCH 115/128] Fix fast sync null snapshot and null response issue (#4613) * fix null snapshot issue in chain helper's state sync new functions * add WriteHeaderNumber to writeHeadBlock * improve CurrentBlockNumberto handle unsaved new pivot * fix null response data for full state sync * add log for zero task in state worker loop --- .../stagedstreamsync/stage_statesync_full.go | 20 +++++++++++-------- api/service/stagedstreamsync/syncing.go | 4 ++++ core/blockchain_impl.go | 3 +++ p2p/stream/protocols/sync/chain.go | 18 ++++++++++++++--- 4 files changed, 34 insertions(+), 11 deletions(-) diff --git a/api/service/stagedstreamsync/stage_statesync_full.go b/api/service/stagedstreamsync/stage_statesync_full.go index c1579114b2..f5bd213af5 100644 --- a/api/service/stagedstreamsync/stage_statesync_full.go +++ 
b/api/service/stagedstreamsync/stage_statesync_full.go @@ -190,7 +190,11 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full default: } accountTasks, codes, storages, healtask, codetask, nTasks, err := sdm.GetNextBatch() - if nTasks == 0 || err != nil { + if nTasks == 0 { + utils.Logger().Debug().Msg("the state worker loop received no more tasks") + return + } + if err != nil { select { case <-ctx.Done(): return @@ -199,7 +203,7 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full } } - if len(accountTasks) > 0 { + if accountTasks != nil && len(accountTasks) > 0 { task := accountTasks[0] origin := task.Next @@ -222,8 +226,8 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full utils.Logger().Warn(). Str("stream", string(stid)). Msg(WrapStagedSyncMsg("GetAccountRange failed, received empty accounts")) - err := errors.New("GetAccountRange received empty slots") - sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) + //err := errors.New("GetAccountRange received empty slots") + //sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err) return } if err := sdm.HandleAccountRequestResult(task, retAccounts, proof, origin[:], limit[:], loopID, stid); err != nil { @@ -236,7 +240,7 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full return } - } else if len(codes) > 0 { + } else if codes != nil && len(codes) > 0 { stid, err := sss.downloadByteCodes(ctx, sdm, codes, loopID) if err != nil { @@ -252,7 +256,7 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full return } - } else if len(storages.accounts) > 0 { + } else if storages != nil && len(storages.accounts) > 0 { root := storages.root roots := storages.roots @@ -295,7 +299,7 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full } else { // assign trie node Heal Tasks - if len(healtask.hashes) > 0 { + if healtask != nil && len(healtask.hashes) > 0 { root := healtask.root task := healtask.task hashes := healtask.hashes @@ -334,7 +338,7 @@ func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *Full } } - if len(codetask.hashes) > 0 { + if codetask != nil && len(codetask.hashes) > 0 { task := codetask.task hashes := codetask.hashes bytes := codetask.bytes diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go index c3bc585f21..0db0dd4e2d 100644 --- a/api/service/stagedstreamsync/syncing.go +++ b/api/service/stagedstreamsync/syncing.go @@ -465,6 +465,10 @@ func (s *StagedStreamSync) CurrentBlockNumber() uint64 { return s.bc.CurrentBlock().NumberU64() } + if s.status.pivotBlock != nil && s.bc.CurrentFastBlock().NumberU64() >= s.status.pivotBlock.NumberU64() { + return s.bc.CurrentFastBlock().NumberU64() + } + current := uint64(0) switch s.config.SyncMode { case FullSync: diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index e1302e8468..acf050e916 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -924,6 +924,9 @@ func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error { if err := rawdb.WriteHeadHeaderHash(batch, block.Hash()); err != nil { return err } + if err := rawdb.WriteHeaderNumber(batch, block.Hash(), block.NumberU64()); err != nil { + return err + } isNewEpoch := block.IsLastBlockInEpoch() if isNewEpoch { diff --git a/p2p/stream/protocols/sync/chain.go b/p2p/stream/protocols/sync/chain.go index 
451952bcce..009c7b0afc 100644 --- a/p2p/stream/protocols/sync/chain.go +++ b/p2p/stream/protocols/sync/chain.go @@ -209,7 +209,11 @@ func (ch *chainHelperImpl) getAccountRange(root common.Hash, origin common.Hash, if err != nil { return nil, nil, err } - it, err := ch.chain.Snapshots().AccountIterator(root, origin) + snapshots := ch.chain.Snapshots() + if snapshots == nil { + return nil, nil, errors.Errorf("failed to retrieve snapshots") + } + it, err := snapshots.AccountIterator(root, origin) if err != nil { return nil, nil, err } @@ -275,6 +279,10 @@ func (ch *chainHelperImpl) getStorageRanges(root common.Hash, accounts []common. proofs [][]byte size uint64 ) + snapshots := ch.chain.Snapshots() + if snapshots == nil { + return nil, nil, errors.Errorf("failed to retrieve snapshots") + } for _, account := range accounts { // If we've exceeded the requested data limit, abort without opening // a new storage range (that we'd need to prove due to exceeded size) @@ -284,7 +292,7 @@ func (ch *chainHelperImpl) getStorageRanges(root common.Hash, accounts []common. // The first account might start from a different origin and end sooner // origin==nil or limit ==nil // Retrieve the requested state and bail out if non existent - it, err := ch.chain.Snapshots().StorageIterator(root, account, origin) + it, err := snapshots.StorageIterator(root, account, origin) if err != nil { return nil, nil, err } @@ -409,7 +417,11 @@ func (ch *chainHelperImpl) getTrieNodes(root common.Hash, paths []*message.TrieN return nil, nil } // The 'snap' might be nil, in which case we cannot serve storage slots. - snap := ch.chain.Snapshots().Snapshot(root) + snapshots := ch.chain.Snapshots() + if snapshots == nil { + return nil, errors.Errorf("failed to retrieve snapshots") + } + snap := snapshots.Snapshot(root) // Retrieve trie nodes until the packet size limit is reached var ( nodes [][]byte From 8d24b5beefce9f6965c47208d619d8c29e0ea411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 25 Jan 2024 21:34:56 +0800 Subject: [PATCH 116/128] fix wrong root hash for generating snapshot --- core/blockchain_impl.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index acf050e916..b14bcbe28a 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -354,7 +354,8 @@ func newBlockChainWithOptions( NoBuild: bc.cacheConfig.SnapshotNoBuild, AsyncBuild: !bc.cacheConfig.SnapshotWait, } - bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Hash()) + fmt.Println("loading/generating snapshot...") + bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root()) } curHeader := bc.CurrentBlock().Header() From d1ffd5cc750cf44bb9294f5ccb71c0f7def67d60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 25 Jan 2024 21:35:52 +0800 Subject: [PATCH 117/128] add cache configurations --- cmd/harmony/config.go | 29 ++++++ cmd/harmony/config_migrations.go | 13 ++- cmd/harmony/default.go | 39 ++++++-- cmd/harmony/flags.go | 91 ++++++++++++++--- cmd/harmony/flags_test.go | 150 ++++++++++++++++++---------- cmd/harmony/main.go | 1 + internal/configs/harmony/harmony.go | 13 ++- internal/shardchain/shardchains.go | 22 ++-- 8 files changed, 270 insertions(+), 88 deletions(-) diff --git a/cmd/harmony/config.go b/cmd/harmony/config.go index 5a41f22da8..037221835c 100644 --- 
a/cmd/harmony/config.go +++ b/cmd/harmony/config.go @@ -145,6 +145,35 @@ func getDefaultSyncConfig(nt nodeconfig.NetworkType) harmonyconfig.SyncConfig { } } +func getDefaultCacheConfig(nt nodeconfig.NetworkType) harmonyconfig.CacheConfig { + cacheConfig := harmonyconfig.CacheConfig{ + Disabled: defaultCacheConfig.Disabled, + TrieNodeLimit: defaultCacheConfig.TrieNodeLimit, + TriesInMemory: defaultCacheConfig.TriesInMemory, + TrieTimeLimit: defaultCacheConfig.TrieTimeLimit, + SnapshotLimit: defaultCacheConfig.SnapshotLimit, + SnapshotWait: defaultCacheConfig.SnapshotWait, + Preimages: defaultCacheConfig.Preimages, + SnapshotNoBuild: defaultCacheConfig.SnapshotNoBuild, + } + + switch nt { + case nodeconfig.Mainnet: + cacheConfig.Disabled = true + cacheConfig.Preimages = true + case nodeconfig.Testnet: + cacheConfig.Disabled = false + cacheConfig.Preimages = true + case nodeconfig.Localnet: + cacheConfig.Disabled = false + cacheConfig.Preimages = false + default: + cacheConfig.Disabled = false + cacheConfig.Preimages = true + } + return cacheConfig +} + var configCmd = &cobra.Command{ Use: "config", Short: "dump or update config", diff --git a/cmd/harmony/config_migrations.go b/cmd/harmony/config_migrations.go index 8f222b3d62..0db87d0748 100644 --- a/cmd/harmony/config_migrations.go +++ b/cmd/harmony/config_migrations.go @@ -334,7 +334,7 @@ func init() { migrations["2.5.11"] = func(confTree *toml.Tree) *toml.Tree { if confTree.Get("General.TriesInMemory") == nil { - confTree.Set("General.TriesInMemory", defaultConfig.General.TriesInMemory) + confTree.Set("General.TriesInMemory", defaultConfig.Cache.TriesInMemory) } confTree.Set("Version", "2.5.12") return confTree @@ -405,6 +405,17 @@ func init() { return confTree } + migrations["2.6.0"] = func(confTree *toml.Tree) *toml.Tree { + confTree.Delete("General.TriesInMemory") + + if confTree.Get("Cache") == nil { + confTree.Set("Cache", defaultConfig.Cache) + } + // upgrade minor version because of `Cache` section introduction + confTree.Set("Version", "2.6.1") + return confTree + } + // check that the latest version here is the same as in default.go largestKey := getNextVersion(migrations) if largestKey != tomlConfigVersion { diff --git a/cmd/harmony/default.go b/cmd/harmony/default.go index 86ed4226a5..09c3988054 100644 --- a/cmd/harmony/default.go +++ b/cmd/harmony/default.go @@ -1,13 +1,15 @@ package main import ( + "time" + "github.com/harmony-one/harmony/core" "github.com/harmony-one/harmony/hmy" harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony" nodeconfig "github.com/harmony-one/harmony/internal/configs/node" ) -const tomlConfigVersion = "2.6.0" +const tomlConfigVersion = "2.6.1" const ( defNetworkType = nodeconfig.Mainnet @@ -24,7 +26,6 @@ var defaultConfig = harmonyconfig.HarmonyConfig{ IsOffline: false, DataDir: "./", TraceEnable: false, - TriesInMemory: 128, }, Network: getDefaultNetworkConfig(defNetworkType), P2P: harmonyconfig.P2pConfig{ @@ -131,6 +132,7 @@ var defaultConfig = harmonyconfig.HarmonyConfig{ LowUsageThreshold: hmy.DefaultGPOConfig.LowUsageThreshold, BlockGasLimit: hmy.DefaultGPOConfig.BlockGasLimit, }, + Cache: getDefaultCacheConfig(defNetworkType), } var defaultSysConfig = harmonyconfig.SysConfig{ @@ -176,7 +178,7 @@ var defaultPrometheusConfig = harmonyconfig.PrometheusConfig{ } var defaultStagedSyncConfig = harmonyconfig.StagedSyncConfig{ - TurboMode: true, + TurboMode: false, DoubleCheckBlockHashes: false, MaxBlocksPerSyncCycle: 512, // sync new blocks in each cycle, if set to zero means all 
blocks in one full cycle MaxBackgroundBlocks: 512, // max blocks to be downloaded at background process in turbo mode @@ -228,14 +230,14 @@ var ( Downloader: true, StagedSync: true, StagedSyncCfg: defaultStagedSyncConfig, - Concurrency: 4, - MinPeers: 4, - InitStreams: 4, - MaxAdvertiseWaitTime: 5, //minutes - DiscSoftLowCap: 4, - DiscHardLowCap: 4, + Concurrency: 2, + MinPeers: 2, + InitStreams: 2, + MaxAdvertiseWaitTime: 1, //minutes + DiscSoftLowCap: 2, + DiscHardLowCap: 2, DiscHighCap: 1024, - DiscBatch: 8, + DiscBatch: 3, } defaultPartnerSyncConfig = harmonyconfig.SyncConfig{ @@ -271,6 +273,17 @@ var ( } ) +var defaultCacheConfig = harmonyconfig.CacheConfig{ + Disabled: false, + TrieNodeLimit: 256, + TriesInMemory: 128, + TrieTimeLimit: 2 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: true, + Preimages: true, + SnapshotNoBuild: false, +} + const ( defaultBroadcastInvalidTx = false ) @@ -285,6 +298,7 @@ func getDefaultHmyConfigCopy(nt nodeconfig.NetworkType) harmonyconfig.HarmonyCon } config.Sync = getDefaultSyncConfig(nt) config.DNSSync = getDefaultDNSSyncConfig(nt) + config.Cache = getDefaultCacheConfig(nt) return config } @@ -324,6 +338,11 @@ func getDefaultPrometheusConfigCopy() harmonyconfig.PrometheusConfig { return config } +func getDefaultCacheConfigCopy() harmonyconfig.CacheConfig { + config := defaultCacheConfig + return config +} + const ( nodeTypeValidator = "validator" nodeTypeExplorer = "explorer" diff --git a/cmd/harmony/flags.go b/cmd/harmony/flags.go index 2af21cb24c..a52b7138ff 100644 --- a/cmd/harmony/flags.go +++ b/cmd/harmony/flags.go @@ -32,7 +32,6 @@ var ( legacyDataDirFlag, taraceFlag, - triesInMemoryFlag, } dnsSyncFlags = []cli.Flag{ @@ -268,6 +267,16 @@ var ( gpoBlockGasLimitFlag, } + cacheConfigFlags = []cli.Flag{ + cacheDisabled, + cacheTrieNodeLimit, + cacheTriesInMemory, + cachePreimages, + cacheSnapshotLimit, + cacheSnapshotNoBuild, + cacheSnapshotWait, + } + metricsFlags = []cli.Flag{ metricsETHFlag, metricsExpensiveETHFlag, @@ -352,11 +361,6 @@ var ( Usage: "indicates if full transaction tracing should be enabled", DefValue: defaultConfig.General.TraceEnable, } - triesInMemoryFlag = cli.IntFlag{ - Name: "blockchain.tries_in_memory", - Usage: "number of blocks from header stored in disk before exiting", - DefValue: defaultConfig.General.TriesInMemory, - } ) func getRootFlags() []cli.Flag { @@ -436,14 +440,6 @@ func applyGeneralFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) if cli.IsFlagChanged(cmd, isBackupFlag) { config.General.IsBackup = cli.GetBoolFlagValue(cmd, isBackupFlag) } - - if cli.IsFlagChanged(cmd, triesInMemoryFlag) { - value := cli.GetIntFlagValue(cmd, triesInMemoryFlag) - if value <= 2 { - panic("Must provide number greater than 2 for General.TriesInMemory") - } - config.General.TriesInMemory = value - } } // network flags @@ -2115,3 +2111,70 @@ func applyGPOFlags(cmd *cobra.Command, cfg *harmonyconfig.HarmonyConfig) { cfg.GPO.BlockGasLimit = cli.GetIntFlagValue(cmd, gpoBlockGasLimitFlag) } } + +// cache config flags +var ( + cacheDisabled = cli.BoolFlag{ + Name: "cache.disabled", + Usage: "Whether to disable trie write caching (archive node)", + DefValue: defaultCacheConfig.Disabled, + } + cacheTrieNodeLimit = cli.IntFlag{ + Name: "cache.trie_node_limit", + Usage: " Memory limit (MB) at which to flush the current in-memory trie to disk", + DefValue: defaultCacheConfig.TrieNodeLimit, + } + cacheTriesInMemory = cli.Uint64Flag{ + Name: "cache.tries_in_memory", + Usage: "Block number from the head stored in disk before 
exiting", + DefValue: defaultCacheConfig.TriesInMemory, + } + cachePreimages = cli.BoolFlag{ + Name: "cache.preimages", + Usage: "Whether to store preimage of trie key to the disk", + DefValue: defaultCacheConfig.Preimages, + } + cacheSnapshotLimit = cli.IntFlag{ + Name: "cache.snapshot_limit", + Usage: "Memory allowance (MB) to use for caching snapshot entries in memory", + DefValue: defaultCacheConfig.SnapshotLimit, + } + cacheSnapshotNoBuild = cli.BoolFlag{ + Name: "cache.snapshot_no_build", + Usage: "Whether the background generation is allowed", + DefValue: defaultCacheConfig.SnapshotNoBuild, + } + cacheSnapshotWait = cli.BoolFlag{ + Name: "cache.snapshot_wait", + Usage: "Wait for snapshot construction on startup", + DefValue: defaultCacheConfig.SnapshotWait, + } +) + +func applyCacheFlags(cmd *cobra.Command, cfg *harmonyconfig.HarmonyConfig) { + if cli.IsFlagChanged(cmd, cacheDisabled) { + cfg.Cache.Disabled = cli.GetBoolFlagValue(cmd, cacheDisabled) + } + if cli.IsFlagChanged(cmd, cacheTrieNodeLimit) { + cfg.Cache.TrieNodeLimit = cli.GetIntFlagValue(cmd, cacheTrieNodeLimit) + } + if cli.IsFlagChanged(cmd, cacheTriesInMemory) { + value := cli.GetUint64FlagValue(cmd, cacheTriesInMemory) + if value <= 2 { + panic("Must provide number greater than 2 for Cache.TriesInMemory") + } + cfg.Cache.TriesInMemory = value + } + if cli.IsFlagChanged(cmd, cachePreimages) { + cfg.Cache.Preimages = cli.GetBoolFlagValue(cmd, cachePreimages) + } + if cli.IsFlagChanged(cmd, cacheSnapshotLimit) { + cfg.Cache.SnapshotLimit = cli.GetIntFlagValue(cmd, cacheSnapshotLimit) + } + if cli.IsFlagChanged(cmd, cacheSnapshotNoBuild) { + cfg.Cache.SnapshotNoBuild = cli.GetBoolFlagValue(cmd, cacheSnapshotNoBuild) + } + if cli.IsFlagChanged(cmd, cacheSnapshotWait) { + cfg.Cache.SnapshotWait = cli.GetBoolFlagValue(cmd, cacheSnapshotWait) + } +} diff --git a/cmd/harmony/flags_test.go b/cmd/harmony/flags_test.go index bea0e0eabe..ffe261b39a 100644 --- a/cmd/harmony/flags_test.go +++ b/cmd/harmony/flags_test.go @@ -37,12 +37,11 @@ func TestHarmonyFlags(t *testing.T) { expConfig: harmonyconfig.HarmonyConfig{ Version: tomlConfigVersion, General: harmonyconfig.GeneralConfig{ - NodeType: "validator", - NoStaking: false, - ShardID: -1, - IsArchival: false, - DataDir: "./", - TriesInMemory: 128, + NodeType: "validator", + NoStaking: false, + ShardID: -1, + IsArchival: false, + DataDir: "./", }, Network: harmonyconfig.NetworkConfig{ NetworkType: "mainnet", @@ -183,6 +182,16 @@ func TestHarmonyFlags(t *testing.T) { LowUsageThreshold: defaultConfig.GPO.LowUsageThreshold, BlockGasLimit: defaultConfig.GPO.BlockGasLimit, }, + Cache: harmonyconfig.CacheConfig{ + Disabled: defaultConfig.Cache.Disabled, + TrieNodeLimit: defaultCacheConfig.TrieNodeLimit, + TriesInMemory: defaultConfig.Cache.TriesInMemory, + TrieTimeLimit: defaultConfig.Cache.TrieTimeLimit, + SnapshotLimit: defaultConfig.Cache.SnapshotLimit, + SnapshotWait: defaultConfig.Cache.SnapshotWait, + Preimages: defaultConfig.Cache.Preimages, + SnapshotNoBuild: defaultConfig.Cache.SnapshotNoBuild, + }, }, }, } @@ -208,80 +217,63 @@ func TestGeneralFlags(t *testing.T) { { args: []string{}, expConfig: harmonyconfig.GeneralConfig{ - NodeType: "validator", - NoStaking: false, - ShardID: -1, - IsArchival: false, - DataDir: "./", - TriesInMemory: 128, + NodeType: "validator", + NoStaking: false, + ShardID: -1, + IsArchival: false, + DataDir: "./", }, }, { args: []string{"--run", "explorer", "--run.legacy", "--run.shard=0", "--run.archive=true", "--datadir=./.hmy"}, expConfig: 
harmonyconfig.GeneralConfig{ - NodeType: "explorer", - NoStaking: true, - ShardID: 0, - IsArchival: true, - DataDir: "./.hmy", - TriesInMemory: 128, + NodeType: "explorer", + NoStaking: true, + ShardID: 0, + IsArchival: true, + DataDir: "./.hmy", }, }, { args: []string{"--node_type", "explorer", "--staking", "--shard_id", "0", "--is_archival", "--db_dir", "./"}, expConfig: harmonyconfig.GeneralConfig{ - NodeType: "explorer", - NoStaking: false, - ShardID: 0, - IsArchival: true, - DataDir: "./", - TriesInMemory: 128, + NodeType: "explorer", + NoStaking: false, + ShardID: 0, + IsArchival: true, + DataDir: "./", }, }, { args: []string{"--staking=false", "--is_archival=false"}, expConfig: harmonyconfig.GeneralConfig{ - NodeType: "validator", - NoStaking: true, - ShardID: -1, - IsArchival: false, - DataDir: "./", - TriesInMemory: 128, + NodeType: "validator", + NoStaking: true, + ShardID: -1, + IsArchival: false, + DataDir: "./", }, }, { args: []string{"--run", "explorer", "--run.shard", "0"}, expConfig: harmonyconfig.GeneralConfig{ - NodeType: "explorer", - NoStaking: false, - ShardID: 0, - IsArchival: false, - DataDir: "./", - TriesInMemory: 128, + NodeType: "explorer", + NoStaking: false, + ShardID: 0, + IsArchival: false, + DataDir: "./", }, }, { args: []string{"--run", "explorer", "--run.shard", "0", "--run.archive=false"}, expConfig: harmonyconfig.GeneralConfig{ - NodeType: "explorer", - NoStaking: false, - ShardID: 0, - IsArchival: false, - DataDir: "./", - TriesInMemory: 128, - }, - }, - { - args: []string{"--blockchain.tries_in_memory", "64"}, - expConfig: harmonyconfig.GeneralConfig{ - NodeType: "validator", - NoStaking: false, - ShardID: -1, - IsArchival: false, - DataDir: "./", - TriesInMemory: 64, + NodeType: "explorer", + NoStaking: false, + ShardID: 0, + IsArchival: false, + DataDir: "./", }, }, } @@ -1435,6 +1427,58 @@ func TestGPOFlags(t *testing.T) { } } +func TestCacheFlags(t *testing.T) { + tests := []struct { + args []string + expConfig harmonyconfig.CacheConfig + expErr error + }{ + { + args: []string{}, + expConfig: harmonyconfig.CacheConfig{ + Disabled: true, // based on network type + TrieNodeLimit: defaultCacheConfig.TrieNodeLimit, + TriesInMemory: defaultCacheConfig.TriesInMemory, + TrieTimeLimit: defaultCacheConfig.TrieTimeLimit, + SnapshotLimit: defaultCacheConfig.SnapshotLimit, + SnapshotWait: defaultCacheConfig.SnapshotWait, + Preimages: defaultCacheConfig.Preimages, // based on network type + SnapshotNoBuild: defaultCacheConfig.SnapshotNoBuild, + }, + }, + { + args: []string{"--cache.disabled=true", "--cache.trie_node_limit", "512", "--cache.tries_in_memory", "256", "--cache.preimages=false", "--cache.snapshot_limit", "512", "--cache.snapshot_no_build=true", "--cache.snapshot_wait=false"}, + expConfig: harmonyconfig.CacheConfig{ + Disabled: true, + TrieNodeLimit: 512, + TriesInMemory: 256, + TrieTimeLimit: 2 * time.Minute, + SnapshotLimit: 512, + SnapshotWait: false, + Preimages: false, + SnapshotNoBuild: true, + }, + }, + } + + for i, test := range tests { + ts := newFlagTestSuite(t, cacheConfigFlags, applyCacheFlags) + hc, err := ts.run(test.args) + + if assErr := assertError(err, test.expErr); assErr != nil { + t.Fatalf("Test %v: %v", i, assErr) + } + if err != nil || test.expErr != nil { + continue + } + + if !reflect.DeepEqual(hc.Cache, test.expConfig) { + t.Errorf("Test %v:\n\t%+v\n\t%+v", i, hc.Cache, test.expConfig) + } + ts.tearDown() + } +} + func TestDevnetFlags(t *testing.T) { tests := []struct { args []string diff --git a/cmd/harmony/main.go 
b/cmd/harmony/main.go index ec05e2419f..03fef53bed 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -245,6 +245,7 @@ func applyRootFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) { applySyncFlags(cmd, config) applyShardDataFlags(cmd, config) applyGPOFlags(cmd, config) + applyCacheFlags(cmd, config) } func setupNodeLog(config harmonyconfig.HarmonyConfig) { diff --git a/internal/configs/harmony/harmony.go b/internal/configs/harmony/harmony.go index 7ff2501481..276c90d05a 100644 --- a/internal/configs/harmony/harmony.go +++ b/internal/configs/harmony/harmony.go @@ -37,6 +37,7 @@ type HarmonyConfig struct { ShardData ShardDataConfig GPO GasPriceOracleConfig Preimage *PreimageConfig + Cache CacheConfig } func (hc HarmonyConfig) ToRPCServerConfig() nodeconfig.RPCServerConfig { @@ -138,7 +139,6 @@ type GeneralConfig struct { TraceEnable bool EnablePruneBeaconChain bool RunElasticMode bool - TriesInMemory int } type TiKVConfig struct { @@ -306,6 +306,17 @@ type RevertConfig struct { RevertBefore int } +type CacheConfig struct { + Disabled bool // Whether to disable trie write caching (archive node) + TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk + TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk + TriesInMemory uint64 // Block number from the head stored in disk before exiting + Preimages bool // Whether to store preimage of trie key to the disk + SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory + SnapshotNoBuild bool // Whether the background generation is allowed + SnapshotWait bool // Wait for snapshot construction on startup +} + type PreimageConfig struct { ImportFrom string ExportTo string diff --git a/internal/shardchain/shardchains.go b/internal/shardchain/shardchains.go index 5da1b9186f..6a9e9230ac 100644 --- a/internal/shardchain/shardchains.go +++ b/internal/shardchain/shardchains.go @@ -3,7 +3,6 @@ package shardchain import ( "math/big" "sync" - "time" "github.com/harmony-one/harmony/core/state" harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony" @@ -110,14 +109,19 @@ func (sc *CollectionImpl) ShardChain(shardID uint32, options ...core.Options) (c Uint32("shardID", shardID). 
Msg("disable cache, running in archival mode") } else { - cacheConfig = &core.CacheConfig{ - TrieNodeLimit: 256, - TrieTimeLimit: 2 * time.Minute, - TriesInMemory: 128, - Preimages: true, - } - if sc.harmonyconfig != nil { - cacheConfig.TriesInMemory = uint64(sc.harmonyconfig.General.TriesInMemory) + hc := sc.harmonyconfig + if hc != nil { + cacheConfig = &core.CacheConfig{ + Disabled: hc.Cache.Disabled, + TrieNodeLimit: hc.Cache.TrieNodeLimit, + TrieTimeLimit: hc.Cache.TrieTimeLimit, + TriesInMemory: hc.Cache.TriesInMemory, + SnapshotLimit: hc.Cache.SnapshotLimit, + SnapshotWait: hc.Cache.SnapshotWait, + Preimages: hc.Cache.Preimages, + } + } else { + cacheConfig = nil } } From 9b42870e09e34c333ec2e86215bb4894fff63c92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Thu, 25 Jan 2024 22:20:56 +0800 Subject: [PATCH 118/128] return back sync default settings --- cmd/harmony/default.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/harmony/default.go b/cmd/harmony/default.go index 09c3988054..22b964b997 100644 --- a/cmd/harmony/default.go +++ b/cmd/harmony/default.go @@ -178,7 +178,7 @@ var defaultPrometheusConfig = harmonyconfig.PrometheusConfig{ } var defaultStagedSyncConfig = harmonyconfig.StagedSyncConfig{ - TurboMode: false, + TurboMode: true, DoubleCheckBlockHashes: false, MaxBlocksPerSyncCycle: 512, // sync new blocks in each cycle, if set to zero means all blocks in one full cycle MaxBackgroundBlocks: 512, // max blocks to be downloaded at background process in turbo mode @@ -230,14 +230,14 @@ var ( Downloader: true, StagedSync: true, StagedSyncCfg: defaultStagedSyncConfig, - Concurrency: 2, - MinPeers: 2, - InitStreams: 2, - MaxAdvertiseWaitTime: 1, //minutes - DiscSoftLowCap: 2, - DiscHardLowCap: 2, + Concurrency: 4, + MinPeers: 4, + InitStreams: 4, + MaxAdvertiseWaitTime: 5, //minutes + DiscSoftLowCap: 4, + DiscHardLowCap: 4, DiscHighCap: 1024, - DiscBatch: 3, + DiscBatch: 8, } defaultPartnerSyncConfig = harmonyconfig.SyncConfig{ From f3fc63395c33d56da0d08401ad07805ead895d46 Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Thu, 25 Jan 2024 11:22:02 -0400 Subject: [PATCH 119/128] Fix for revert. (#4617) * Delete blocks from chain. * Write Head block. --- core/blockchain_impl.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index acf050e916..7baadce769 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -821,6 +821,9 @@ func (bc *BlockChainImpl) repairValidatorsAndCommitSigs(head **types.Block) erro Str("number", (*head).Number().String()). Str("hash", (*head).Hash().Hex()). Msg("Rewound blockchain to past state") + if err := rawdb.WriteHeadBlockHash(bc.db, (*head).Hash()); err != nil { + return errors.WithMessagef(err, "failed to write head block hash number %d", (*head).NumberU64()) + } return bc.removeInValidatorList(valsToRemove) } // Repair last commit sigs @@ -828,6 +831,14 @@ func (bc *BlockChainImpl) repairValidatorsAndCommitSigs(head **types.Block) erro sigAndBitMap := append(lastSig[:], (*head).Header().LastCommitBitmap()...) 
bc.WriteCommitSig((*head).NumberU64()-1, sigAndBitMap) + err := rawdb.DeleteBlock(bc.db, (*head).Hash(), (*head).NumberU64()) + if err != nil { + return errors.WithMessagef(err, "failed to delete block %d", (*head).NumberU64()) + } + if err := rawdb.WriteHeadBlockHash(bc.db, (*head).ParentHash()); err != nil { + return errors.WithMessagef(err, "failed to write head block hash number %d", (*head).NumberU64()-1) + } + // Otherwise rewind one block and recheck state availability there for _, stkTxn := range (*head).StakingTransactions() { if stkTxn.StakingType() == staking.DirectiveCreateValidator { From fe72c4f37a26a2e25047ac9b734589ea93fe961a Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Fri, 26 Jan 2024 05:31:39 +0100 Subject: [PATCH 120/128] activate devnet external hardfork (#4619) --- internal/configs/sharding/partner.go | 2 +- internal/params/config.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/configs/sharding/partner.go b/internal/configs/sharding/partner.go index 363f77d8bf..bbf5dccb4a 100644 --- a/internal/configs/sharding/partner.go +++ b/internal/configs/sharding/partner.go @@ -114,7 +114,7 @@ var partnerV2 = MustNewInstance( PartnerSchedule.BlocksPerEpoch(), ) var partnerV3 = MustNewInstance( - 2, 20, 1, 0, + 2, 20, 0, 0, numeric.MustNewDecFromStr("0.0"), genesis.TNHarmonyAccounts, genesis.TNFoundationalAccounts, emptyAllowlist, feeCollectorsDevnet[1], numeric.MustNewDecFromStr("0.25"), diff --git a/internal/params/config.go b/internal/params/config.go index e24d753143..07a8a59178 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -212,7 +212,7 @@ var ( HIP30Epoch: big.NewInt(7), BlockGas30MEpoch: big.NewInt(7), MaxRateEpoch: EpochTBD, - DevnetExternalEpoch: EpochTBD, + DevnetExternalEpoch: big.NewInt(135), } // StressnetChainConfig contains the chain parameters for the Stress test network. From 0066c2e6bcc6a656302845c7f6a99e574455ca96 Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Sat, 27 Jan 2024 01:19:59 +0100 Subject: [PATCH 121/128] push the activation of the external devnet epoch (#4620) --- internal/params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/params/config.go b/internal/params/config.go index 07a8a59178..9fcfc14088 100644 --- a/internal/params/config.go +++ b/internal/params/config.go @@ -212,7 +212,7 @@ var ( HIP30Epoch: big.NewInt(7), BlockGas30MEpoch: big.NewInt(7), MaxRateEpoch: EpochTBD, - DevnetExternalEpoch: big.NewInt(135), + DevnetExternalEpoch: big.NewInt(142), } // StressnetChainConfig contains the chain parameters for the Stress test network. From 0082c4d7a0ae3dcf05e3b33424ad0df6843ae523 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CGheisMohammadi=E2=80=9D?= <36589218+GheisMohammadi@users.noreply.github.com> Date: Mon, 29 Jan 2024 14:01:05 +0800 Subject: [PATCH 122/128] add system log for creating/loading snapshot --- core/blockchain_impl.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go index b14bcbe28a..224ccdb424 100644 --- a/core/blockchain_impl.go +++ b/core/blockchain_impl.go @@ -355,6 +355,9 @@ func newBlockChainWithOptions( AsyncBuild: !bc.cacheConfig.SnapshotWait, } fmt.Println("loading/generating snapshot...") + utils.Logger().Info(). + Str("Root", head.Root().Hex()). 
+ Msg("loading/generating snapshot") bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root()) } From ae4ffeb09abc3763cbf0c209160d8de0ee43f6ff Mon Sep 17 00:00:00 2001 From: Gheis Mohammadi Date: Tue, 30 Jan 2024 23:45:03 +0800 Subject: [PATCH 123/128] fix allowed txs to be able to handle multiple txs for same from address (#4623) * fix allowed txs to be able to handle multiple txs for same from address * improve tx data checking for allowed txs Co-authored-by: Diego Nava <8563843+diego1q2w@users.noreply.github.com> --------- Co-authored-by: Diego Nava <8563843+diego1q2w@users.noreply.github.com> --- cmd/harmony/main.go | 10 ++++----- cmd/harmony/main_test.go | 44 +++++++++++++++++++++++----------------- core/tx_pool.go | 24 ++++++++++++++-------- node/node.go | 2 +- 4 files changed, 47 insertions(+), 33 deletions(-) diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go index 03fef53bed..31332b2e6e 100644 --- a/cmd/harmony/main.go +++ b/cmd/harmony/main.go @@ -1062,8 +1062,8 @@ func setupBlacklist(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address]struc return addrMap, nil } -func parseAllowedTxs(data []byte) (map[ethCommon.Address]core.AllowedTxData, error) { - allowedTxs := make(map[ethCommon.Address]core.AllowedTxData) +func parseAllowedTxs(data []byte) (map[ethCommon.Address][]core.AllowedTxData, error) { + allowedTxs := make(map[ethCommon.Address][]core.AllowedTxData) for _, line := range strings.Split(string(data), "\n") { line = strings.TrimSpace(line) if len(line) != 0 { // AllowedTxs file may have trailing empty string line @@ -1084,16 +1084,16 @@ func parseAllowedTxs(data []byte) (map[ethCommon.Address]core.AllowedTxData, err if err != nil { return nil, err } - allowedTxs[from] = core.AllowedTxData{ + allowedTxs[from] = append(allowedTxs[from], core.AllowedTxData{ To: to, Data: data, - } + }) } } return allowedTxs, nil } -func setupAllowedTxs(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address]core.AllowedTxData, error) { +func setupAllowedTxs(hc harmonyconfig.HarmonyConfig) (map[ethCommon.Address][]core.AllowedTxData, error) { utils.Logger().Debug().Msgf("Using AllowedTxs file at `%s`", hc.TxPool.AllowedTxsFile) data, err := os.ReadFile(hc.TxPool.AllowedTxsFile) if err != nil { diff --git a/cmd/harmony/main_test.go b/cmd/harmony/main_test.go index 0ee836f337..c6b2db9c4a 100644 --- a/cmd/harmony/main_test.go +++ b/cmd/harmony/main_test.go @@ -16,22 +16,26 @@ func TestAllowedTxsParse(t *testing.T) { one1s4dvv454dtmkzsulffz3epewsyhrjq9y0g3fqz->0x985458E523dB3d53125813eD68c274899e9DfAb4:0xa9059cbb one1s4dvv454dtmkzsulffz3epewsyhrjq9y0g3fqz->one10fhdp2g9q5azrs2ukk608x6krd4rleg0ueskug:0x `) - expected := map[ethCommon.Address]core.AllowedTxData{ - common.HexToAddress("0x7A6Ed0a905053A21C15cB5b4F39b561B6A3FE50f"): core.AllowedTxData{ - To: common.HexToAddress("0x855Ac656956AF761439f4a451c872E812E3900a4"), - Data: common.FromHex("0x"), + expected := map[ethCommon.Address][]core.AllowedTxData{ + common.HexToAddress("0x7A6Ed0a905053A21C15cB5b4F39b561B6A3FE50f"): { + core.AllowedTxData{ + To: common.HexToAddress("0x855Ac656956AF761439f4a451c872E812E3900a4"), + Data: common.FromHex("0x"), + }, + core.AllowedTxData{ + To: common.HexToAddress("0x985458E523dB3d53125813eD68c274899e9DfAb4"), + Data: common.FromHex("0xa9059cbb"), + }, }, - common.HexToAddress("0x7A6Ed0a905053A21C15cB5b4F39b561B6A3FE50f"): core.AllowedTxData{ - To: common.HexToAddress("0x985458E523dB3d53125813eD68c274899e9DfAb4"), - Data: common.FromHex("0xa9059cbb"), - }, - 
common.HexToAddress("0x855Ac656956AF761439f4a451c872E812E3900a4"): core.AllowedTxData{ - To: common.HexToAddress("0x985458E523dB3d53125813eD68c274899e9DfAb4"), - Data: common.FromHex("0xa9059cbb"), - }, - common.HexToAddress("0x855Ac656956AF761439f4a451c872E812E3900a4"): core.AllowedTxData{ - To: common.HexToAddress("0x7A6Ed0a905053A21C15cB5b4F39b561B6A3FE50f"), - Data: common.FromHex("0x"), + common.HexToAddress("0x855Ac656956AF761439f4a451c872E812E3900a4"): { + core.AllowedTxData{ + To: common.HexToAddress("0x985458E523dB3d53125813eD68c274899e9DfAb4"), + Data: common.FromHex("0xa9059cbb"), + }, + core.AllowedTxData{ + To: common.HexToAddress("0x7A6Ed0a905053A21C15cB5b4F39b561B6A3FE50f"), + Data: common.FromHex("0x"), + }, }, } got, err := parseAllowedTxs(testData) @@ -41,10 +45,12 @@ func TestAllowedTxsParse(t *testing.T) { if len(got) != len(expected) { t.Errorf("lenght of allowed transactions not equal, got: %d expected: %d", len(got), len(expected)) } - for from, txData := range got { - expectedTxData := expected[from] - if expectedTxData.To != txData.To || !bytes.Equal(expectedTxData.Data, txData.Data) { - t.Errorf("txData not equal: got: %v expected: %v", txData, expectedTxData) + for from, txsData := range got { + for i, txData := range txsData { + expectedTxData := expected[from][i] + if expectedTxData.To != txData.To || !bytes.Equal(expectedTxData.Data, txData.Data) { + t.Errorf("txData not equal: got: %v expected: %v", txData, expectedTxData) + } } } } diff --git a/core/tx_pool.go b/core/tx_pool.go index 2457da3854..66254a4781 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -172,8 +172,8 @@ type TxPoolConfig struct { AddEvent func(tx types.PoolTransaction, local bool) // Fire add event - Blacklist map[common.Address]struct{} // Set of accounts that cannot be a part of any transaction - AllowedTxs map[common.Address]AllowedTxData // Set of allowed transactions can break the blocklist + Blacklist map[common.Address]struct{} // Set of accounts that cannot be a part of any transaction + AllowedTxs map[common.Address][]AllowedTxData // Set of allowed transactions can break the blocklist } // DefaultTxPoolConfig contains the default configurations for the transaction @@ -193,7 +193,7 @@ var DefaultTxPoolConfig = TxPoolConfig{ Lifetime: 30 * time.Minute, // --txpool.lifetime Blacklist: map[common.Address]struct{}{}, - AllowedTxs: map[common.Address]AllowedTxData{}, + AllowedTxs: map[common.Address][]AllowedTxData{}, } // sanitize checks the provided user configurations and changes anything that's @@ -753,12 +753,20 @@ func (pool *TxPool) validateTx(tx types.PoolTransaction, local bool) error { } // do whitelist check first, if tx not in whitelist, do blacklist check - if allowedTx, exists := pool.config.AllowedTxs[from]; exists { - if to := tx.To(); to == nil || *to != allowedTx.To || !bytes.Equal(tx.Data(), allowedTx.Data) { - toAddr := common.Address{} - if to != nil { - toAddr = *to + if allowedTxs, exists := pool.config.AllowedTxs[from]; exists { + txIsAllowed := false + to := tx.To() + toAddr := common.Address{} + if to != nil { + toAddr = *to + for _, allowedTx := range allowedTxs { + if toAddr == allowedTx.To && bytes.Equal(tx.Data(), allowedTx.Data) { + txIsAllowed = true + break + } } + } + if !txIsAllowed { return errors.WithMessagef(ErrAllowedTxs, "transaction sender: %x, receiver: %x, input: %x", tx.From(), toAddr, tx.Data()) } } else { diff --git a/node/node.go b/node/node.go index d815f86ece..841d6330dd 100644 --- a/node/node.go +++ b/node/node.go @@ -1022,7 +1022,7 
@@ func New( host p2p.Host, consensusObj *consensus.Consensus, blacklist map[common.Address]struct{}, - allowedTxs map[common.Address]core.AllowedTxData, + allowedTxs map[common.Address][]core.AllowedTxData, localAccounts []common.Address, harmonyconfig *harmonyconfig.HarmonyConfig, registry *registry.Registry, From 8d5f20f998ced4e64f2c23661e6ca6ad7cf2a00e Mon Sep 17 00:00:00 2001 From: Konstantin <355847+Frozen@users.noreply.github.com> Date: Tue, 30 Jan 2024 17:08:40 -0400 Subject: [PATCH 124/128] Removed outdated flag, additional checks and simplified logic. (#4621) * Removed outdated flag and simplified logic. * Removed outdated flag and simplified logic. * Added additional logs. --- consensus/consensus_v2.go | 41 +++++++++++------------ core/blockchain_leader_rotation.go | 32 ++++++------------ core/blockchain_leader_rotation_test.go | 43 +++++++------------------ 3 files changed, 42 insertions(+), 74 deletions(-) diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go index f4b8c56f09..9780accb7b 100644 --- a/consensus/consensus_v2.go +++ b/consensus/consensus_v2.go @@ -679,7 +679,7 @@ func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMess // rotateLeader rotates the leader to the next leader in the committee. // This function must be called with enabled leader rotation. -func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper { +func (consensus *Consensus) rotateLeader(epoch *big.Int, defaultKey *bls.PublicKeyWrapper) *bls.PublicKeyWrapper { var ( bc = consensus.Blockchain() leader = consensus.getLeaderPubKey() @@ -687,31 +687,32 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper { curNumber = curBlock.NumberU64() curEpoch = curBlock.Epoch().Uint64() ) + if epoch.Uint64() != curEpoch { + return defaultKey + } const blocksCountAliveness = 4 - utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotationInternalValidators(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch)) ss, err := bc.ReadShardState(epoch) if err != nil { utils.Logger().Error().Err(err).Msg("Failed to read shard state") - return nil + return defaultKey } committee, err := ss.FindCommitteeByID(consensus.ShardID) if err != nil { utils.Logger().Error().Err(err).Msg("Failed to find committee") - return nil + return defaultKey } slotsCount := len(committee.Slots) blocksPerEpoch := shard.Schedule.InstanceForEpoch(epoch).BlocksPerEpoch() if blocksPerEpoch == 0 { utils.Logger().Error().Msg("[Rotating leader] blocks per epoch is 0") - return nil + return defaultKey } if slotsCount == 0 { utils.Logger().Error().Msg("[Rotating leader] slots count is 0") - return nil + return defaultKey } numBlocksProducedByLeader := blocksPerEpoch / uint64(slotsCount) - rest := blocksPerEpoch % uint64(slotsCount) const minimumBlocksForLeaderInRow = blocksCountAliveness if numBlocksProducedByLeader < minimumBlocksForLeaderInRow { // mine no less than 3 blocks in a row @@ -720,15 +721,11 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper { s := bc.LeaderRotationMeta() if !bytes.Equal(leader.Bytes[:], s.Pub) { // Another leader. - return nil - } - // If it is the first validator producing blocks, it should also produce the remaining 'rest' of the blocks. 
-	if s.Shifts == 0 {
-		numBlocksProducedByLeader += rest
+		return defaultKey
 	}
 	if s.Count < numBlocksProducedByLeader {
 		// Not enough blocks produced by the leader, continue producing by the same leader.
-		return nil
+		return defaultKey
 	}
 	// Passed all checks, we can change leader.
 	// NthNext will move the leader to the next leader in the committee.
@@ -748,7 +745,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper {
 	if !wasFound {
 		utils.Logger().Error().Msg("Failed to get next leader")
 		// Seems like nothing we can do here.
-		return nil
+		return defaultKey
 	}
 	members := consensus.decider.Participants()
 	mask := bls.NewMask(members)
@@ -757,7 +754,7 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper {
 		header := bc.GetHeaderByNumber(curNumber - uint64(i))
 		if header == nil {
 			utils.Logger().Error().Msgf("Failed to get header by number %d", curNumber-uint64(i))
-			return nil
+			return defaultKey
 		}
 		// if epoch is different, we should not check this block.
 		if header.Epoch().Uint64() != curEpoch {
@@ -767,12 +764,12 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) *bls.PublicKeyWrapper {
 		err = mask.SetMask(header.LastCommitBitmap())
 		if err != nil {
 			utils.Logger().Err(err).Msg("Failed to set mask")
-			return nil
+			return defaultKey
 		}
 		ok, err := mask.KeyEnabled(next.Bytes)
 		if err != nil {
 			utils.Logger().Err(err).Msg("Failed to get key enabled")
-			return nil
+			return defaultKey
 		}
 		if !ok {
 			skipped++
@@ -787,14 +784,13 @@
 		}
 		return next
 	}
-	return nil
+	return defaultKey
 }

 // SetupForNewConsensus sets the state for new consensus
 func (consensus *Consensus) setupForNewConsensus(blk *types.Block, committedMsg *FBFTMessage) {
 	atomic.StoreUint64(&consensus.blockNum, blk.NumberU64()+1)
 	consensus.setCurBlockViewID(committedMsg.ViewID + 1)
-	consensus.LeaderPubKey = committedMsg.SenderPubkeys[0]
 	var epoch *big.Int
 	if blk.IsLastBlockInEpoch() {
 		epoch = new(big.Int).Add(blk.Epoch(), common.Big1)
@@ -802,9 +798,14 @@ func (consensus *Consensus) setupForNewConsensus(blk *types.Block, committedMsg
 		epoch = blk.Epoch()
 	}
 	if consensus.Blockchain().Config().IsLeaderRotationInternalValidators(epoch) {
-		if next := consensus.rotateLeader(epoch); next != nil {
+		if next := consensus.rotateLeader(epoch, committedMsg.SenderPubkeys[0]); next != nil {
 			prev := consensus.getLeaderPubKey()
 			consensus.setLeaderPubKey(next)
+			if consensus.isLeader() {
+				utils.Logger().Info().Msgf("We are at block %d, I am the new leader %s", blk.NumberU64(), next.Bytes.Hex())
+			} else {
+				utils.Logger().Info().Msgf("We are at block %d, the leader is %s", blk.NumberU64(), next.Bytes.Hex())
+			}
 			if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
 				// leader changed
 				blockPeriod := consensus.BlockPeriod
diff --git a/core/blockchain_leader_rotation.go b/core/blockchain_leader_rotation.go
index b7cdef5190..8b2683780b 100644
--- a/core/blockchain_leader_rotation.go
+++ b/core/blockchain_leader_rotation.go
@@ -14,10 +14,9 @@ import (

 // LeaderRotationMeta contains information about leader rotation
 type LeaderRotationMeta struct {
-	Pub    []byte // bls public key of previous block miner
-	Epoch  uint64 // epoch number of previously inserted block
-	Count  uint64 // quantity of continuous blocks inserted by the same leader
-	Shifts uint64 // number of leader shifts, shift happens when leader changes
+	Pub   []byte // bls public key of previous block miner
+	Epoch uint64 // epoch number of 
previously inserted block + Count uint64 // quantity of continuous blocks inserted by the same leader } // ShortString returns string representation of the struct @@ -28,8 +27,6 @@ func (a LeaderRotationMeta) ShortString() string { s.WriteString(strconv.FormatUint(a.Epoch, 10)) s.WriteString(" ") s.WriteString(strconv.FormatUint(a.Count, 10)) - s.WriteString(" ") - s.WriteString(strconv.FormatUint(a.Shifts, 10)) return s.String() } @@ -39,17 +36,15 @@ func (a LeaderRotationMeta) Hash() []byte { c.Write(a.Pub) c.Write([]byte(strconv.FormatUint(a.Epoch, 10))) c.Write([]byte(strconv.FormatUint(a.Count, 10))) - c.Write([]byte(strconv.FormatUint(a.Shifts, 10))) return c.Sum(nil) } // Clone returns a copy of the struct func (a LeaderRotationMeta) Clone() LeaderRotationMeta { return LeaderRotationMeta{ - Pub: append([]byte{}, a.Pub...), - Epoch: a.Epoch, - Count: a.Count, - Shifts: a.Shifts, + Pub: append([]byte{}, a.Pub...), + Epoch: a.Epoch, + Count: a.Count, } } @@ -109,19 +104,10 @@ func processRotationMeta(epoch uint64, blockPubKey bls.SerializedPublicKey, s Le } else { s.Count = 1 } - // we should increase shifts if the leader has changed. - if !bytes.Equal(s.Pub, blockPubKey[:]) { - s.Shifts++ - } - // but set to zero if new - if s.Epoch != epoch { - s.Shifts = 0 - } s.Epoch = epoch return LeaderRotationMeta{ - Pub: blockPubKey[:], - Epoch: s.Epoch, - Count: s.Count, - Shifts: s.Shifts, + Pub: blockPubKey[:], + Epoch: s.Epoch, + Count: s.Count, } } diff --git a/core/blockchain_leader_rotation_test.go b/core/blockchain_leader_rotation_test.go index 047dbdd636..e964d39d77 100644 --- a/core/blockchain_leader_rotation_test.go +++ b/core/blockchain_leader_rotation_test.go @@ -12,46 +12,27 @@ var k1 = bls.SerializedPublicKey{1, 2, 3} func TestRotationMetaProcess(t *testing.T) { t.Run("same_leader_increase_count", func(t *testing.T) { rs := processRotationMeta(1, bls.SerializedPublicKey{}, LeaderRotationMeta{ - Pub: bls.SerializedPublicKey{}.Bytes(), - Epoch: 1, - Count: 1, - Shifts: 1, + Pub: bls.SerializedPublicKey{}.Bytes(), + Epoch: 1, + Count: 1, }) require.Equal(t, LeaderRotationMeta{ - Pub: bls.SerializedPublicKey{}.Bytes(), - Epoch: 1, - Count: 2, - Shifts: 1, - }, rs) - }) - - t.Run("new_leader_increase_shifts", func(t *testing.T) { - rs := processRotationMeta(1, k1, LeaderRotationMeta{ - Pub: bls.SerializedPublicKey{}.Bytes(), - Epoch: 1, - Count: 1, - Shifts: 1, - }) - require.Equal(t, LeaderRotationMeta{ - Pub: k1.Bytes(), - Epoch: 1, - Count: 1, - Shifts: 2, + Pub: bls.SerializedPublicKey{}.Bytes(), + Epoch: 1, + Count: 2, }, rs) }) t.Run("new_epoch_reset_count", func(t *testing.T) { rs := processRotationMeta(2, k1, LeaderRotationMeta{ - Pub: bls.SerializedPublicKey{}.Bytes(), - Epoch: 1, - Count: 1, - Shifts: 1, + Pub: bls.SerializedPublicKey{}.Bytes(), + Epoch: 1, + Count: 1, }) require.Equal(t, LeaderRotationMeta{ - Pub: k1.Bytes(), - Epoch: 2, - Count: 1, - Shifts: 0, + Pub: k1.Bytes(), + Epoch: 2, + Count: 1, }, rs) }) } From ca91cb22b6ea7cbf53ddca1466c5c25cce7acc10 Mon Sep 17 00:00:00 2001 From: Diego Nava <8563843+diego1q2w@users.noreply.github.com> Date: Fri, 2 Feb 2024 10:03:19 +0100 Subject: [PATCH 125/128] Skip check when the 100% check is achieved. 
(#4625)

* fix(consensus): skip if all collected
* fix(consensus): add the IsAllSigsCollected
* fix(consensus): remove consensus.decider.IsAllSigsCollected()
* fix(consensus): remove code
---
 consensus/consensus_v2.go | 5 ++++-
 consensus/leader.go       | 8 --------
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go
index 9780accb7b..0aec2537b4 100644
--- a/consensus/consensus_v2.go
+++ b/consensus/consensus_v2.go
@@ -178,7 +178,10 @@ func (consensus *Consensus) finalCommit() {
 		return
 	}
 	consensus.getLogger().Info().Hex("new", commitSigAndBitmap).Msg("[finalCommit] Overriding commit signatures!!")
-	consensus.Blockchain().WriteCommitSig(block.NumberU64(), commitSigAndBitmap)
+
+	if err := consensus.Blockchain().WriteCommitSig(block.NumberU64(), commitSigAndBitmap); err != nil {
+		consensus.getLogger().Warn().Err(err).Msg("[finalCommit] failed writing commit sig")
+	}

 	// Send committed message before block insertion.
 	// if leader successfully finalizes the block, send committed message to validators
diff --git a/consensus/leader.go b/consensus/leader.go
index 747be1eb70..5740493024 100644
--- a/consensus/leader.go
+++ b/consensus/leader.go
@@ -285,14 +285,6 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
 	//// Read - Start
 	viewID := consensus.getCurBlockViewID()

-	if consensus.decider.IsAllSigsCollected() {
-		logger.Info().Msg("[OnCommit] 100% Enough commits received")
-		consensus.finalCommit()
-
-		consensus.msgSender.StopRetry(msg_pb.MessageType_PREPARED)
-		return
-	}
-
 	quorumIsMet := consensus.decider.IsQuorumAchieved(quorum.Commit)
 	//// Read - End

From b53a911a7c225d6ad31fb42e6bbe8abd9f84080e Mon Sep 17 00:00:00 2001
From: Diego Nava <8563843+diego1q2w@users.noreply.github.com>
Date: Tue, 20 Feb 2024 18:08:22 +0100
Subject: [PATCH 126/128] push devnet external and leader rotation epochs
 (#4634)

---
 internal/params/config.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/internal/params/config.go b/internal/params/config.go
index 9fcfc14088..b0c6c70d11 100644
--- a/internal/params/config.go
+++ b/internal/params/config.go
@@ -205,14 +205,14 @@ var (
 		SlotsLimitedEpoch:                     EpochTBD, // epoch to enable HIP-16
 		CrossShardXferPrecompileEpoch:         big.NewInt(5),
 		AllowlistEpoch:                        EpochTBD,
-		LeaderRotationInternalValidatorsEpoch: big.NewInt(12),
-		LeaderRotationExternalValidatorsEpoch: big.NewInt(12),
+		LeaderRotationInternalValidatorsEpoch: big.NewInt(144),
+		LeaderRotationExternalValidatorsEpoch: big.NewInt(144),
 		FeeCollectEpoch:                       big.NewInt(5),
 		ValidatorCodeFixEpoch:                 big.NewInt(5),
 		HIP30Epoch:                            big.NewInt(7),
 		BlockGas30MEpoch:                      big.NewInt(7),
 		MaxRateEpoch:                          EpochTBD,
-		DevnetExternalEpoch:                   big.NewInt(142),
+		DevnetExternalEpoch:                   big.NewInt(144),
 	}

 	// StressnetChainConfig contains the chain parameters for the Stress test network. 
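For readers tracing how these `*Epoch` values take effect: each one is checked through a predicate on `ChainConfig`, so raising `LeaderRotationInternalValidatorsEpoch` from 12 to 144 simply postpones the epoch at which the rotation path in `setupForNewConsensus` activates. Below is a minimal sketch of that gating pattern, assuming a go-ethereum-style `isForked` helper; the exact helper and struct layout in `internal/params/config.go` may differ.

```go
package params

import "math/big"

// ChainConfig is trimmed to the one field this sketch needs; the real struct
// in internal/params/config.go carries one such field per scheduled fork.
type ChainConfig struct {
	LeaderRotationInternalValidatorsEpoch *big.Int
}

// IsLeaderRotationInternalValidators reports whether leader rotation for
// internal validators is active at the given epoch.
func (c *ChainConfig) IsLeaderRotationInternalValidators(epoch *big.Int) bool {
	return isForked(c.LeaderRotationInternalValidatorsEpoch, epoch)
}

// isForked reports whether a fork scheduled at epoch s is active at epoch.
// A nil schedule never activates, and a far-future sentinel such as EpochTBD
// keeps a fork off until a concrete epoch is set.
func isForked(s, epoch *big.Int) bool {
	if s == nil || epoch == nil {
		return false
	}
	return s.Cmp(epoch) <= 0
}
```

With the values above, devnet turns on leader rotation for both internal and external validators at epoch 144, aligned with the bumped `DevnetExternalEpoch`.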
From 138a460e4684c52f7b907629ecfd4ed711495fc4 Mon Sep 17 00:00:00 2001 From: Casey Gardiner <117784577+ONECasey@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:28:40 -0800 Subject: [PATCH 127/128] Update dev to include main hotfixes (#4633) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix allowed txs to be able to handle multiple txs for same from address (#4624) * [HOTFIX] fix leader crosslink issue to not include old cross link in the propo… (#4629) * fix leader crosslink issue to not include old cross link in the proposing block * set higher epoch threshold for pending crosslinks to be added to proposing block * delete old pending cross links * delete when proposing * delete when proposing * delete when proposing * delete when proposing * minor logic change for the log * minor logic change for the log * minor logic change for the log * minor logic change for the log --------- Co-authored-by: Diego Nava * Fix for possible panic. (#4627) * Fix, removed duplicated check. --------- Co-authored-by: Gheis Mohammadi Co-authored-by: Diego Nava Co-authored-by: Konstantin <355847+Frozen@users.noreply.github.com> --- consensus/view_change_construct.go | 2 +- core/state_processor.go | 4 +-- node/node.go | 39 ++++++++++++++++++++++++++++++ node/node_newblock.go | 19 ++++++++++++++- 4 files changed, 60 insertions(+), 4 deletions(-) diff --git a/consensus/view_change_construct.go b/consensus/view_change_construct.go index fcf025e74d..5d25531757 100644 --- a/consensus/view_change_construct.go +++ b/consensus/view_change_construct.go @@ -465,7 +465,7 @@ func (vc *viewChange) InitPayload( if !inited { viewIDBytes := make([]byte, 8) binary.LittleEndian.PutUint64(viewIDBytes, viewID) - vc.getLogger().Info().Uint64("viewID", viewID).Uint64("blockNum", blockNum).Msg("[InitPayload] add my M3 (ViewID) type messaage") + vc.getLogger().Info().Uint64("viewID", viewID).Uint64("blockNum", blockNum).Msg("[InitPayload] add my M3 (ViewID) type message") for _, key := range privKeys { if _, ok := vc.viewIDBitmap[viewID]; !ok { viewIDBitmap := bls_cosi.NewMask(members) diff --git a/core/state_processor.go b/core/state_processor.go index 9ccb256a78..38be4184a8 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -22,8 +22,6 @@ import ( "math/big" "time" - lru "github.com/hashicorp/golang-lru" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" @@ -40,6 +38,7 @@ import ( "github.com/harmony-one/harmony/staking/effective" "github.com/harmony-one/harmony/staking/slash" staking "github.com/harmony-one/harmony/staking/types" + lru "github.com/hashicorp/golang-lru" "github.com/pkg/errors" ) @@ -319,6 +318,7 @@ func ApplyTransaction(bc ChainContext, author *common.Address, gp *GasPool, stat balance = a.String() } return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), to, balance) + } // Update the state with pending changes var root []byte diff --git a/node/node.go b/node/node.go index 841d6330dd..f80c502a0a 100644 --- a/node/node.go +++ b/node/node.go @@ -1178,6 +1178,45 @@ func New( node.serviceManager = service.NewManager() + // delete old pending crosslinks + if node.Blockchain().ShardID() == shard.BeaconChainShardID { + ten := big.NewInt(10) + crossLinkEpochThreshold := new(big.Int).Sub(node.Blockchain().CurrentHeader().Epoch(), ten) + + invalidToDelete := make([]types.CrossLink, 0, 1000) + allPending, err := 
node.Blockchain().ReadPendingCrossLinks()
+		if err == nil {
+			for _, pending := range allPending {
+				// if pending crosslink is older than 10 epochs, delete it
+				if pending.EpochF.Cmp(crossLinkEpochThreshold) <= 0 {
+					invalidToDelete = append(invalidToDelete, pending)
+					utils.Logger().Info().
+						Uint32("shard", pending.ShardID()).
+						Int64("epoch", pending.Epoch().Int64()).
+						Uint64("blockNum", pending.BlockNum()).
+						Int64("viewID", pending.ViewID().Int64()).
+						Interface("hash", pending.Hash()).
+						Msg("[PendingCrossLinksOnInit] delete old pending cross links")
+				}
+			}
+
+			if n, err := node.Blockchain().DeleteFromPendingCrossLinks(invalidToDelete); err != nil {
+				utils.Logger().Error().
+					Err(err).
+					Msg("[PendingCrossLinksOnInit] deleting old pending cross links failed")
+			} else if len(invalidToDelete) > 0 {
+				utils.Logger().Info().
+					Int("not-deleted", n).
+					Int("deleted", len(invalidToDelete)).
+					Msg("[PendingCrossLinksOnInit] deleted old pending cross links")
+			}
+		} else {
+			utils.Logger().Error().
+				Err(err).
+				Msg("[PendingCrossLinksOnInit] read pending cross links failed")
+		}
+	}
+
 	return &node
 }

diff --git a/node/node_newblock.go b/node/node_newblock.go
index fdca8b741b..bafb340a80 100644
--- a/node/node_newblock.go
+++ b/node/node_newblock.go
@@ -1,6 +1,7 @@
 package node

 import (
+	"math/big"
 	"sort"
 	"strings"
 	"time"
@@ -226,11 +227,18 @@ func (node *Node) ProposeNewBlock(commitSigs chan []byte) (*types.Block, error)
 	utils.AnalysisStart("proposeNewBlockVerifyCrossLinks")
 	// Prepare cross links and slashing messages
 	var crossLinksToPropose types.CrossLinks
+	ten := big.NewInt(10)
+	crossLinkEpochThreshold := new(big.Int).Sub(currentHeader.Epoch(), ten)
 	if isBeaconchainInCrossLinkEra {
 		allPending, err := node.Blockchain().ReadPendingCrossLinks()
 		invalidToDelete := []types.CrossLink{}
 		if err == nil {
 			for _, pending := range allPending {
+				// if pending crosslink is older than 10 epochs, delete it and continue. This logic is also applied when the node starts
+				if pending.EpochF.Cmp(crossLinkEpochThreshold) <= 0 {
+					invalidToDelete = append(invalidToDelete, pending)
+					continue
+				}
 				// ReadCrossLink beacon chain usage.
 				exist, err := node.Blockchain().ReadCrossLink(pending.ShardID(), pending.BlockNum())
 				if err == nil || exist != nil {
@@ -263,7 +271,16 @@ func (node *Node) ProposeNewBlock(commitSigs chan []byte) (*types.Block, error)
 					len(allPending),
 				)
 			}
-			node.Blockchain().DeleteFromPendingCrossLinks(invalidToDelete)
+			if n, err := node.Blockchain().DeleteFromPendingCrossLinks(invalidToDelete); err != nil {
+				utils.Logger().Error().
+					Err(err).
+					Msg("[ProposeNewBlock] deleting invalid pending cross links failed")
+			} else if len(invalidToDelete) > 0 {
+				utils.Logger().Info().
+					Int("not-deleted", n).
+					Int("deleted", len(invalidToDelete)).
+					Msg("[ProposeNewBlock] deleted invalid pending cross links")
+			}
 		}

 	utils.AnalysisEnd("proposeNewBlockVerifyCrossLinks")

From 49bba1785e7f5bb2ad207002adf9ef3132e3cc0c Mon Sep 17 00:00:00 2001
From: Konstantin <355847+Frozen@users.noreply.github.com>
Date: Thu, 22 Feb 2024 11:53:16 -0400
Subject: [PATCH 128/128] Fix. 
(#4635) --- scripts/travis_rosetta_checker.sh | 2 ++ scripts/travis_rpc_checker.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/scripts/travis_rosetta_checker.sh b/scripts/travis_rosetta_checker.sh index b2e395fdba..d2f98569f9 100644 --- a/scripts/travis_rosetta_checker.sh +++ b/scripts/travis_rosetta_checker.sh @@ -1,12 +1,14 @@ #!/usr/bin/env bash set -e +echo $TRAVIS_PULL_REQUEST_BRANCH DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" echo $DIR echo $GOPATH cd $GOPATH/src/github.com/harmony-one/harmony-test git fetch git pull +git checkout $TRAVIS_PULL_REQUEST_BRANCH || true git branch --show-current cd localnet docker build -t harmonyone/localnet-test . diff --git a/scripts/travis_rpc_checker.sh b/scripts/travis_rpc_checker.sh index b057452f88..5de2ef93b8 100755 --- a/scripts/travis_rpc_checker.sh +++ b/scripts/travis_rpc_checker.sh @@ -1,11 +1,13 @@ #!/usr/bin/env bash set -e +echo $TRAVIS_PULL_REQUEST_BRANCH DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" echo $DIR echo $GOPATH cd $GOPATH/src/github.com/harmony-one/harmony-test git fetch +git checkout $TRAVIS_PULL_REQUEST_BRANCH || true git pull git branch --show-current cd localnet
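A closing note on the pending-crosslink hygiene from #4633 above: the same staleness rule now runs in two places, once at node start-up in `node.go` and once per block proposal in `node_newblock.go`. In both spots, a crosslink whose epoch is ten or more epochs behind the current header is deleted rather than re-proposed. A condensed sketch of that shared filter follows; `stalePendingCrossLinks` is an illustrative name rather than a function in the codebase, and the real code inlines this loop and compares against the `EpochF` field directly.

```go
package node

import (
	"math/big"

	"github.com/harmony-one/harmony/core/types"
)

// stalePendingCrossLinks returns the pending crosslinks that sit at least ten
// epochs behind currentEpoch and are therefore safe to delete instead of
// being re-proposed.
func stalePendingCrossLinks(currentEpoch *big.Int, pending []types.CrossLink) []types.CrossLink {
	threshold := new(big.Int).Sub(currentEpoch, big.NewInt(10))
	stale := make([]types.CrossLink, 0, len(pending))
	for _, cl := range pending {
		// Epochs at or below the threshold count as stale.
		if cl.Epoch().Cmp(threshold) <= 0 {
			stale = append(stale, cl)
		}
	}
	return stale
}
```

Whatever survives the filter still goes through the normal `ReadCrossLink` duplicate check during proposal, and the deletions are handed to `DeleteFromPendingCrossLinks`, whose return value both call sites now log instead of discarding.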