From 696d17307b67f4c6bdcd093dd54c4df8430e3bbd Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Fri, 13 Dec 2024 16:56:20 +0900
Subject: [PATCH 01/13] netsync: don't reset headersFirstMode on
 resetHeaderState

We only exit headersFirstMode when we have no more checkpoints left.
updateSyncPeer was resetting headersFirstMode by calling
resetHeaderState, only to have it turned back on by startSync().

Resetting headers-first mode when we haven't actually exited it would
cause problems for the later commits that add parallel block downloads,
so remove the reset here.
---
 netsync/manager.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 41e39544..7fd253a7 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -232,7 +232,6 @@ type SyncManager struct {
 // resetHeaderState sets the headers-first mode state to values appropriate for
 // syncing from a new peer.
 func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) {
-	sm.headersFirstMode = false
 	sm.headerList.Init()
 	sm.startHeader = nil

From 5a441fe47ead92d14caaf9ee504a318813b48224 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Fri, 13 Dec 2024 17:08:31 +0900
Subject: [PATCH 02/13] netsync: ignore invs when in headersFirstMode

We were still recording advertised inventory for peers, but since new
blocks and txs are irrelevant during IBD, don't bother tracking or
requesting them.
---
 netsync/manager.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 7fd253a7..5682d53c 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -1498,6 +1498,11 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
 		}
 	}

+	// Don't request on inventory messages when we're in headers-first mode.
+	if sm.headersFirstMode {
+		return
+	}
+
 	// Request the advertised inventory if we don't already have it. Also,
 	// request parent blocks of orphans if we receive one we already have.
 	// Finally, attempt to detect potential stalls due to long side chains
@@ -1540,11 +1545,6 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
 		// for the peer.
 		peer.AddKnownInventory(&addIv)

-		// Ignore inventory when we're in headers-first mode.
-		if sm.headersFirstMode {
-			continue
-		}
-
 		// Request the inventory if we don't already have it.
 		haveInv, err := sm.haveInventory(iv)
 		if err != nil {

From 4eae9331c26a2353680caae9a86fef3fa9349536 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Fri, 13 Dec 2024 17:12:41 +0900
Subject: [PATCH 03/13] netsync: don't use the global requested blocks map
 during headers first

During headers-first mode there's no need for the global map of
requested blocks: it only exists so that, once we're caught up, we have
a quick way to check which blocks were requested without searching the
per-peer requested-blocks maps of every connected peer.
---
 netsync/manager.go | 43 +++++++++++++++++++++++++++++++------------
 1 file changed, 31 insertions(+), 12 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 5682d53c..cc4ced4a 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -362,7 +362,13 @@ func (sm *SyncManager) startSync() {
 		// Clear the requestedBlocks if the sync peer changes, otherwise
 		// we may ignore blocks we need that the last sync peer failed
 		// to send.
-		sm.requestedBlocks = make(map[chainhash.Hash]struct{})
+		//
+		// We don't reset it during headersFirstMode since the global
+		// map is not used while in that mode.
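+		// (In-flight blocks are tracked in the per-peer maps instead.)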
+		if !sm.headersFirstMode {
+			sm.requestedBlocks = make(map[chainhash.Hash]struct{})
+		}

 		locator, err := sm.chain.LatestBlockLocator()
 		if err != nil {
 			log.Errorf("Failed to get block locator for the "+
 				"latest block: %v", err)
 			return
 		}

@@ -614,12 +619,15 @@ func (sm *SyncManager) clearRequestedState(state *peerSyncState) {
 		delete(sm.requestedTxns, txHash)
 	}

-	// Remove requested blocks from the global map so that they will be
-	// fetched from elsewhere next time we get an inv.
-	// TODO: we could possibly here check which peers have these blocks
-	// and request them now to speed things up a little.
-	for blockHash := range state.requestedBlocks {
-		delete(sm.requestedBlocks, blockHash)
+	// The global map of requestedBlocks is not used during headersFirstMode.
+	if !sm.headersFirstMode {
+		// Remove requested blocks from the global map so that they will
+		// be fetched from elsewhere next time we get an inv.
+		// TODO: we could possibly here check which peers have these
+		// blocks and request them now to speed things up a little.
+		for blockHash := range state.requestedBlocks {
+			delete(sm.requestedBlocks, blockHash)
+		}
 	}
 }

@@ -797,7 +805,11 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
 	// so we shouldn't have any more instances of trying to fetch it, or we
 	// will fail the insert and thus we'll retry next time we get an inv.
 	delete(state.requestedBlocks, *blockHash)
-	delete(sm.requestedBlocks, *blockHash)
+	if !sm.headersFirstMode {
+		// The global map of requestedBlocks is not used during
+		// headersFirstMode.
+		delete(sm.requestedBlocks, *blockHash)
+	}

 	// Process the block to include validation, best chain selection, orphan
 	// handling, etc.
@@ -1028,8 +1040,6 @@ func (sm *SyncManager) fetchHeaderBlocks() {
 		}
 		if !haveInv {
 			syncPeerState := sm.peerStates[sm.syncPeer]
-
-			sm.requestedBlocks[*node.hash] = struct{}{}
 			syncPeerState.requestedBlocks[*node.hash] = struct{}{}

 			// If we're fetching from a witness enabled peer
@@ -1327,7 +1337,9 @@ func (sm *SyncManager) handleUtreexoHeaderMsg(hmsg *utreexoHeaderMsg) {
 	log.Debugf("accepted utreexo header for block %v. have %v headers",
 		msg.BlockHash, len(sm.utreexoHeaders))

-	sm.requestedBlocks[msg.BlockHash] = struct{}{}
+	if !sm.headersFirstMode {
+		sm.requestedBlocks[msg.BlockHash] = struct{}{}
+	}
 	peerState.requestedBlocks[msg.BlockHash] = struct{}{}

 	gdmsg := wire.NewMsgGetData()
@@ -1357,7 +1369,11 @@ func (sm *SyncManager) handleNotFoundMsg(nfmsg *notFoundMsg) {
 		case wire.InvTypeBlock:
 			if _, exists := state.requestedBlocks[inv.Hash]; exists {
 				delete(state.requestedBlocks, inv.Hash)
-				delete(sm.requestedBlocks, inv.Hash)
+				// The global map of requestedBlocks is not used
+				// during headersFirstMode.
+				if !sm.headersFirstMode {
+					delete(sm.requestedBlocks, inv.Hash)
+				}
 			}

 		case wire.InvTypeWitnessTx:
@@ -1655,6 +1671,9 @@ func (sm *SyncManager) handleInvMsg(imsg *invMsg) {
 		case wire.InvTypeBlock:
 			// Request the block if there is not already a pending
 			// request.
+			//
+			// No need to check for headers-first mode here since
+			// that was already done earlier in this method.
 			if _, exists := sm.requestedBlocks[iv.Hash]; !exists {
 				amUtreexoNode := sm.chain.IsUtreexoViewActive()
 				if amUtreexoNode {

From dfb60d23aa1100b555ccf45db55d0abd9a580e2d Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Fri, 13 Dec 2024 18:15:37 +0900
Subject: [PATCH 04/13] netsync: don't reset headerList

The headerList includes headers we've already downloaded and verified.
Instead of resetting it and re-downloading them when the syncPeer
disconnects, we reset startHeader to the first header in the list so
that we can re-use the headers and download the blocks associated with
them.
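
As a rough, self-contained sketch of the resume behavior this enables
(toy headerNode type and heights; not the actual SyncManager code):

    package main

    import (
        "container/list"
        "fmt"
    )

    // headerNode stands in for the sync manager's header list entries.
    type headerNode struct {
        height int32
    }

    func main() {
        // Headers verified before the old sync peer disconnected.
        headers := list.New()
        for h := int32(1); h <= 3; h++ {
            headers.PushBack(&headerNode{height: h})
        }

        // Old behavior: headers.Init() dropped the verified headers and
        // forced a re-download. New behavior: keep them and resume block
        // fetching from the front of the list.
        startHeader := headers.Front()
        for e := startHeader; e != nil; e = e.Next() {
            fmt.Printf("fetch block at height %d\n",
                e.Value.(*headerNode).height)
        }
    }
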
---
 netsync/manager.go | 62 +++++++++++++++++++++++++++++++-----------
 1 file changed, 47 insertions(+), 15 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index cc4ced4a..3ad02f71 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -232,8 +232,9 @@ type SyncManager struct {
 // resetHeaderState sets the headers-first mode state to values appropriate for
 // syncing from a new peer.
 func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight int32) {
-	sm.headerList.Init()
-	sm.startHeader = nil
+	if sm.headerList.Len() != 0 {
+		return
+	}

 	// When there is a next checkpoint, add an entry for the latest known
 	// block into the header pool. This allows the next downloaded header
@@ -369,16 +370,53 @@ func (sm *SyncManager) startSync() {
 			sm.requestedBlocks = make(map[chainhash.Hash]struct{})
 		}

-		locator, err := sm.chain.LatestBlockLocator()
-		if err != nil {
-			log.Errorf("Failed to get block locator for the "+
-				"latest block: %v", err)
-			return
-		}
-
 		log.Infof("Syncing to block height %d from peer %v",
 			bestPeer.LastBlock(), bestPeer.Addr())

+		sm.syncPeer = bestPeer
+
+		// Reset the last progress time now that we have a non-nil
+		// syncPeer to avoid instantly detecting it as stalled in the
+		// event the progress time hasn't been updated recently.
+		sm.lastProgressTime = time.Now()
+
+		// Check if we have some headers already downloaded.
+		var locator blockchain.BlockLocator
+		if sm.headerList.Len() > 0 && sm.nextCheckpoint != nil {
+			e := sm.headerList.Back()
+			node := e.Value.(*headerNode)
+
+			// If the final hash equals next checkpoint, that
+			// means we've verified the downloaded headers and
+			// can start fetching blocks or start fetching the
+			// utreexo headers.
+			if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
+				sm.startHeader = sm.headerList.Front()
+				if utreexoViewActive {
+					sm.fetchUtreexoHeaders()
+				} else {
+					sm.fetchHeaderBlocks()
+				}
+				return
+			}
+
+			// If the last hash doesn't equal the checkpoint,
+			// use the last hash as the locator.
+			locator = blockchain.BlockLocator(
+				[]*chainhash.Hash{node.hash})
+		}
+
+		// If we don't already have headers downloaded we need to fetch
+		// the block locator from chain.
+		if len(locator) == 0 {
+			locator, err = sm.chain.LatestBlockLocator()
+			if err != nil {
+				log.Errorf("Failed to get block locator for the "+
+					"latest block: %v", err)
+				return
+			}
+		}
+
 		// When the current height is less than a known checkpoint we
 		// can use block headers to learn about which blocks comprise
 		// the chain up to the checkpoint and perform less validation
@@ -415,12 +453,6 @@ func (sm *SyncManager) startSync() {
 		} else {
 			bestPeer.PushGetBlocksMsg(locator, &zeroHash)
 		}
-		sm.syncPeer = bestPeer
-
-		// Reset the last progress time now that we have a non-nil
-		// syncPeer to avoid instantly detecting it as stalled in the
-		// event the progress time hasn't been updated recently.
-		sm.lastProgressTime = time.Now()
 	} else {
 		log.Warnf("No sync peer candidates available")
 	}

From d13dfd578e87b5e2942f7094912ad9d7d91e7edb Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Wed, 8 Jan 2025 14:38:55 +0900
Subject: [PATCH 05/13] netsync: add code to handle block headers when we've
 synced past the checkpoints

Even when we've synced past the last checkpoint, it's possible to sync
headers first. To allow a headers-first download past the checkpoints,
we add code that verifies the block headers before accepting them once
we no longer have any checkpoints left.
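
A toy, self-contained illustration of the connect-check performed once
checkpoints run out (string "hashes" stand in for chainhash.Hash; the
real code relies on chain.ProcessBlockHeader for full validation):

    package main

    import "fmt"

    // header is a stand-in for wire.BlockHeader: just a hash and the
    // hash of the block it builds on.
    type header struct {
        hash, prevBlock string
    }

    // connectHeaders mimics the loop added to handleHeadersMsg: every
    // header must extend the previous one, otherwise the peer would be
    // disconnected.
    func connectHeaders(tip string, headers []header) error {
        prev := tip
        for _, h := range headers {
            if h.prevBlock != prev {
                return fmt.Errorf("header %s does not connect to %s",
                    h.hash, prev)
            }
            prev = h.hash
        }
        return nil
    }

    func main() {
        good := []header{{"b", "a"}, {"c", "b"}}
        fmt.Println(connectHeaders("a", good)) // <nil>

        bad := []header{{"b", "a"}, {"d", "x"}}
        fmt.Println(connectHeaders("a", bad)) // header d does not connect to b
    }
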
---
 netsync/manager.go | 83 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 78 insertions(+), 5 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 3ad02f71..5a7f2bfc 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -430,10 +430,8 @@ func (sm *SyncManager) startSync() {
 		// full block hasn't been tampered with.
 		//
 		// Once we have passed the final checkpoint, or checkpoints are
-		// disabled, use standard inv messages learn about the blocks
-		// and fully validate them. Finally, regression test mode does
-		// not support the headers-first approach so do normal block
-		// downloads when in regression test mode.
+		// disabled, still download the headers first but validate
+		// them before accepting them.
 		if sm.headersBuildMode && best.Height < sm.nextCheckpoint.Height &&
 			sm.chainParams != &chaincfg.RegressionNetParams {

@@ -1212,6 +1210,82 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
 		return
 	}

+	utreexoViewActive := sm.chain.IsUtreexoViewActive()
+
+	// This means that we've run out of checkpoints and need to verify the
+	// headers that we've received.
+	if sm.nextCheckpoint == nil {
+		var finalHeight int32
+		var finalHeader *wire.BlockHeader
+		for _, blockHeader := range msg.Headers {
+			blockHash := blockHeader.BlockHash()
+			err := sm.chain.ProcessBlockHeader(blockHeader)
+			if err != nil {
+				log.Warnf("Received block header that does not "+
+					"properly connect to the chain from peer %s "+
+					"-- disconnecting", peer.Addr())
+				peer.Disconnect()
+				return
+			}
+
+			prevNodeEl := sm.headerList.Back()
+			if prevNodeEl == nil {
+				log.Warnf("Header list does not contain a previous "+
+					"element as expected -- disconnecting peer")
+				peer.Disconnect()
+				return
+			}
+
+			prevNode := prevNodeEl.Value.(*headerNode)
+			node := headerNode{hash: &blockHash}
+			if prevNode.hash.IsEqual(&blockHeader.PrevBlock) {
+				node.height = prevNode.height + 1
+				finalHeader = blockHeader
+				finalHeight = node.height
+				e := sm.headerList.PushBack(&node)
+				if sm.startHeader == nil {
+					sm.startHeader = e
+				}
+			} else {
+				log.Warnf("Received block header that does not "+
+					"properly connect to the chain from peer %s "+
+					"-- disconnecting", peer.Addr())
+				peer.Disconnect()
+				return
+			}
+		}
+
+		if finalHeight != sm.syncPeer.LastBlock() {
+			finalHash := finalHeader.BlockHash()
+			locator := blockchain.BlockLocator([]*chainhash.Hash{&finalHash})
+			err := peer.PushGetHeadersMsg(locator, &zeroHash)
+			if err != nil {
+				log.Warnf("Failed to send getheaders message to "+
+					"peer %s: %v", peer.Addr(), err)
+				return
+			}
+		} else {
+			// Since the first entry of the list is always the final block
+			// that is already in the database and is only used to ensure
+			// the next header links properly, it must be removed before
+			// fetching the blocks or the utreexo headers.
+			sm.headerList.Remove(sm.headerList.Front())
+			sm.progressLogger.SetLastLogTime(time.Now())
+
+			if utreexoViewActive {
+				log.Infof("Received %v block headers: Fetching utreexo headers",
+					sm.headerList.Len())
+				sm.fetchUtreexoHeaders()
+			} else {
+				log.Infof("Received %v block headers: Fetching blocks",
+					sm.headerList.Len())
+				sm.fetchHeaderBlocks()
+			}
+		}
+
+		return
+	}
+
 	// Process all of the received headers ensuring each one connects to the
 	// previous and that checkpoints match.
 	receivedCheckpoint := false
@@ -1268,7 +1342,6 @@ func (sm *SyncManager) handleHeadersMsg(hmsg *headersMsg) {
 		}
 	}

-	utreexoViewActive := sm.chain.IsUtreexoViewActive()
 	// When this header is a checkpoint, switch to fetching the blocks for
 	// all of the headers since the last checkpoint.
 	if receivedCheckpoint {

From 4d63740f94db3a9c00cc813f830f93304d534c49 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Wed, 8 Jan 2025 14:55:27 +0900
Subject: [PATCH 06/13] netsync: handle headers-first mode when there are no
 more checkpoints in handleBlockMsg

handleBlockMsg only handled headers-first mode when there are
checkpoints. The added code handles the cases where there are no more
checkpoints but the sync manager is still in headers-first mode.
---
 netsync/manager.go | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 5a7f2bfc..63330a84 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -816,7 +816,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
 	// properly.
 	isCheckpointBlock := false
 	behaviorFlags := blockchain.BFNone
-	if sm.headersFirstMode {
+	if sm.headersFirstMode && sm.nextCheckpoint != nil {
 		firstNodeEl := sm.headerList.Front()
 		if firstNodeEl != nil {
 			firstNode := firstNodeEl.Value.(*headerNode)
@@ -963,6 +963,25 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
 		return
 	}

+	if sm.nextCheckpoint == nil {
+		lastHeight := sm.syncPeer.LastBlock()
+		if bmsg.block.Height() < lastHeight {
+			if sm.startHeader != nil &&
+				len(state.requestedBlocks) < minInFlightBlocks {
+				sm.fetchHeaderBlocks()
+			}
+			return
+		}
+		if bmsg.block.Height() >= lastHeight {
+			log.Infof("Finished the initial block download and caught up to block %v(%v) "+
+				"-- now listening to blocks.", bmsg.block.Hash(), bmsg.block.Height())
+			sm.headersFirstMode = false
+			sm.headerList.Init()
+		}
+
+		return
+	}
+
 	// This is headers-first mode, so if the block is not a checkpoint
 	// request more blocks using the header list when the request queue is
 	// getting short.

From 384df1126410a6f3bcc5575f47004790e42d1d11 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Wed, 8 Jan 2025 15:04:04 +0900
Subject: [PATCH 07/13] netsync: always push back the best state to headerList
 on resetHeaderState

Since we can be in headers-first mode, we push the best headerNode
regardless of whether or not there are still checkpoints left.
---
 netsync/manager.go | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 63330a84..1d0749ec 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -236,13 +236,8 @@ func (sm *SyncManager) resetHeaderState(newestHash *chainhash.Hash, newestHeight
 		return
 	}

-	// When there is a next checkpoint, add an entry for the latest known
-	// block into the header pool. This allows the next downloaded header
-	// to prove it links to the chain properly.
-	if sm.nextCheckpoint != nil {
-		node := headerNode{height: newestHeight, hash: newestHash}
-		sm.headerList.PushBack(&node)
-	}
+	node := headerNode{height: newestHeight, hash: newestHash}
+	sm.headerList.PushBack(&node)
 }

From dd0b04f0c55db6ac0de81a0d4f0704c17b70ee81 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Wed, 8 Jan 2025 15:06:20 +0900
Subject: [PATCH 08/13] netsync: disable headers-first mode when we're all
 caught up.
When the chainstate is current and there are no more blocks to sync up
to from our peers, exit headers-first mode.
---
 netsync/manager.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/netsync/manager.go b/netsync/manager.go
index 1d0749ec..831e023a 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -336,6 +336,8 @@ func (sm *SyncManager) startSync() {
 	if sm.chain.IsCurrent() && len(higherPeers) == 0 {
 		log.Infof("Caught up to block %s(%d)", best.Hash.String(),
 			best.Height)
+		sm.headersFirstMode = false
+		sm.headerList.Init()
 		return
 	}

From 74776e87af2f82687776d481782fcff32a4d2daf Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Wed, 8 Jan 2025 15:11:50 +0900
Subject: [PATCH 09/13] netsync: always ask for headers when catching up to
 the latest block a peer has

headers-first download is now always preferred over asking for blocks
directly. This eliminates orphan blocks during the initial block
download.
---
 netsync/manager.go | 62 +++++++++++++++++++++++++++++++-----------
 1 file changed, 47 insertions(+), 15 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 831e023a..c0d9e3b2 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -379,7 +379,7 @@ func (sm *SyncManager) startSync() {

 		// Check if we have some headers already downloaded.
 		var locator blockchain.BlockLocator
-		if sm.headerList.Len() > 0 && sm.nextCheckpoint != nil {
+		if sm.headerList.Len() > 0 {
 			e := sm.headerList.Back()
 			node := e.Value.(*headerNode)

@@ -387,13 +387,36 @@ func (sm *SyncManager) startSync() {
 			// means we've verified the downloaded headers and
 			// can start fetching blocks or start fetching the
 			// utreexo headers.
-			if node.hash.IsEqual(sm.nextCheckpoint.Hash) {
+			if sm.nextCheckpoint != nil && node.hash.IsEqual(sm.nextCheckpoint.Hash) {
 				sm.startHeader = sm.headerList.Front()
 				if utreexoViewActive {
-					sm.fetchUtreexoHeaders()
-				} else {
-					sm.fetchHeaderBlocks()
+					// If we have the last utreexo header, then we
+					// should have all the previous headers as well.
+					_, have := sm.utreexoHeaders[*node.hash]
+					if !have {
+						sm.fetchUtreexoHeaders()
+						return
+					}
 				}
+				sm.fetchHeaderBlocks()
+				return
+			}
+
+			// If the final height is the same as the sync peer, then
+			// start downloading blocks as we have all the headers
+			// already downloaded.
+			if node.height == sm.syncPeer.LastBlock() {
+				sm.startHeader = sm.headerList.Front()
+				if utreexoViewActive {
+					// If we have the last utreexo header, then we
+					// should have all the previous headers as well.
+					_, have := sm.utreexoHeaders[*node.hash]
+					if !have {
+						sm.fetchUtreexoHeaders()
+						return
+					}
+				}
+				sm.fetchHeaderBlocks()
 				return
 			}

@@ -414,6 +437,13 @@ func (sm *SyncManager) startSync() {
 			}
 		}

+		// Always reset header state as we may have switched to a different
+		// sync peer. If there's an existing header state, the state will not
+		// be modified. If there isn't an existing state, then the added best
+		// block will be required to properly connect the downloaded header
+		// to the headerList.
+		sm.resetHeaderState(&best.Hash, best.Height)
+
 		// When the current height is less than a known checkpoint we
 		// can use block headers to learn about which blocks comprise
 		// the chain up to the checkpoint and perform less validation
@@ -446,7 +476,12 @@ func (sm *SyncManager) startSync() {
 				"%d from peer %s", best.Height+1,
 				sm.nextCheckpoint.Height, bestPeer.Addr())
 		} else {
-			bestPeer.PushGetBlocksMsg(locator, &zeroHash)
+			sm.headersFirstMode = true
+			bestPeer.PushGetHeadersMsg(locator, &zeroHash)
+			log.Infof("Downloading headers for blocks %d to "+
+				"%d from peer %s", best.Height+1,
+				bestPeer.LastBlock(), bestPeer.Addr())
+
 		}
 	} else {
 		log.Warnf("No sync peer candidates available")
@@ -1011,17 +1046,14 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
 		return
 	}

-	// This is headers-first mode, the block is a checkpoint, and there are
-	// no more checkpoints, so switch to normal mode by requesting blocks
-	// from the block after this one up to the end of the chain (zero hash).
-	sm.headersFirstMode = false
-	sm.headerList.Init()
 	log.Infof("Reached the final checkpoint -- switching to normal mode")
-	locator := blockchain.BlockLocator([]*chainhash.Hash{blockHash})
-	err = peer.PushGetBlocksMsg(locator, &zeroHash)
+	log.Infof("Downloading headers from %d to %d from peer %s", bmsg.block.Height()+1,
+		peer.LastBlock(), peer.Addr())
+	locator := blockchain.BlockLocator([]*chainhash.Hash{prevHash})
+	err = peer.PushGetHeadersMsg(locator, &zeroHash)
 	if err != nil {
-		log.Warnf("Failed to send getblocks message to peer %s: %v",
-			peer.Addr(), err)
+		log.Warnf("Failed to send getheaders message to "+
+			"peer %s: %v", peer.Addr(), err)
 		return
 	}
 }

From 9fb2f0257085e206e48fef61c0190f7412eaa913 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Wed, 8 Jan 2025 23:35:03 +0900
Subject: [PATCH 10/13] netsync: always call resetHeaderState on
 SyncManager.New()

Since we're purely headers-first during IBD, we need to call
resetHeaderState regardless of whether or not we have more checkpoints.
---
 netsync/manager.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index c0d9e3b2..71135f72 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -2337,12 +2337,10 @@ func New(config *Config) (*SyncManager, error) {
 	if !config.DisableCheckpoints {
 		// Initialize the next checkpoint based on the current height.
 		sm.nextCheckpoint = sm.findNextHeaderCheckpoint(best.Height)
-		if sm.nextCheckpoint != nil {
-			sm.resetHeaderState(&best.Hash, best.Height)
-		}
 	} else {
 		log.Info("Checkpoints are disabled")
 	}
+	sm.resetHeaderState(&best.Hash, best.Height)

 	// If we're at assume utreexo mode, build headers first.
 	if sm.chain.IsUtreexoViewActive() && sm.chain.IsAssumeUtreexo() {

From c69ff6865f958f05395da78323e2041cb1ed149f Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Thu, 9 Jan 2025 10:43:59 +0900
Subject: [PATCH 11/13] netsync: handle headers-first mode when there are no
 more checkpoints in handleUtreexoHeaderMsg
---
 netsync/manager.go | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index 71135f72..ccd2c2da 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -1467,7 +1467,14 @@ func (sm *SyncManager) handleUtreexoHeaderMsg(hmsg *utreexoHeaderMsg) {
 	log.Debugf("accepted utreexo header for block %v. have %v headers",
 		msg.BlockHash, len(sm.utreexoHeaders))
have %v headers", msg.BlockHash, len(sm.utreexoHeaders)) - if msg.BlockHash == *sm.nextCheckpoint.Hash { + if sm.nextCheckpoint != nil && + msg.BlockHash == *sm.nextCheckpoint.Hash { + log.Infof("Received utreexo headers to block "+ + "%d/hash %s. Fetching blocks", + node.height, node.hash) + sm.fetchHeaderBlocks() + return + } else if node.height == peer.LastBlock() { log.Infof("Received utreexo headers to block "+ "%d/hash %s. Fetching blocks", node.height, node.hash) From 172cf7f6de67d2aab2f1efc5aff14794ddcce526 Mon Sep 17 00:00:00 2001 From: Calvin Kim Date: Thu, 9 Jan 2025 18:25:33 +0900 Subject: [PATCH 12/13] netsync: add requestedUtreexoHeaders field to peerState We create a requestedUtreexoHeaders local to each of the peers to be able to keep track of the utreexo headers requested from each of the peers. Just like with blocks, the global state is not used during headers-first download. --- netsync/manager.go | 48 +++++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/netsync/manager.go b/netsync/manager.go index ccd2c2da..b5f8f675 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -164,10 +164,11 @@ type headerNode struct { // peerSyncState stores additional information that the SyncManager tracks // about a peer. type peerSyncState struct { - syncCandidate bool - requestQueue []*wire.InvVect - requestedTxns map[chainhash.Hash]struct{} - requestedBlocks map[chainhash.Hash]struct{} + syncCandidate bool + requestQueue []*wire.InvVect + requestedTxns map[chainhash.Hash]struct{} + requestedBlocks map[chainhash.Hash]struct{} + requestedUtreexoHeaders map[chainhash.Hash]struct{} } // limitAdd is a helper function for maps that require a maximum limit by @@ -365,6 +366,7 @@ func (sm *SyncManager) startSync() { // during headersFirstMode. if !sm.headersFirstMode { sm.requestedBlocks = make(map[chainhash.Hash]struct{}) + sm.requestedUtreexoHeaders = make(map[chainhash.Hash]struct{}) } log.Infof("Syncing to block height %d from peer %v", @@ -582,9 +584,10 @@ func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) { // Initialize the peer state. isSyncCandidate := sm.isSyncCandidate(peer) sm.peerStates[peer] = &peerSyncState{ - syncCandidate: isSyncCandidate, - requestedTxns: make(map[chainhash.Hash]struct{}), - requestedBlocks: make(map[chainhash.Hash]struct{}), + syncCandidate: isSyncCandidate, + requestedTxns: make(map[chainhash.Hash]struct{}), + requestedBlocks: make(map[chainhash.Hash]struct{}), + requestedUtreexoHeaders: make(map[chainhash.Hash]struct{}), } // Start syncing by choosing the best candidate if needed. @@ -690,6 +693,12 @@ func (sm *SyncManager) clearRequestedState(state *peerSyncState) { for blockHash := range state.requestedBlocks { delete(sm.requestedBlocks, blockHash) } + + // Also remove requested utreexo headers from the global map so + // that they will be fetched from elsewhere next time we get an inv. 
+		for blockHash := range state.requestedUtreexoHeaders {
+			delete(sm.requestedUtreexoHeaders, blockHash)
+		}
 	}
 }

@@ -1067,6 +1076,12 @@ func (sm *SyncManager) fetchUtreexoHeaders() {
 		return
 	}

+	state, exists := sm.peerStates[sm.syncPeer]
+	if !exists {
+		log.Warnf("Don't have peer state for sync peer %s", sm.syncPeer.String())
+		return
+	}
+
 	for e := sm.startHeader; e != nil; e = e.Next() {
 		node, ok := e.Value.(*headerNode)
 		if !ok {
@@ -1074,15 +1089,15 @@ func (sm *SyncManager) fetchUtreexoHeaders() {
 			continue
 		}

-		_, requested := sm.requestedUtreexoHeaders[*node.hash]
+		_, requested := state.requestedUtreexoHeaders[*node.hash]
 		_, have := sm.utreexoHeaders[*node.hash]
 		if !requested && !have {
-			sm.requestedUtreexoHeaders[*node.hash] = struct{}{}
+			state.requestedUtreexoHeaders[*node.hash] = struct{}{}
 			ghmsg := wire.NewMsgGetUtreexoHeader(*node.hash)
 			sm.syncPeer.QueueMessage(ghmsg, nil)
 		}

-		if len(sm.requestedUtreexoHeaders) > 16 {
+		if len(state.requestedUtreexoHeaders) > minInFlightBlocks {
 			break
 		}
 	}
@@ -1438,12 +1453,12 @@ func (sm *SyncManager) handleUtreexoHeaderMsg(hmsg *utreexoHeaderMsg) {
 		log.Warnf("Received utreexo header message from unknown peer %s", peer)
 		return
 	}
+	msg := hmsg.header

-	// If we're in headers first, check if we have the final utreexo header. If not
+	// If we're in headers first, check if we have the final utreexo header. If not,
 	// ask for more utreexo headers.
-	msg := hmsg.header
 	if sm.headersFirstMode {
-		_, found := sm.requestedUtreexoHeaders[msg.BlockHash]
+		_, found := peerState.requestedUtreexoHeaders[msg.BlockHash]
 		if !found {
 			log.Warnf("Got unrequested utreexo header from %s -- "+
 				"disconnecting", peer.Addr())
 			peer.Disconnect()
 			return
 		}

+		// Since we received it, it's no longer requested.
+		delete(peerState.requestedUtreexoHeaders, msg.BlockHash)
+
 		for e := sm.startHeader; e != nil; e = e.Next() {
 			node, ok := e.Value.(*headerNode)
 			if !ok {
 				continue
 			}

 			if node.hash.IsEqual(&msg.BlockHash) {
-				delete(sm.requestedUtreexoHeaders, msg.BlockHash)
 				sm.progressLogger.SetLastLogTime(time.Now())
 				sm.lastProgressTime = time.Now()
 			}
 		}

-		if len(sm.requestedUtreexoHeaders) < minInFlightBlocks {
+		if len(peerState.requestedUtreexoHeaders) < minInFlightBlocks {
 			sm.fetchUtreexoHeaders()
 		}

 		return
 	}

 	// We're not in headers-first mode. When we receive a utreexo header,
 	// immediately ask for the block.
 	sm.utreexoHeaders[msg.BlockHash] = hmsg.header
+	delete(sm.requestedUtreexoHeaders, msg.BlockHash)

 	log.Debugf("accepted utreexo header for block %v. have %v headers",
 		msg.BlockHash, len(sm.utreexoHeaders))

From 42ea8ffb8e9c679d4c5a7b5166df4c01a43a3d74 Mon Sep 17 00:00:00 2001
From: Calvin Kim
Date: Thu, 9 Jan 2025 18:28:03 +0900
Subject: [PATCH 13/13] netsync: refactor out logic for checking the
 headersList during headers-first download

The logic for checking whether a block exists within the saved list of
headers was growing and hurting readability, so we refactor it out into
a separate function.
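
A toy model of the outcomes the helper produces (assumed semantics
drawn from the diff below; bfFastAdd stands in for
blockchain.BFFastAdd and this is not the actual manager code):

    package main

    import "fmt"

    const bfFastAdd = 1 << 0

    // check mirrors checkHeadersList's decision: while checkpoints
    // remain, blocks matching the header list are fast-added and the
    // checkpoint block itself is flagged; past the final checkpoint,
    // blocks get full validation (no flags).
    func check(headersFirst, haveCheckpoint, atCheckpointHash bool) (bool, int) {
        if !headersFirst {
            return false, 0
        }
        if haveCheckpoint {
            return atCheckpointHash, bfFastAdd
        }
        return false, 0
    }

    func main() {
        fmt.Println(check(true, true, false))  // false 1: under a checkpoint, fast-add
        fmt.Println(check(true, true, true))   // true 1: the checkpoint block itself
        fmt.Println(check(true, false, false)) // false 0: past checkpoints, full validation
    }
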
---
 netsync/manager.go | 67 ++++++++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 23 deletions(-)

diff --git a/netsync/manager.go b/netsync/manager.go
index b5f8f675..e87962a8 100644
--- a/netsync/manager.go
+++ b/netsync/manager.go
@@ -812,6 +812,48 @@ func (sm *SyncManager) current() bool {
 	return true
 }

+// checkHeadersList checks if the sync manager is in headers-first mode and
+// returns whether the given block hash is a checkpointed block, along with
+// the behavior flags for this block. If the block is still under the
+// checkpoint, then it's given the fast-add flag.
+func (sm *SyncManager) checkHeadersList(blockHash *chainhash.Hash) (
+	bool, blockchain.BehaviorFlags) {
+
+	// Nothing to check if we're not in headers-first mode.
+	if !sm.headersFirstMode {
+		return false, blockchain.BFNone
+	}
+
+	isCheckpointBlock := false
+	behaviorFlags := blockchain.BFNone
+
+	firstNodeEl := sm.headerList.Front()
+	if firstNodeEl == nil {
+		log.Warnf("headers-first mode is on but the headersList is empty")
+		return isCheckpointBlock, behaviorFlags
+	}
+
+	firstNode := firstNodeEl.Value.(*headerNode)
+	if !blockHash.IsEqual(firstNode.hash) {
+		return isCheckpointBlock, behaviorFlags
+	}
+
+	if sm.nextCheckpoint != nil {
+		behaviorFlags |= blockchain.BFFastAdd
+		if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
+			isCheckpointBlock = true
+		} else {
+			sm.headerList.Remove(firstNodeEl)
+		}
+	} else {
+		if firstNode.height != sm.syncPeer.LastBlock() {
+			sm.headerList.Remove(firstNodeEl)
+		}
+	}
+
+	return isCheckpointBlock, behaviorFlags
+}
+
 // handleBlockMsg handles block messages from all peers.
 func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
 	peer := bmsg.peer
@@ -848,29 +890,8 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) {
 		bmsg.block.MsgBlock().UData.AccProof.Targets = utreexoHeader.Targets
 	}

-	// When in headers-first mode, if the block matches the hash of the
-	// first header in the list of headers that are being fetched, it's
-	// eligible for less validation since the headers have already been
-	// verified to link together and are valid up to the next checkpoint.
-	// Also, remove the list entry for all blocks except the checkpoint
-	// since it is needed to verify the next round of headers links
-	// properly.
-	isCheckpointBlock := false
-	behaviorFlags := blockchain.BFNone
-	if sm.headersFirstMode && sm.nextCheckpoint != nil {
-		firstNodeEl := sm.headerList.Front()
-		if firstNodeEl != nil {
-			firstNode := firstNodeEl.Value.(*headerNode)
-			if blockHash.IsEqual(firstNode.hash) {
-				behaviorFlags |= blockchain.BFFastAdd
-				if firstNode.hash.IsEqual(sm.nextCheckpoint.Hash) {
-					isCheckpointBlock = true
-				} else {
-					sm.headerList.Remove(firstNodeEl)
-				}
-			}
-		}
-	}
+	// Process the block based off the headers if we're still in headers-first mode.
+	isCheckpointBlock, behaviorFlags := sm.checkHeadersList(blockHash)

 	// Remove block from request maps. Either chain will know about it and
 	// so we shouldn't have any more instances of trying to fetch it, or we