From 2f4f2dd1ee50a60117c09bcecb44bc3a5bb876c7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 4 Feb 2025 18:49:33 -0800 Subject: [PATCH 01/13] Issue tenure extend if incoming miner fails to mine Signed-off-by: Jacinta Ferrant --- CHANGELOG.md | 2 + stacks-signer/CHANGELOG.md | 1 + stacks-signer/src/config.rs | 2 +- stackslib/src/config/mod.rs | 15 +- .../stacks-node/src/nakamoto_node/relayer.rs | 152 +-- .../src/tests/nakamoto_integrations.rs | 3 +- testnet/stacks-node/src/tests/signer/v0.rs | 880 ++++++++++++++++++ 7 files changed, 996 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f7fce479b..7afe8459e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,12 +18,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added - Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted +- Add miner configuration option `tenure_extend_wait_timeout_ms` to specify the time to wait to try to continue a tenure if a BlockFound is expected ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) - Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response (#5705) - Miner will not issue a tenure extend until at least half of the block budget has been spent (#5757) +- Miner will issue a tenure extend if the incoming miner has failed to produce a block (#5729) ### Fixed diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 2e801d680d..52497410ae 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -20,6 +20,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE `StackerDB` messages, it logs `INFO` messages. 
Other interactions with the `stacks-node` behave normally (e.g., submitting validation requests, submitting finished blocks). A dry run signer will error out if the supplied key is actually a registered signer. +- Reduce default value of `block_proposal_timeout_ms` to 120_000 ## [3.1.0.0.4.0] diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 29ee35c961..556920a8ba 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,7 +35,7 @@ use stacks_common::util::hash::Hash160; use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; -const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; +const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 120_000; const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index a9a03d4861..b330aab1af 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -114,6 +114,8 @@ const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; /// see if we need to extend the ongoing tenure (e.g. because the current /// sortition is empty or invalid). const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; +/// Default number of millis to wait to try to continue a tenure if a BlockFound is expected +const DEFAULT_TENURE_EXTEND_WAIT_MS: u64 = 120_000; /// Default duration to wait before attempting to issue a tenure extend. /// This should be greater than the signers' timeout. This is used for issuing /// fallback tenure extends @@ -2177,9 +2179,11 @@ pub struct MinerConfig { pub block_commit_delay: Duration, /// The percentage of the remaining tenure cost limit to consume each block. 
pub tenure_cost_limit_per_block_percentage: Option, - /// The number of seconds to wait in-between polling the sortition DB to see if we need to + /// Duration to wait in-between polling the sortition DB to see if we need to /// extend the ongoing tenure (e.g. because the current sortition is empty or invalid). - pub tenure_extend_poll_secs: Duration, + pub tenure_extend_poll_timeout: Duration, + /// Duration to wait to try to continue a tenure if a BlockFound is expected + pub tenure_extend_wait_timeout: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, /// Percentage of block budget that must be used before attempting a time-based tenure extend @@ -2222,7 +2226,8 @@ impl Default for MinerConfig { tenure_cost_limit_per_block_percentage: Some( DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE, ), - tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), + tenure_extend_poll_timeout: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), + tenure_extend_wait_timeout: Duration::from_millis(DEFAULT_TENURE_EXTEND_WAIT_MS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), tenure_extend_cost_threshold: DEFAULT_TENURE_EXTEND_COST_THRESHOLD, @@ -2629,6 +2634,7 @@ pub struct MinerConfigFile { pub block_commit_delay_ms: Option, pub tenure_cost_limit_per_block_percentage: Option, pub tenure_extend_poll_secs: Option, + pub tenure_extend_wait_timeout_ms: Option, pub tenure_timeout_secs: Option, pub tenure_extend_cost_threshold: Option, pub block_rejection_timeout_steps: Option>, @@ -2772,7 +2778,8 @@ impl MinerConfigFile { subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), tenure_cost_limit_per_block_percentage, - tenure_extend_poll_secs: 
self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), + tenure_extend_poll_timeout: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_timeout), + tenure_extend_wait_timeout: self.tenure_extend_wait_timeout_ms.map(Duration::from_millis).unwrap_or(miner_default_config.tenure_extend_wait_timeout), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), tenure_extend_cost_threshold: self.tenure_extend_cost_threshold.unwrap_or(miner_default_config.tenure_extend_cost_threshold), diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 2cbc37acff..86ae5a4de4 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -250,6 +250,48 @@ impl MinerStopHandle { } } +/// Information necessary to determine when to extend a tenure +pub struct TenureExtendTime { + /// The time at which we determined that we should tenure-extend + time: Instant, + /// The amount of time we should wait before tenure-extending + timeout: Duration, +} + +impl TenureExtendTime { + /// Create a new `TenureExtendTime` with a delayed `timeout` + pub fn delayed(timeout: Duration) -> Self { + Self { + time: Instant::now(), + timeout, + } + } + + /// Create a new `TenureExtendTime` with no `timeout` + pub fn immediate() -> Self { + Self { + time: Instant::now(), + timeout: Duration::from_secs(0), + } + } + + /// Should we attempt to tenure-extend? + pub fn should_extend(&self) -> bool { + // We set the time, but have we waited long enough? 
+ self.time.elapsed() > self.timeout + } + + /// Amount of time elapsed since we decided to tenure-extend + pub fn elapsed(&self) -> Duration { + self.time.elapsed() + } + + /// The timeout specified when we decided to tenure-extend + pub fn timeout(&self) -> Duration { + self.timeout + } +} + /// Relayer thread /// * accepts network results and stores blocks and microblocks /// * forwards new blocks, microblocks, and transactions to the p2p thread @@ -319,8 +361,8 @@ pub struct RelayerThread { last_committed: Option, /// Timeout for waiting for the first block in a tenure before submitting a block commit new_tenure_timeout: Option, - /// Timeout for waiting for a BlockFound in a subsequent tenure before trying to extend our own - tenure_extend_timeout: Option, + /// Time to wait before attempting a tenure extend + tenure_extend_time: Option, } impl RelayerThread { @@ -380,7 +422,7 @@ impl RelayerThread { next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, new_tenure_timeout: None, - tenure_extend_timeout: None, + tenure_extend_time: None, } } @@ -505,7 +547,7 @@ impl RelayerThread { SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); - self.tenure_extend_timeout = None; + self.tenure_extend_time = None; if sn.sortition { // a sortition happened @@ -535,11 +577,18 @@ impl RelayerThread { sn.consensus_hash, mining_pkh_opt, ) { - Ok(Some(_)) => { + Ok(Some((_, wait_for_miner))) => { // we can continue our ongoing tenure, but we should give the new winning miner // a chance to send their BlockFound first. - debug!("Relayer: Did not win sortition, but am mining the ongoing tenure. Allowing the new miner some time to come online before trying to continue."); - self.tenure_extend_timeout = Some(Instant::now()); + if wait_for_miner { + debug!("Relayer: Did not win sortition, but am mining the ongoing tenure. 
Allowing the new miner some time to come online before trying to continue."); + self.tenure_extend_time = Some(TenureExtendTime::delayed( + self.config.miner.tenure_extend_wait_timeout, + )); + } else { + debug!("Relayer: Did not win sortition, but am mining the ongoing tenure. Will try to continue tenure immediately."); + self.tenure_extend_time = Some(TenureExtendTime::immediate()); + } return Some(MinerDirective::StopTenure); } Ok(None) => { @@ -611,7 +660,7 @@ impl RelayerThread { "Relayer: ongoing tenure {} already represents last-winning snapshot", &stacks_tip_sn.consensus_hash ); - self.tenure_extend_timeout = Some(Instant::now()); + self.tenure_extend_time = Some(TenureExtendTime::immediate()); false } else { // stacks tip's snapshot may be an ancestor of the last-won sortition. @@ -654,7 +703,7 @@ impl RelayerThread { &last_winning_snapshot.consensus_hash ); // prepare to extend after our BlockFound gets mined. - self.tenure_extend_timeout = Some(Instant::now()); + self.tenure_extend_time = Some(TenureExtendTime::immediate()); return Some(MinerDirective::BeginTenure { parent_tenure_start: StacksBlockId( last_winning_snapshot.winning_stacks_block_hash.clone().0, @@ -675,7 +724,9 @@ impl RelayerThread { // by someone else -- there's a chance that this other miner will produce a // BlockFound in the interim. debug!("Relayer: Did not win last winning snapshot despite mining the ongoing tenure, so allowing the new miner some time to come online."); - self.tenure_extend_timeout = Some(Instant::now()); + self.tenure_extend_time = Some(TenureExtendTime::delayed( + self.config.miner.tenure_extend_wait_timeout, + )); return None; } return Some(MinerDirective::ContinueTenure { @@ -1348,10 +1399,10 @@ impl RelayerThread { /// Assumes that the caller has already checked that the given miner has _not_ won the new /// sortition. /// - /// Returns Ok(Some(stacks-tip-election-snapshot)) if the last-winning miner needs to extend. 
- /// For now, this only happens if the miner's election snapshot was the last-known valid and - /// non-empty snapshot. In the future, this function may return Ok(Some(..)) if the node - /// determines that a subsequent miner won sortition, but never came online. + /// Returns Ok(Some((stacks-tip-election-snapshot, wait-for-miner))) if the last-winning miner should attempt to extend + /// This can happen for two separate reasons: + /// - the miner's election snapshot was the last-known valid and non-empty snapshot and therefore should extend immediately + /// - the node determines that a subsequent miner won sortition, but has not yet produced a valid block and should wait-for-miner before extending /// /// Returns OK(None) if the last-winning miner should not extend its tenure. /// @@ -1361,7 +1412,7 @@ impl RelayerThread { chain_state: &mut StacksChainState, new_burn_view: ConsensusHash, mining_key_opt: Option, - ) -> Result, NakamotoNodeError> { + ) -> Result, NakamotoNodeError> { let Some(mining_pkh) = mining_key_opt else { return Ok(None); }; @@ -1407,23 +1458,25 @@ impl RelayerThread { return Ok(None); } - // For now, only allow the miner to extend its tenure if won the highest valid sortition. - // There cannot be any higher sortitions that are valid (as defined above). - // - // In the future, the miner will be able to extend its tenure even if there are higher - // valid sortitions, but only if it determines that the miners of those sortitions are - // offline. + // Allow the miner to extend its tenure if it won the highest valid sortition IFF + // it determines that the miner of that sortition fails to produce a block + // by the required timeout. if let Some(highest_valid_sortition) = Self::find_highest_valid_sortition( sortdb, chain_state, &sort_tip, &canonical_stacks_snapshot.consensus_hash, )? 
{ - info!("Relayer: will not extend tenure -- we won sortition {}, but the highest valid sortition is {}", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); - return Ok(None); + // Only consider extending when the highest valid sortition is the current sortition tip: + // its winner may still produce a block within the timeout, so signal wait-for-miner. + // Any higher valid sortition that is NOT the tip means we should not extend at all. + if sort_tip.consensus_hash != highest_valid_sortition.consensus_hash { + info!("Relayer: will not extend tenure -- we won sortition {}, but the highest valid sortition is {}", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); + return Ok(None); + } + info!("Relayer: MAY extend tenure -- we won sortition {}, but must give miner time to produce a valid block for the highest valid sortition {}", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); + return Ok(Some((canonical_stacks_snapshot, true))); } - - Ok(Some(canonical_stacks_snapshot)) + // There cannot be any higher sortitions that are valid (as defined above). Ok(Some((canonical_stacks_snapshot, false))) } /// Attempt to continue a miner's tenure into the next burn block. @@ -1431,7 +1484,8 @@ impl RelayerThread { /// elected the local view of the canonical Stacks fork's ongoing tenure. /// /// This function assumes that the caller has checked that the sortition referred to by - /// `new_burn_view` does not have a sortition winner. + /// `new_burn_view` does not have a sortition winner or that the winner has not produced a + /// valid block yet. 
fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {e:?}"); @@ -1440,7 +1494,7 @@ impl RelayerThread { debug!("Relayer: successfully stopped tenure; will try to continue."); let mining_pkh_opt = self.get_mining_key_pkh(); - let Some(canonical_stacks_tip_election_snapshot) = Self::can_continue_tenure( + let Some((canonical_stacks_tip_election_snapshot, _)) = Self::can_continue_tenure( &self.sortdb, &mut self.chainstate, new_burn_view.clone(), @@ -1758,38 +1812,30 @@ impl RelayerThread { } /// Try to start up a tenure-extend. - /// Only do this if the miner won the highest valid sortition but the burn view has changed. - /// In the future, the miner will also try to extend its tenure if a subsequent miner appears - /// to be offline. + /// Only do this if: + /// - the miner won the highest valid sortition but the burn view has changed. + /// - the subsequent miner appears to be offline. fn try_continue_tenure(&mut self) { - if self.tenure_extend_timeout.is_none() { - return; - } - - // time to poll to see if we should begin a tenure-extend? - let deadline_passed = self - .tenure_extend_timeout - .map(|tenure_extend_timeout| { - let deadline_passed = - tenure_extend_timeout.elapsed() > self.config.miner.tenure_extend_poll_secs; - if !deadline_passed { - test_debug!( - "Relayer: will not try to tenure-extend yet ({} <= {})", - tenure_extend_timeout.elapsed().as_secs(), - self.config.miner.tenure_extend_poll_secs.as_secs() - ); - } - deadline_passed - }) - .unwrap_or(false); - - if !deadline_passed { + // Should begin a tenure-extend? 
+ if let Some(tenure_extend_time) = &self.tenure_extend_time { + if !tenure_extend_time.should_extend() { + test_debug!( + "Relayer: will not try to tenure-extend yet ({} <= {})", + tenure_extend_time.elapsed().as_secs(), + tenure_extend_time.timeout().as_secs() + ); + return; + } + } else { + // No tenure extend time set, so nothing to do. return; } // reset timer so we can try again if for some reason a miner was already running (e.g. a // blockfound from earlier). - self.tenure_extend_timeout = Some(Instant::now()); + self.tenure_extend_time = Some(TenureExtendTime::delayed( + self.config.miner.tenure_extend_poll_timeout, + )); // try to extend, but only if we aren't already running a thread for the current or newer // burnchain view diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 4099ce64f2..54bb65d777 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -10897,7 +10897,7 @@ fn test_tenure_extend_from_flashblocks() { assert_ne!(sort_tip.consensus_hash, election_tip.consensus_hash); // we can, however, continue the tenure - let canonical_stacks_tip = RelayerThread::can_continue_tenure( + let (canonical_stacks_tip, wait) = RelayerThread::can_continue_tenure( &sortdb, &mut chainstate, sort_tip.consensus_hash.clone(), @@ -10905,6 +10905,7 @@ fn test_tenure_extend_from_flashblocks() { ) .unwrap() .unwrap(); + assert!(!wait); assert_eq!(canonical_stacks_tip, election_tip); // if we didn't win the last block -- tantamount to the sortition winner miner key being diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index dfe5c34443..279bba32f1 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12835,3 +12835,883 @@ fn tenure_extend_cost_threshold() { signer_test.shutdown(); } + +/// Test a scenario where: 
+/// Two miners boot to Nakamoto. +/// Miner 1 wins the first tenure. +/// Miner 1 proposes a block N with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B. +/// Miner 2 proposes block N+1' AFTER signers' block proposal timeout. +/// Signers reject block N+1' and mark miner 2 as malicious +/// Miner 1 proposes block N+1 with a TenureChangeCause::Extended +/// Signers accept and the stacks tip advances to N+1 +/// Miner 2 wins the third tenure C and proposes a block N+2 with a TenureChangeCause::BlockFound +/// Signers accept block N+2. +/// +/// Asserts: +/// - Block N contains the TenureChangeCause::BlockFound +/// - Block N+1 contains the TenureChangeCause::Extended +/// - Block N+2 contains the TenureChangeCause::BlockFound +/// - The stacks tip advances to N+2 +#[test] +#[ignore] +fn prev_miner_extends_if_incoming_miner_fails_to_mine() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + debug!("Node 1 bound at (p2p={node_1_p2p}, rpc={node_1_rpc})"); + debug!("Node 2 bound at (p2p={node_2_p2p}, rpc={node_2_rpc})"); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + let block_proposal_timeout = Duration::from_secs(30); + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 
for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = block_proposal_timeout; + }, + |config| { + config.miner.tenure_extend_wait_timeout = block_proposal_timeout; + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = 
format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_submitted_commit_last_stacks_tip: rl2_commit_last_stacks_tip, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = 
blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 1's Block N -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + // Pause the block proposal broadcast so that miner 2 will be unable to broadcast its + // tenure change proposal BEFORE the block_proposal_timeout and will 
be marked invalid. + TEST_BROADCAST_STALL.set(true); + + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + ); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + // Make sure that miner 2 gets marked invalid by not proposing a block BEFORE block_proposal_timeout + std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + + let stacks_height_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + assert_eq!(stacks_height_before, stacks_height_after); + + let nmb_old_blocks = test_observer::get_blocks().len(); + // Unpause both miner's block proposals + TEST_BROADCAST_STALL.set(false); + + info!("------------------------- Wait for Miner 2's Block N+1' ------------------------"; + "stacks_height_before" => %stacks_height_before, + "nmb_old_blocks" => %nmb_old_blocks); + + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + Ok(stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!("------------------------- Verify Miner 2's N+1' was Rejected and Miner 1's N+1 Accepted-------------------------"); + + let mut miner_1_block_n_1 = None; + let mut miner_2_block_n_1 = None; + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| 
chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 { + continue; + } + if miner_pk == mining_pk_1 { + miner_1_block_n_1 = Some(proposal.block); + } else if miner_pk == mining_pk_2 { + miner_2_block_n_1 = Some(proposal.block); + } + } + Ok(miner_1_block_n_1.is_some() && miner_2_block_n_1.is_some()) + }) + .expect("Timed out waiting for N+1 and N+1' block proposals from miners 1 and 2"); + + let miner_1_block_n_1 = miner_1_block_n_1.expect("No block proposal from miner 1"); + let miner_2_block_n_1 = miner_2_block_n_1.expect("No block proposal from miner 2"); + + // Miner 2's proposed block should get rejected by all the signers + let mut found_miner_2_rejections = HashSet::new(); + let mut found_miner_1_accepts = HashSet::new(); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + signature, + .. + })) => { + if signer_signature_hash == miner_1_block_n_1.header.signer_signature_hash() { + found_miner_1_accepts.insert(signature); + } + } + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. 
+ })) => { + if signer_signature_hash == miner_2_block_n_1.header.signer_signature_hash() { + found_miner_2_rejections.insert(signature); + } + } + _ => {} + } + } + Ok(found_miner_2_rejections.len() >= num_signers * 3 / 10 + && found_miner_1_accepts.len() >= num_signers * 7 / 10) + }) + .expect("Timed out waiting for expeceted block responses"); + + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let last_mined = nakamoto_blocks.last().unwrap(); + assert_eq!( + last_mined.signer_signature_hash, + miner_1_block_n_1.header.signer_signature_hash() + ); + let tip_block_header_hash = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip; + assert_eq!(tip_block_header_hash.to_string(), last_mined.block_hash); + + info!( + "------------------------- Verify Tenure Change Extend Tx in Miner 1's Block N+1 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + assert_eq!( + get_chain_info(&conf).stacks_tip_height, + stacks_height_before + ); + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); + let stacks_height_before = get_chain_info(&conf).stacks_tip_height; + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's commits + rl2_skip_commit_op.set(false); + + // Ensure that both miners' commits point at the stacks tip + wait_for(30, || { + let last_committed_2 = rl2_commit_last_stacks_tip.load(Ordering::SeqCst); + Ok(last_committed_2 >= stacks_height_before + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from Miner 2"); + + let nmb_old_blocks = test_observer::get_blocks().len(); + let burn_height_before = get_burn_height(); + let block_before = 
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + + info!("------------------------- Miner 2 Mines a Normal Tenure C -------------------------"; + "nmb_old_blocks" => %nmb_old_blocks, + "burn_height_before" => burn_height_before); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + > block_before) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Wait for Miner 2's Block N+2 -------------------------"; + "stacks_height_before" => %stacks_height_before, + "nmb_old_blocks" => %nmb_old_blocks); + + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }) + .expect("Timed out waiting for block N+2 to be mined and processed"); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 2's Block N+2 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + assert_eq!( + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height, + starting_peer_height + 3 + ); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + 
run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +/// Test a scenario where: +/// Two miners boot to Nakamoto. +/// Miner 1 wins the first tenure. +/// Miner 1 proposes a block N with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B. +/// Miner 2 proposes block N+1 with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N +/// +/// Asserts: +/// - Block N contains the TenureChangeCause::BlockFound +/// - Block N+1 contains the TenureChangeCause::BlockFound +/// - The stacks tip advances to N+1 +/// - Miner 1 does not produce a tenure extend block +#[test] +#[ignore] +fn prev_miner_will_not_extend_if_incoming_miner_mines() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + debug!("Node 1 bound at (p2p={node_1_p2p}, rpc={node_1_rpc})"); + debug!("Node 2 bound at (p2p={node_2_p2p}, rpc={node_2_rpc})"); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + let block_proposal_timeout = Duration::from_secs(100); + let tenure_extend_wait_timeout = Duration::from_secs(20); + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = 
SignerTest::new_with_config_modifications( + num_signers, + vec![], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = block_proposal_timeout; + }, + |config| { + config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout; + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + 
conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true);
+
+ info!("------------------------- Boot to Epoch 3.0 -------------------------");
+
+ let run_loop_2_thread = thread::Builder::new()
+ .name("run_loop_2".into())
+ .spawn(move || run_loop_2.start(None, 0))
+ .unwrap();
+
+ signer_test.boot_to_epoch_3();
+
+ wait_for(120, || {
+ let Some(node_1_info) = get_chain_info_opt(&conf) else {
+ return Ok(false);
+ };
+ let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else {
+ return Ok(false);
+ };
+ Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
+ })
+ .expect("Timed out waiting for bootstrapped node to catch up to the miner");
+
+ let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap());
+ let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap());
+ let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1);
+ let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2);
+ debug!("The mining key for miner 1 is {mining_pkh_1}");
+ debug!("The mining key for miner 2 is {mining_pkh_2}");
+
+ info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+ let burnchain = signer_test.running_nodes.conf.get_burnchain();
+ let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+ let get_burn_height = || {
+ SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+ .unwrap()
+ .block_height
+ };
+ let starting_peer_height = get_chain_info(&conf).stacks_tip_height;
+ let starting_burn_height = get_burn_height();
+ let mut btc_blocks_mined = 0;
+
+ info!("------------------------- Pause Miner 1's Block Commit -------------------------");
+ // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block
+ signer_test
+ .running_nodes
+ .nakamoto_test_skip_commit_op
+ .set(true);
+
+ info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------");
+ let blocks_processed_before_1 = 
blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 1's Block N -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + + info!("------------------------- Miner 2 Mines Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "stacks_height_before" 
=> stacks_height_before + ); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > stacks_height_before) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Get Miner 2's N+1' block -------------------------"); + + let mut miner_2_block_n_1 = None; + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 { + continue; + } + assert_eq!(miner_pk, mining_pk_2); + miner_2_block_n_1 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+1 from miner 2"); + + let mut miner_2_block_n_1 = miner_2_block_n_1.expect("No block proposal from miner 2"); + + // Miner 2's proposed block should get approved and pushed + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let 
SignerMessage::BlockPushed(pushed_block) = message {
+ if pushed_block.header.signer_signature_hash()
+ == miner_2_block_n_1.header.signer_signature_hash()
+ {
+ miner_2_block_n_1 = pushed_block;
+ return Ok(true);
+ }
+ }
+ }
+ Ok(false)
+ })
+ .expect("Timed out waiting for expected block responses");
+
+ let tip_block_header_hash = signer_test
+ .stacks_client
+ .get_peer_info()
+ .expect("Failed to get peer info")
+ .stacks_tip;
+ assert_eq!(
+ tip_block_header_hash.to_string(),
+ miner_2_block_n_1.header.block_hash().to_string()
+ );
+
+ info!(
+ "------------------------- Verify Tenure Change Block Found Tx in Miner 2's Block N+1 -------------------------"
+ );
+ assert_eq!(
+ miner_2_block_n_1
+ .get_tenure_change_tx_payload()
+ .unwrap()
+ .cause,
+ TenureChangeCause::BlockFound
+ );
+
+ let stacks_height_before = signer_test
+ .stacks_client
+ .get_peer_info()
+ .expect("Failed to get peer info")
+ .stacks_tip_height;
+
+ info!("------------------------- Ensure Miner 1 Never Issues a Tenure Extend -------------------------"; 
+ "stacks_height_before" => %stacks_height_before,
+ "nmb_old_blocks" => %nmb_old_blocks);
+
+ // Ensure the tenure extend wait timeout is passed
+ std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1)));
+
+ assert!(wait_for(30, || {
+ let stacks_height = signer_test
+ .stacks_client
+ .get_peer_info()
+ .expect("Failed to get peer info")
+ .stacks_tip_height;
+ Ok(stacks_height > stacks_height_before)
+ })
+ .is_err());
+
+ info!(
+ "------------------------- Confirm Burn and Stacks Block Heights -------------------------"
+ );
+ assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined);
+ assert_eq!(
+ signer_test
+ .stacks_client
+ .get_peer_info()
+ .expect("Failed to get peer info")
+ .stacks_tip_height,
+ starting_peer_height + 2
+ );
+
+ info!("------------------------- Shutdown -------------------------");
+ rl2_coord_channels
+ .lock()
+ .expect("Mutex poisoned")
+ .stop_chains_coordinator();
+ 
run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} From 8029fea1c9a003190190f73f22989aefe2705d59 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 5 Feb 2025 12:40:38 -0800 Subject: [PATCH 02/13] WIP: still not sure Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 86ae5a4de4..4384ee388e 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1445,7 +1445,7 @@ impl RelayerThread { "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, "canonical_stacks_tip_miner" => ?canonical_stacks_snapshot.miner_pk_hash, - "burn_view_ch" => %new_burn_view, + "burn_view_ch" => %new_burn_view ); if !won_ongoing_tenure_sortition { @@ -1458,8 +1458,8 @@ impl RelayerThread { return Ok(None); } - // Allow the miner to extend its tenure if won the highest valid sortition IFF - // it determines that the miners of the sortition fails to produce a block + // Allow the miner to extend its tenure even if there are higher valid sortition IFF + // it determines that the miners of the higher sortition fails to produce a block // by the required timeout. if let Some(highest_valid_sortition) = Self::find_highest_valid_sortition( sortdb, @@ -1467,12 +1467,7 @@ impl RelayerThread { &sort_tip, &canonical_stacks_snapshot.consensus_hash, )? { - // TODO: I don't understand why this works? HELP??? 
- if sort_tip.consensus_hash != highest_valid_sortition.consensus_hash { - info!("Relayer: will not extend tenure -- we won sortition {}, but the highest valid sortition is {}", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); - return Ok(None); - } - info!("Relayer: MAY extend tenure -- we won sortition {}, but must give miner time to produce a valid block for the highest valid sortition {}", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); + info!("Relayer: waiting to extend tenure -- we won sortition {}, but the highest valid sortition is {}. Must give the incoming miner time to produce a block.", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); return Ok(Some((canonical_stacks_snapshot, true))); } // There cannot be any higher sortitions that are valid (as defined above). @@ -1819,7 +1814,7 @@ impl RelayerThread { // Should begin a tenure-extend? if let Some(tenure_extend_time) = &self.tenure_extend_time { if !tenure_extend_time.should_extend() { - test_debug!( + debug!( "Relayer: will not try to tenure-extend yet ({} <= {})", tenure_extend_time.elapsed().as_secs(), tenure_extend_time.timeout().as_secs() @@ -1833,6 +1828,7 @@ impl RelayerThread { // reset timer so we can try again if for some reason a miner was already running (e.g. a // blockfound from earlier). 
+ debug!("NO HERE"); self.tenure_extend_time = Some(TenureExtendTime::delayed( self.config.miner.tenure_extend_poll_timeout, )); From 11316eac9adf45b0d3eedf89babc3f0fe5f56990 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 5 Feb 2025 12:44:21 -0800 Subject: [PATCH 03/13] CRC: basic code review comments about comments and logs Signed-off-by: Jacinta Ferrant --- CHANGELOG.md | 2 +- stackslib/src/config/mod.rs | 4 ++-- testnet/stacks-node/src/nakamoto_node/relayer.rs | 4 ++-- testnet/stacks-node/src/tests/signer/v0.rs | 7 +++++-- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7afe8459e7..140f9ba053 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added - Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted -- Add miner configuration option `tenure_extend_wait_timeout_ms` to specify the time to wait to try to continue a tenure if a BlockFound is expected +- Add miner configuration option `tenure_extend_wait_timeout_ms` to specify the time to wait before trying to continue a tenure because the next miner did not produce blocks ### Changed diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b330aab1af..b3a55cb844 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -114,7 +114,7 @@ const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; /// see if we need to extend the ongoing tenure (e.g. because the current /// sortition is empty or invalid). 
const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; -/// Default number of millis to wait to try to continue a tenure if a BlockFound is expected +/// Default number of millis to wait before trying to continue a tenure because the next miner did not produce blocks const DEFAULT_TENURE_EXTEND_WAIT_MS: u64 = 120_000; /// Default duration to wait before attempting to issue a tenure extend. /// This should be greater than the signers' timeout. This is used for issuing @@ -2182,7 +2182,7 @@ pub struct MinerConfig { /// Duration to wait in-between polling the sortition DB to see if we need to /// extend the ongoing tenure (e.g. because the current sortition is empty or invalid). pub tenure_extend_poll_timeout: Duration, - /// Duration to wait to try to continue a tenure if a BlockFound is expected + /// Duration to wait before trying to continue a tenure because the next miner did not produce blocks pub tenure_extend_wait_timeout: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 4384ee388e..0aba47f121 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -581,12 +581,12 @@ impl RelayerThread { // we can continue our ongoing tenure, but we should give the new winning miner // a chance to send their BlockFound first. if wait_for_miner { - debug!("Relayer: Did not win sortition, but am mining the ongoing tenure. Allowing the new miner some time to come online before trying to continue."); + info!("Did not win sortition, but am mining the ongoing tenure. Allowing the new miner some time to come online before trying to continue."); self.tenure_extend_time = Some(TenureExtendTime::delayed( self.config.miner.tenure_extend_wait_timeout, )); } else { - debug!("Relayer: Did not win sortition, but am mining the ongoing tenure. 
Will try to continue tenure immediately."); + info!("Did not win sortition, but am mining the ongoing tenure. Will try to continue tenure immediately."); self.tenure_extend_time = Some(TenureExtendTime::immediate()); } return Some(MinerDirective::StopTenure); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 279bba32f1..c8b2a6e8b2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6586,8 +6586,8 @@ fn continue_after_fast_block_no_sortition() { let node_2_rpc = gen_random_port(); let node_2_p2p = gen_random_port(); - debug!("Node 1 bound at (p2p={}, rpc={})", node_1_p2p, node_1_rpc); - debug!("Node 2 bound at (p2p={}, rpc={})", node_2_p2p, node_2_rpc); + debug!("Node 1 bound at (p2p={node_1_p2p}, rpc={node_1_rpc})"); + debug!("Node 2 bound at (p2p={node_2_p2p}, rpc={node_2_rpc})"); let localhost = "127.0.0.1"; let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -7058,6 +7058,9 @@ fn continue_after_fast_block_no_sortition() { &mut signer_test.running_nodes.btc_regtest_controller, 60, || { + if blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 { + debug!("MINER 1 BLOCKS PROCESSED HERE"); + } Ok(get_burn_height() > burn_height_before && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 && test_observer::get_blocks().len() > nmb_old_blocks) From 71eadc4571b941a6b665ebcc1be016f9a5db9702 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 6 Feb 2025 10:48:12 -0800 Subject: [PATCH 04/13] WIP: got tests working, but gotta clean up Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 484 +++++++++--------- .../src/tests/nakamoto_integrations.rs | 34 +- testnet/stacks-node/src/tests/signer/v0.rs | 18 +- 3 files changed, 258 insertions(+), 278 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 
0aba47f121..8b13a85252 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -543,11 +543,20 @@ impl RelayerThread { won_sortition: bool, committed_index_hash: StacksBlockId, ) -> Option { - let (cur_stacks_tip_ch, _) = + // Reset the tenure extend time as we need to process this new sortition to determine if we should extend + self.tenure_extend_time = None; + let (canonical_stacks_tip_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); + let canonical_stacks_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .expect("FATAL: failed to query sortiiton DB for epoch") + .expect("FATAL: no sortition for canonical stacks tip"); - self.tenure_extend_time = None; + let mining_pkh_opt = self.get_mining_key_pkh(); + + let won_ongoing_tenure_sortition = + mining_pkh_opt.is_some() && canonical_stacks_snapshot.miner_pk_hash == mining_pkh_opt; if sn.sortition { // a sortition happened @@ -569,56 +578,45 @@ impl RelayerThread { &sn.sortition ); - let mining_pkh_opt = self.get_mining_key_pkh(); - - match Self::can_continue_tenure( - &self.sortdb, - &mut self.chainstate, - sn.consensus_hash, - mining_pkh_opt, - ) { - Ok(Some((_, wait_for_miner))) => { - // we can continue our ongoing tenure, but we should give the new winning miner - // a chance to send their BlockFound first. - if wait_for_miner { - info!("Did not win sortition, but am mining the ongoing tenure. Allowing the new miner some time to come online before trying to continue."); + if won_ongoing_tenure_sortition { + // we won the current ongoing tenure, but not the most recent sortition. Should we attempt to extend immediately or wait for the incoming miner? 
+ if let Ok(result) = Self::find_highest_valid_sortition( + &self.sortdb, + &mut self.chainstate, + &sn, + &canonical_stacks_snapshot.consensus_hash, + ) { + if result.is_some() { + debug!("Relayer: Did not win current sortition but won the prior valid sortition. Will attempt to extend tenure after allowing the new miner some time to come online."; + "tenure_extend_wait_timeout_ms" => self.config.miner.tenure_extend_wait_timeout.as_millis(), + ); self.tenure_extend_time = Some(TenureExtendTime::delayed( self.config.miner.tenure_extend_wait_timeout, )); } else { - info!("Did not win sortition, but am mining the ongoing tenure. Will try to continue tenure immediately."); + info!("Relayer: no valid sortition since our last winning sortition. Will extend tenure."); self.tenure_extend_time = Some(TenureExtendTime::immediate()); } - return Some(MinerDirective::StopTenure); - } - Ok(None) => { - return Some(MinerDirective::StopTenure); - } - Err(e) => { - warn!("Relayer: failed to check to see if we can continue tenure: {e:?}"); - return Some(MinerDirective::StopTenure); } } + return Some(MinerDirective::StopTenure); } // no sortition happened. // find out what epoch the Stacks tip is in. // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so // right now since this sortition has no winner. 
- let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &cur_stacks_tip_ch) - .expect("FATAL: failed to query sortiiton DB for epoch") - .expect("FATAL: no sortition for canonical stacks tip"); - - let cur_epoch = - SortitionDB::get_stacks_epoch(self.sortdb.conn(), stacks_tip_sn.block_height) - .expect("FATAL: failed to query sortition DB for epoch") - .expect("FATAL: no epoch defined for existing sortition"); + let cur_epoch = SortitionDB::get_stacks_epoch( + self.sortdb.conn(), + canonical_stacks_snapshot.block_height, + ) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no epoch defined for existing sortition"); if cur_epoch.epoch_id < StacksEpochId::Epoch30 { debug!( "As of sortition {}, there has not yet been a Nakamoto tip. Cannot mine.", - &stacks_tip_sn.consensus_hash + &canonical_stacks_snapshot.consensus_hash ); return None; } @@ -634,96 +632,44 @@ impl RelayerThread { return None; }; - if last_winning_snapshot.miner_pk_hash == self.get_mining_key_pkh() { - debug!( - "Relayer: we won the last winning sortition {}", + if self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { + info!( + "Relayer: will submit late BlockFound for {}", &last_winning_snapshot.consensus_hash ); - - // we won the last non-empty sortition. Has there been a BlockFound issued for it? - // This would be true if the stacks tip's tenure is at or descends from this snapshot. - // If there has _not_ been a BlockFound, then we should issue one. - let ih = self - .sortdb - .index_handle(&last_winning_snapshot.sortition_id); - let need_blockfound = if stacks_tip_sn.block_height > last_winning_snapshot.block_height - { - // stacks tip is ahead of this snapshot, so no BlockFound can be issued. 
- test_debug!("Relayer: stacks_tip_sn.block_height ({}) > last_winning_snapshot.block_height ({})", stacks_tip_sn.block_height, last_winning_snapshot.block_height); - false - } else if stacks_tip_sn.block_height == last_winning_snapshot.block_height - && stacks_tip_sn.consensus_hash == last_winning_snapshot.consensus_hash - { - // this is the ongoing tenure snapshot. A BlockFound has already been issued. We - // can instead opt to Extend - test_debug!( - "Relayer: ongoing tenure {} already represents last-winning snapshot", - &stacks_tip_sn.consensus_hash - ); - self.tenure_extend_time = Some(TenureExtendTime::immediate()); - false - } else { - // stacks tip's snapshot may be an ancestor of the last-won sortition. - // If so, then we can issue a BlockFound. - SortitionDB::get_ancestor_snapshot( - &ih, - stacks_tip_sn.block_height, - &last_winning_snapshot.sortition_id, - ) - .map_err(|e| { - error!("Relayer: Failed to load ancestor snapshot: {e:?}"); - e - }) - .ok() - .flatten() - .map(|sn| { - let need_blockfound = sn.consensus_hash == stacks_tip_sn.consensus_hash; - if !need_blockfound { - test_debug!( - "Relayer: stacks_tip_sn.consensus_hash ({}) != sn.consensus_hash ({})", - &stacks_tip_sn.consensus_hash, - &sn.consensus_hash - ); - } - need_blockfound - }) - .unwrap_or_else(|| { - test_debug!( - "Relayer: no ancestor at height {} off of sortition {} height {}", - stacks_tip_sn.block_height, - &last_winning_snapshot.consensus_hash, - last_winning_snapshot.block_height - ); - false - }) - }; - if need_blockfound { - info!( - "Relayer: will submit late BlockFound for {}", - &last_winning_snapshot.consensus_hash - ); - // prepare to extend after our BlockFound gets mined. 
- self.tenure_extend_time = Some(TenureExtendTime::immediate()); - return Some(MinerDirective::BeginTenure { - parent_tenure_start: StacksBlockId( - last_winning_snapshot.winning_stacks_block_hash.clone().0, - ), - burnchain_tip: sn, - election_block: last_winning_snapshot, - late: true, - }); - } + // prepare to immediately extend after our BlockFound gets mined. + self.tenure_extend_time = Some(TenureExtendTime::immediate()); + return Some(MinerDirective::BeginTenure { + parent_tenure_start: StacksBlockId( + last_winning_snapshot.winning_stacks_block_hash.clone().0, + ), + burnchain_tip: sn, + election_block: last_winning_snapshot, + late: true, + }); } - // try to continue our tenure if we produced the canonical Stacks tip. - if stacks_tip_sn.miner_pk_hash == self.get_mining_key_pkh() { - info!("Relayer: No sortition, but we produced the canonical Stacks tip. Will continue tenure."); + let won_last_winning_snapshot = + mining_pkh_opt.is_some() && last_winning_snapshot.miner_pk_hash == mining_pkh_opt; + let tip_is_last_winning_snapshot = mining_pkh_opt.is_some() + && canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height + && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash; - if last_winning_snapshot.miner_pk_hash != self.get_mining_key_pkh() { + if won_last_winning_snapshot && tip_is_last_winning_snapshot { + // this is the ongoing tenure snapshot. A BlockFound has already been issued. We + // can instead opt to Extend immediately + info!("Relayer: BlockFound already issued for the last winning sortition. Will extend tenure."); + return Some(MinerDirective::ContinueTenure { + new_burn_view: sn.consensus_hash, + }); + } + if won_ongoing_tenure_sortition { + info!("Relayer: No sortition, but we produced the canonical Stacks tip. 
Will extend tenure."); + if !won_last_winning_snapshot { // delay trying to continue since the last snasphot with a sortition was won // by someone else -- there's a chance that this other miner will produce a // BlockFound in the interim. - debug!("Relayer: Did not win last winning snapshot despite mining the ongoing tenure, so allowing the new miner some time to come online."); + debug!("Relayer: Did not win last winning snapshot despite mining the ongoing tenure. Will attempt to extend tenure after allowing the new miner some time to produce a block."); self.tenure_extend_time = Some(TenureExtendTime::delayed( self.config.miner.tenure_extend_wait_timeout, )); @@ -738,6 +684,83 @@ impl RelayerThread { return None; } + /// Determine if we need to issue a BlockFound. + /// + /// Returns true if we won the last non-empty sortition, and the stacks tip's snapshot is an ancestor of the last-won sortition + /// Returns false otherwise. + fn need_block_found( + &mut self, + canonical_stacks_snapshot: &BlockSnapshot, + last_winning_snapshot: &BlockSnapshot, + ) -> bool { + if last_winning_snapshot.miner_pk_hash != self.get_mining_key_pkh() { + debug!( + "Relayer: we did not win the last winning sortition {}", + &last_winning_snapshot.consensus_hash + ); + // We did not win the last winning snapshot, we do not need to issue a block found. + return false; + } + debug!( + "Relayer: we won the last winning sortition {}", + &last_winning_snapshot.consensus_hash + ); + // we won the last non-empty sortition. Has there been a BlockFound issued for it? + // This would be true if the stacks tip's tenure is at or descends from this snapshot. + // If there has _not_ been a BlockFound, then we should issue one. + let ih = self + .sortdb + .index_handle(&last_winning_snapshot.sortition_id); + if canonical_stacks_snapshot.block_height > last_winning_snapshot.block_height { + // stacks tip is ahead of this snapshot, so no BlockFound can be issued. 
+ test_debug!("Relayer: stacks_tip_sn.block_height ({}) > last_winning_snapshot.block_height ({})", canonical_stacks_snapshot.block_height, last_winning_snapshot.block_height); + false + } else if canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height + && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash + { + // this is the ongoing tenure snapshot. A BlockFound has already been issued. + test_debug!( + "Relayer: ongoing tenure {} already represents last-winning snapshot", + &canonical_stacks_snapshot.consensus_hash + ); + false + } else { + // stacks tip's snapshot may be an ancestor of the last-won sortition. + // If so, then we can issue a BlockFound. + SortitionDB::get_ancestor_snapshot( + &ih, + canonical_stacks_snapshot.block_height, + &last_winning_snapshot.sortition_id, + ) + .map_err(|e| { + error!("Relayer: Failed to load ancestor snapshot: {e:?}"); + e + }) + .ok() + .flatten() + .map(|sn| { + let need_blockfound = sn.consensus_hash == canonical_stacks_snapshot.consensus_hash; + if !need_blockfound { + test_debug!( + "Relayer: canonical_stacks_tip_ch ({}) != sn_consensus_hash ({})", + &canonical_stacks_snapshot.consensus_hash, + &sn.consensus_hash + ); + } + need_blockfound + }) + .unwrap_or_else(|| { + test_debug!( + "Relayer: no ancestor at height {} off of sortition {} height {}", + canonical_stacks_snapshot.block_height, + &last_winning_snapshot.consensus_hash, + last_winning_snapshot.block_height + ); + false + }) + } + } + /// Given the pointer to a recently processed sortition, see if we won the sortition, and /// determine what miner action (if any) to take. 
/// @@ -1276,7 +1299,6 @@ impl RelayerThread { chain_state: &mut StacksChainState, stacks_tip_id: &StacksBlockId, stacks_tip_sn: &BlockSnapshot, - burn_tip_ch: &ConsensusHash, sn: &BlockSnapshot, ) -> Result { if !sn.sortition { @@ -1293,34 +1315,30 @@ impl RelayerThread { if height_opt.is_none() { // parent_tenure_id is not an ancestor of stacks_tip_id debug!( - "Relayer: Sortition {} has winning commit hash {}, which is not canonical", - &sn.consensus_hash, &parent_tenure_id + "Relayer: Sortition {} has winning commit hash {parent_tenure_id}, which is not canonical", + &sn.consensus_hash ); return Ok(false); } - if sn.consensus_hash == *burn_tip_ch { - // sn is the sortition tip, so this sortition must commit to the tenure start block of - // the ongoing Stacks tenure. - let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( - &mut ic, - stacks_tip_id, - &stacks_tip_sn.consensus_hash - )? - .ok_or_else(|| { - error!( - "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip_id}" - ); - NakamotoNodeError::ParentNotFound - })?; + // The sortition must commit to the tenure start block of the ongoing Stacks tenure. + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut ic, + stacks_tip_id, + &stacks_tip_sn.consensus_hash, + )? 
+ .ok_or_else(|| { + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip_id}" + ); + NakamotoNodeError::ParentNotFound + })?; - let highest_tenure_start_block_id = - highest_tenure_start_block_header.index_block_hash(); - if highest_tenure_start_block_id != parent_tenure_id { - debug!("Relayer: Sortition {} is at the tip, but does not commit to {} so cannot be valid", &sn.consensus_hash, &parent_tenure_id; - "highest_tenure_start_block_header.block_id()" => %highest_tenure_start_block_id); - return Ok(false); - } + let highest_tenure_start_block_id = highest_tenure_start_block_header.index_block_hash(); + if highest_tenure_start_block_id != parent_tenure_id { + debug!("Relayer: Sortition {} is at the tip, but does not commit to {parent_tenure_id} so cannot be valid", &sn.consensus_hash; + "highest_tenure_start_block_header_block_id" => %highest_tenure_start_block_id); + return Ok(false); } Ok(true) @@ -1372,7 +1390,6 @@ impl RelayerThread { chain_state, &canonical_stacks_tip, &canonical_stacks_tip_sn, - &sort_tip.consensus_hash, &cursor, )? { return Ok(Some(cursor)); @@ -1393,90 +1410,11 @@ impl RelayerThread { } } - /// Determine if the miner can contine an existing tenure with the new sortition (identified - /// by `new_burn_view`) - /// - /// Assumes that the caller has already checked that the given miner has _not_ won the new - /// sortition. - /// - /// Returns Ok(Some(stacks-tip-election-snapshot, wait-for-miner) if the last-winning miner should attempt to extend - /// This can happen for two seperate reasons: - /// - the miner's election snapshot was the last-known valid and non-empty snapshot and therefore should extend immediately - /// - the node determines that a subsequent miner won sortition, but has not yet produced a valid block and should wait-for-miner before extending - /// - /// Returns OK(None) if the last-winning miner should not extend its tenure. - /// - /// Returns Err(..) 
on DB error - pub(crate) fn can_continue_tenure( - sortdb: &SortitionDB, - chain_state: &mut StacksChainState, - new_burn_view: ConsensusHash, - mining_key_opt: Option, - ) -> Result, NakamotoNodeError> { - let Some(mining_pkh) = mining_key_opt else { - return Ok(None); - }; - - // Get the necessary snapshots and state - let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); - - let canonical_stacks_tip = - StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - - let canonical_stacks_snapshot = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &canonical_stacks_tip_ch)? - .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for canonical tip"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - - let won_ongoing_tenure_sortition = - canonical_stacks_snapshot.miner_pk_hash == Some(mining_pkh); - - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let won_current_tip = sort_tip.miner_pk_hash == Some(mining_pkh); - - info!( - "Relayer: Checking for tenure continuation."; - "won_ongoing_tenure_sortition" => won_ongoing_tenure_sortition, - "won_current_tip" => won_current_tip, - "current_mining_pkh" => %mining_pkh, - "canonical_stacks_tip_id" => %canonical_stacks_tip, - "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - "canonical_stacks_tip_miner" => ?canonical_stacks_snapshot.miner_pk_hash, - "burn_view_ch" => %new_burn_view - ); - - if !won_ongoing_tenure_sortition { - info!("Relayer: Did not win the last sortition that commits to our Stacks fork. Cannot continue tenure."); - return Ok(None); - } - - if won_current_tip { - info!("Relayer: Won current sortition, so no need to continue tenure. 
Just start a new one."); - return Ok(None); - } - - // Allow the miner to extend its tenure even if there are higher valid sortition IFF - // it determines that the miners of the higher sortition fails to produce a block - // by the required timeout. - if let Some(highest_valid_sortition) = Self::find_highest_valid_sortition( - sortdb, - chain_state, - &sort_tip, - &canonical_stacks_snapshot.consensus_hash, - )? { - info!("Relayer: waiting to extend tenure -- we won sortition {}, but the highest valid sortition is {}. Must give the incoming miner time to produce a block.", &canonical_stacks_snapshot.consensus_hash, &highest_valid_sortition.consensus_hash); - return Ok(Some((canonical_stacks_snapshot, true))); - } - // There cannot be any higher sortitions that are valid (as defined above). - Ok(Some((canonical_stacks_snapshot, false))) - } - /// Attempt to continue a miner's tenure into the next burn block. /// This is allowed if the miner won the last good sortition -- that is, the sortition which /// elected the local view of the canonical Stacks fork's ongoing tenure. + /// Or if the miner won the last valid sortition prior to the current and the current miner + /// has failed to produce a block before the required timeout. /// /// This function assumes that the caller has checked that the sortition referred to by /// `new_burn_view` does not have a sortition winner or that the winner has not produced a @@ -1488,17 +1426,25 @@ impl RelayerThread { } debug!("Relayer: successfully stopped tenure; will try to continue."); - let mining_pkh_opt = self.get_mining_key_pkh(); - let Some((canonical_stacks_tip_election_snapshot, _)) = Self::can_continue_tenure( - &self.sortdb, - &mut self.chainstate, - new_burn_view.clone(), - mining_pkh_opt, - )? 
+ // try to extend, but only if we aren't already running a thread for the current or newer + // burnchain view + let Ok(sn) = + SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).inspect_err(|e| { + error!("Relayer: failed to read canonical burnchain sortition: {e:?}"); + }) else { return Ok(()); }; + if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() { + // a miner thread is already running. If its burn view is the same as the canonical + // tip, then do nothing + if sn.consensus_hash == miner_thread_burn_view.consensus_hash { + info!("Relayer: will not tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %sn.consensus_hash); + return Ok(()); + } + } + // Get the necessary snapshots and state let burn_tip = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view)? @@ -1510,14 +1456,21 @@ impl RelayerThread { SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); let canonical_stacks_tip = StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - + let canonical_stacks_snapshot = SortitionDB::get_block_snapshot_consensus( + self.sortdb.conn(), + &canonical_stacks_tip_ch, + )? 
+ .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; let reason = MinerReason::Extended { burn_view_consensus_hash: new_burn_view.clone(), }; if let Err(e) = self.start_new_tenure( canonical_stacks_tip.clone(), - canonical_stacks_tip_election_snapshot.clone(), + canonical_stacks_snapshot.clone(), burn_tip.clone(), reason.clone(), &new_burn_view, @@ -1528,7 +1481,7 @@ impl RelayerThread { "parent_tenure_start" => %canonical_stacks_tip, "burn_tip" => %burn_tip.consensus_hash, "burn_view_snapshot" => %burn_tip.consensus_hash, - "block_election_snapshot" => %canonical_stacks_tip_election_snapshot.consensus_hash, + "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash, "reason" => %reason); } Ok(()) @@ -1814,7 +1767,7 @@ impl RelayerThread { // Should begin a tenure-extend? if let Some(tenure_extend_time) = &self.tenure_extend_time { if !tenure_extend_time.should_extend() { - debug!( + test_debug!( "Relayer: will not try to tenure-extend yet ({} <= {})", tenure_extend_time.elapsed().as_secs(), tenure_extend_time.timeout().as_secs() @@ -1825,19 +1778,16 @@ impl RelayerThread { // No tenure extend time set, so nothing to do. return; } - // reset timer so we can try again if for some reason a miner was already running (e.g. a // blockfound from earlier). 
- debug!("NO HERE"); self.tenure_extend_time = Some(TenureExtendTime::delayed( self.config.miner.tenure_extend_poll_timeout, )); - // try to extend, but only if we aren't already running a thread for the current or newer // burnchain view let Ok(sn) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).inspect_err(|e| { - error!("Relayer: failed to read canonical burnchain sortition: {e:?}"); + error!("Failed to read canonical burnchain sortition: {e:?}"); }) else { return; @@ -1845,18 +1795,72 @@ impl RelayerThread { if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() { // a miner thread is already running. If its burn view is the same as the canonical - // tip, then do nothing + // tip, then do nothing for now if sn.consensus_hash == miner_thread_burn_view.consensus_hash { - info!("Relayer: will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %sn.consensus_hash); + info!("Will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %sn.consensus_hash); return; } } + let Some(mining_pk) = self.get_mining_key_pkh() else { + // This shouldn't really ever hit, but just in case. + warn!("Will not tenure extend -- no mining key"); + return; + }; + + let (canonical_stacks_tip_ch, _) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + let canonical_stacks_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .expect("FATAL: failed to query sortition DB for epoch") + .expect("FATAL: no sortition for canonical stacks tip"); + + let won_sortition = sn.sortition && sn.miner_pk_hash == Some(mining_pk); + + if won_sortition { + debug!("Will not tenure extend. 
Won current sortition"; + "burn_chain_sortition_tip_ch" => %sn.consensus_hash, + "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, + ); + return; + } + + let won_ongoing_tenure_sortition = + canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); + + if !won_ongoing_tenure_sortition { + // We did not win the ongoing tenure sortition, so nothing we can even do. + debug!("Will not tenure extend. Did not win ongoing tenure sortition"; + "burn_chain_sortition_tip_ch" => %sn.consensus_hash, + "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, + "burn_chain_sortition_tip_mining_pk" => ?sn.miner_pk_hash, + "mining_pk" => %mining_pk, + ); + return; + } + let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &sn) + .inspect_err(|e| { + warn!("Failed to load last winning snapshot: {e:?}"); + }) + else { + // this should be unreachable, but don't tempt fate. + info!("No prior snapshots have a winning sortition. Will not try to mine."); + return; + }; + + if self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { + info!("Will not extend tenure -- need to issue a BlockFound first"); + return; + } + if let Err(e) = self.continue_tenure(sn.consensus_hash.clone()) { warn!( - "Relayer: failed to continue tenure for burn view {}: {e:?}", + "Failed to continue tenure for burn view {}: {e:?}", &sn.consensus_hash ); + } else { + self.tenure_extend_time = None; } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 54bb65d777..9aa26cfe92 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -99,7 +99,7 @@ use super::bitcoin_regtest::BitcoinCoreController; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, }; -use crate::nakamoto_node::relayer::{RelayerThread, TEST_MINER_THREAD_STALL}; +use 
crate::nakamoto_node::relayer::TEST_MINER_THREAD_STALL; use crate::neon::{Counters, RunLoopCounter}; use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; @@ -10764,8 +10764,6 @@ fn test_tenure_extend_from_flashblocks() { signer_test.boot_to_epoch_3(); let naka_conf = signer_test.running_nodes.conf.clone(); - let mining_key = naka_conf.miner.mining_key.clone().unwrap(); - let mining_key_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(&mining_key)); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; @@ -10781,13 +10779,6 @@ fn test_tenure_extend_from_flashblocks() { let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - let (mut chainstate, _) = StacksChainState::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - None, - ) - .unwrap(); for _ in 0..3 { next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); @@ -10896,29 +10887,6 @@ fn test_tenure_extend_from_flashblocks() { // the sortition that elected the ongoing tenure is not the canonical sortition tip assert_ne!(sort_tip.consensus_hash, election_tip.consensus_hash); - // we can, however, continue the tenure - let (canonical_stacks_tip, wait) = RelayerThread::can_continue_tenure( - &sortdb, - &mut chainstate, - sort_tip.consensus_hash.clone(), - Some(mining_key_pkh.clone()), - ) - .unwrap() - .unwrap(); - assert!(!wait); - assert_eq!(canonical_stacks_tip, election_tip); - - // if we didn't win the last block -- tantamount to the sortition winner miner key being - // different -- then we can't continue the tenure. 
- assert!(RelayerThread::can_continue_tenure( - &sortdb, - &mut chainstate, - sort_tip.consensus_hash.clone(), - Some(Hash160([0x11; 20])) - ) - .unwrap() - .is_none()); - let mut accounts_before = vec![]; let mut sent_txids = vec![]; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c8b2a6e8b2..8574715f98 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -3747,6 +3747,8 @@ fn empty_sortition_before_proposal() { let info = get_chain_info(&signer_test.running_nodes.conf); info!("Current state: {:?}", info); + info!("------------------------- Ensure Miner Extends Tenure -------------------------"); + // Wait for a block with a tenure extend to be mined wait_for(60, || { let blocks = test_observer::get_blocks(); @@ -3773,6 +3775,8 @@ fn empty_sortition_before_proposal() { }) .expect("Timed out waiting for tenure extend"); + info!("------------------------- Test Miner Mines Transfer Tx -------------------------"); + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; // submit a tx so that the miner will mine an extra block @@ -3793,6 +3797,8 @@ fn empty_sortition_before_proposal() { }) .expect("Failed to advance chain tip with STX transfer"); + info!("------------------------- Test Miner Tenure C -------------------------"); + next_block_and_process_new_stacks_block( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3800,6 +3806,8 @@ fn empty_sortition_before_proposal() { ) .expect("Failed to mine a normal tenure after the tenure extend"); + info!("------------------------- Shutdown -------------------------"); + signer_test.shutdown(); } @@ -7058,9 +7066,6 @@ fn continue_after_fast_block_no_sortition() { &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - if blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 { - debug!("MINER 1 BLOCKS PROCESSED HERE"); - } 
Ok(get_burn_height() > burn_height_before && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 && test_observer::get_blocks().len() > nmb_old_blocks) @@ -13109,6 +13114,9 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { assert!(tip.sortition); assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + info!( + "------------------------- Wait for Miner 2 to be Marked Invalid -------------------------" + ); // Make sure that miner 2 gets marked invalid by not proposing a block BEFORE block_proposal_timeout std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); @@ -13124,7 +13132,7 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { // Unpause both miner's block proposals TEST_BROADCAST_STALL.set(false); - info!("------------------------- Wait for Miner 2's Block N+1' ------------------------"; + info!("------------------------- Wait for Miner 1's Block N+1 to be Mined ------------------------"; "stacks_height_before" => %stacks_height_before, "nmb_old_blocks" => %nmb_old_blocks); @@ -13146,7 +13154,7 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { let mut miner_1_block_n_1 = None; let mut miner_2_block_n_1 = None; - wait_for(30, || { + wait_for(60, || { let chunks = test_observer::get_stackerdb_chunks(); for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) From 677fc8ac623f4a43f07eddb279c416efa86d34b1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 6 Feb 2025 13:12:26 -0800 Subject: [PATCH 05/13] Add prev_miner_extends_if_incoming_miner_fails_to_mine_failure test Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 40 +- .../src/tests/nakamoto_integrations.rs | 7 +- testnet/stacks-node/src/tests/signer/v0.rs | 733 ++++++++++++++++-- 3 files changed, 686 insertions(+), 94 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs 
b/testnet/stacks-node/src/nakamoto_node/miner.rs index 16b33ead7a..055279216d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -47,7 +47,11 @@ use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; +#[cfg(test)] +use stacks::util::secp256k1::Secp256k1PublicKey; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; +#[cfg(test)] +use stacks_common::types::PublicKey; use stacks_common::types::{PrivateKey, StacksEpochId}; #[cfg(test)] use stacks_common::util::tests::TestFlag; @@ -64,8 +68,9 @@ use crate::run_loop::RegisteredKey; /// Test flag to stall the miner thread pub static TEST_MINE_STALL: LazyLock> = LazyLock::new(TestFlag::default); #[cfg(test)] -/// Test flag to stall block proposal broadcasting -pub static TEST_BROADCAST_STALL: LazyLock> = LazyLock::new(TestFlag::default); +/// Test flag to stall block proposal broadcasting for the specified miner keys +pub static TEST_BROADCAST_STALL: LazyLock>> = + LazyLock::new(TestFlag::default); #[cfg(test)] pub static TEST_BLOCK_ANNOUNCE_STALL: LazyLock> = LazyLock::new(TestFlag::default); #[cfg(test)] @@ -253,21 +258,32 @@ impl BlockMinerThread { #[cfg(test)] fn fault_injection_block_broadcast_stall(new_block: &NakamotoBlock) { - if TEST_BROADCAST_STALL.get() { - // Do an extra check just so we don't log EVERY time. 
+ if TEST_BROADCAST_STALL.get().iter().any(|key| { + key.verify( + new_block.header.miner_signature_hash().as_bytes(), + &new_block.header.miner_signature, + ) + .unwrap_or_default() + }) { warn!("Fault injection: Broadcasting is stalled due to testing directive."; - "stacks_block_id" => %new_block.block_id(), - "stacks_block_hash" => %new_block.header.block_hash(), - "height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash ); - while TEST_BROADCAST_STALL.get() { + while TEST_BROADCAST_STALL.get().iter().any(|key| { + key.verify( + new_block.header.miner_signature_hash().as_bytes(), + &new_block.header.miner_signature, + ) + .unwrap_or_default() + }) { std::thread::sleep(std::time::Duration::from_millis(10)); } info!("Fault injection: Broadcasting is no longer stalled due to testing directive."; - "block_id" => %new_block.block_id(), - "height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash ); } } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e03c056fb9..8a17b03670 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5111,6 +5111,9 @@ fn forked_tenure_is_ignored() { &[EventKeyType::AnyEvent, EventKeyType::MinedBlocks], ); + let miner_sk = naka_conf.miner.mining_key.unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); btcd_controller .start_bitcoind() @@ -5188,7 +5191,7 @@ fn 
forked_tenure_is_ignored() { // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted. // Stall the miner thread; only wait until the number of submitted commits increases. - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); TEST_BLOCK_ANNOUNCE_STALL.set(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -5207,7 +5210,7 @@ fn forked_tenure_is_ignored() { // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to // be processed test_skip_commit_op.set(true); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); // Wait for a stacks block to be broadcasted. // However, it will not be processed. diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 8634825620..1a008df25e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -971,6 +971,8 @@ fn forked_tenure_testing( info!("------------------------- Reached Epoch 3.0 -------------------------"); let naka_conf = signer_test.running_nodes.conf.clone(); + let miner_sk = naka_conf.miner.mining_key.unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); let burnchain = naka_conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let (chainstate, _) = StacksChainState::open( @@ -1020,7 +1022,7 @@ fn forked_tenure_testing( .unwrap(); // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); TEST_BLOCK_ANNOUNCE_STALL.set(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -1045,7 +1047,7 @@ fn forked_tenure_testing( .running_nodes .nakamoto_test_skip_commit_op .set(true); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); // Wait for a stacks block to be broadcasted let start_time = Instant::now(); @@ 
-1965,7 +1967,7 @@ fn miner_forking() { info!("------------------------- RL1 Wins Sortition -------------------------"); info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![mining_pk_1, mining_pk_2]); let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); let burn_height_before = get_burn_height(); @@ -2028,7 +2030,7 @@ fn miner_forking() { // unblock block mining let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); // Wait for the block to be broadcasted and processed wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) @@ -2115,7 +2117,7 @@ fn miner_forking() { info!("------------------------- RL1 RBFs its Own Commit -------------------------"); info!("Pausing stacks block proposal to test RBF capability"); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![mining_pk_1, mining_pk_2]); let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); info!("Unpausing commits from RL1"); @@ -2153,7 +2155,7 @@ fn miner_forking() { let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); // unblock block mining let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); // Wait for the block to be broadcasted and processed wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) @@ -3348,9 +3350,11 @@ fn empty_sortition() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let short_timeout = Duration::from_secs(20); + let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); signer_test.boot_to_epoch_3(); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); info!("------------------------- Test Mine Regular Tenure A 
-------------------------"); let commits_before = signer_test @@ -3397,7 +3401,7 @@ fn empty_sortition() { .unwrap(); info!("Pausing stacks block proposal to force an empty tenure"); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); info!("Pausing commit op to prevent tenure C from starting..."); signer_test @@ -3430,7 +3434,7 @@ fn empty_sortition() { std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); info!("------------------------- Test Delayed Block is Rejected -------------------------"); let reward_cycle = signer_test.get_current_reward_cycle(); @@ -11649,6 +11653,8 @@ fn reorg_attempts_activity_timeout_exceeded() { ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); signer_test.boot_to_epoch_3(); let wait_for_block_proposal = || { @@ -11720,7 +11726,7 @@ fn reorg_attempts_activity_timeout_exceeded() { let block_proposal_n = wait_for_block_proposal().expect("Failed to get block proposal N"); let chain_after = get_chain_info(&signer_test.running_nodes.conf); assert_eq!(chain_after, chain_before); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); info!("------------------------- Start Tenure B -------------------------"); let commits_before = signer_test @@ -11758,7 +11764,7 @@ fn reorg_attempts_activity_timeout_exceeded() { // Make sure to wait the reorg_attempts_activity_timeout AFTER the block is globally signed over // as this is the point where signers start considering from. 
std::thread::sleep(reorg_attempts_activity_timeout.add(Duration::from_secs(1))); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); let block_proposal_n_prime = wait_for_block_proposal().expect("Failed to get block proposal N'"); assert_eq!( @@ -11766,7 +11772,7 @@ fn reorg_attempts_activity_timeout_exceeded() { chain_after.stacks_tip_height ); // Make sure that no subsequent proposal arrives before the block_proposal_timeout is exceeded - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); TEST_VALIDATE_STALL.set(false); // We only need to wait the difference between the two timeouts now since we already slept for a min of reorg_attempts_activity_timeout + 1 std::thread::sleep(block_proposal_timeout.saturating_sub(reorg_attempts_activity_timeout)); @@ -11783,7 +11789,7 @@ fn reorg_attempts_activity_timeout_exceeded() { info!("------------------------- Wait for Block N+1 Proposal -------------------------"); test_observer::clear(); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![miner_pk]); wait_for(30, || { let block_proposal_n_1 = wait_for_block_proposal().expect("Failed to get block proposal N+1"); @@ -12349,10 +12355,13 @@ fn block_proposal_timeout() { None, ); + let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); + signer_test.boot_to_epoch_3(); // Pause the miner's block proposals - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![miner_pk]); let wait_for_block_proposal = || { let mut block_proposal = None; @@ -12397,7 +12406,7 @@ fn block_proposal_timeout() { test_observer::clear(); info!("------------------------- Attempt Mine Block N -------------------------"); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_STALL.set(vec![]); let block_proposal_n = wait_for_block_proposal().expect("Failed to get block proposal N"); @@ -13072,7 +13081,7 @@ fn tenure_extend_cost_threshold() { 
signer_test.shutdown(); } -/// Test a scenario where: +/// Test a scenario where a previous miner can extend a tenure when it is favoured by signers over the incoming miner. /// Two miners boot to Nakamoto. /// Miner 1 wins the first tenure. /// Miner 1 proposes a block N with a TenureChangeCause::BlockFound @@ -13092,7 +13101,7 @@ fn tenure_extend_cost_threshold() { /// - The stacks tip advances to N+2 #[test] #[ignore] -fn prev_miner_extends_if_incoming_miner_fails_to_mine() { +fn prev_miner_extends_if_incoming_miner_fails_to_mine_success() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -13301,12 +13310,6 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); info!("------------------------- Submit Miner 2 Block Commit -------------------------"); - let stacks_height_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .stacks_tip_height; - let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); // Unpause miner 2's block commits rl2_skip_commit_op.set(false); @@ -13323,7 +13326,7 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { let burn_height_before = get_burn_height(); // Pause the block proposal broadcast so that miner 2 will be unable to broadcast its // tenure change proposal BEFORE the block_proposal_timeout and will be marked invalid. 
- TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_STALL.set(vec![mining_pk_2]); info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; "burn_height_before" => burn_height_before, @@ -13345,20 +13348,15 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { info!( "------------------------- Wait for Miner 2 to be Marked Invalid -------------------------" ); - // Make sure that miner 2 gets marked invalid by not proposing a block BEFORE block_proposal_timeout - std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); - - let stacks_height_after = signer_test + let stacks_height_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - - assert_eq!(stacks_height_before, stacks_height_after); + // Make sure that miner 2 gets marked invalid by not proposing a block BEFORE block_proposal_timeout + std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); let nmb_old_blocks = test_observer::get_blocks().len(); - // Unpause both miner's block proposals - TEST_BROADCAST_STALL.set(false); info!("------------------------- Wait for Miner 1's Block N+1 to be Mined ------------------------"; "stacks_height_before" => %stacks_height_before, @@ -13377,12 +13375,15 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine() { }) .expect("Timed out waiting for block to be mined and processed"); + // Unpause miner 2's block proposal broadcast + TEST_BROADCAST_STALL.set(vec![]); + info!("------------------------- Verify Miner 2's N+1' was Rejected and Miner 1's N+1 Accepted-------------------------"); let mut miner_1_block_n_1 = None; let mut miner_2_block_n_1 = None; - wait_for(60, || { + wait_for(30, || { let chunks = test_observer::get_stackerdb_chunks(); for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) @@ -13560,23 +13561,30 @@ fn 
prev_miner_extends_if_incoming_miner_fails_to_mine() { signer_test.shutdown(); } -/// Test a scenario where: +/// Test a scenario where a previous miner is unable to extend its tenure if the signers are configured to favour the incoming miner. /// Two miners boot to Nakamoto. -/// Miner 1 wins the first tenure. +/// Miner 1 wins the first tenure A. /// Miner 1 proposes a block N with a TenureChangeCause::BlockFound /// Signers accept and the stacks tip advances to N /// Miner 2 wins the second tenure B. +/// Miner 2 is paused and cannot propose block N+1. +/// Miner 1 attempts to extend tenure A with block N+1' containg a TenureChangeCause::Extended +/// Signers reject block N+1' and the stacks tip remains at N +/// Miner 2 is unpaused /// Miner 2 proposes block N+1 with a TenureChangeCause::BlockFound -/// Signers accept and the stacks tip advances to N +/// Signers accept and the stacks tip advances to N+1 +/// Miner 2 wins the third tenure C +/// Miner 2 proposes block N+2 with a TenureChangeCause::BlockFound /// /// Asserts: /// - Block N contains the TenureChangeCause::BlockFound +/// - Block N+1' contains a TenureChangeCause::Extended and is rejected /// - Block N+1 contains the TenureChangeCause::BlockFound -/// - The stacks tip advances to N+1 -/// - Miner 1 does not produce a tenure extend block +/// - Block N+2 contains the TenureChangeCause::BlockFound +/// - The stacks tip advances to N+2 #[test] #[ignore] -fn prev_miner_will_not_extend_if_incoming_miner_mines() { +fn prev_miner_extends_if_incoming_miner_fails_to_mine_failure() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -13603,8 +13611,9 @@ fn prev_miner_will_not_extend_if_incoming_miner_mines() { let max_nakamoto_tenures = 30; - let block_proposal_timeout = Duration::from_secs(100); - let tenure_extend_wait_timeout = Duration::from_secs(20); + // Ensure Miner 1 will attempt to extend BEFORE signers are willing to consider it. 
+ let block_proposal_timeout = Duration::from_secs(500); // make it way in the future so miner 1 is rejected + let tenure_extend_wait_timeout = Duration::from_secs(30); info!("------------------------- Test Setup -------------------------"); // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 @@ -13686,6 +13695,7 @@ fn prev_miner_will_not_extend_if_incoming_miner_mines() { let Counters { naka_submitted_commits: rl2_commits, naka_skip_commit_op: rl2_skip_commit_op, + naka_submitted_commit_last_stacks_tip: rl2_commit_last_stacks_tip, .. } = run_loop_2.counters(); @@ -13805,42 +13815,47 @@ fn prev_miner_will_not_extend_if_incoming_miner_mines() { rl2_skip_commit_op.set(true); let burn_height_before = get_burn_height(); + // Pause the block proposal broadcast so that miner 2 will be unable to broadcast its + // tenure change proposal BEFORE miner 1 attempts to extend. + TEST_BROADCAST_STALL.set(vec![mining_pk_2]); - info!("------------------------- Miner 2 Mines Tenure B -------------------------"; + info!("------------------------- Miner 2 Wins Tenure B -------------------------"; "burn_height_before" => burn_height_before, - "stacks_height_before" => stacks_height_before + "stacks_height_before" => %stacks_height_before ); - let stacks_height_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .stacks_tip_height; - next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, - || { - Ok(get_burn_height() > burn_height_before - && signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .stacks_tip_height - > stacks_height_before) - }, + || Ok(get_burn_height() > burn_height_before), ) .unwrap(); btc_blocks_mined += 1; + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + // assure we have a successful sortition 
that miner 2 won let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); - info!("------------------------- Get Miner 2's N+1' block -------------------------"); + info!( + "------------------------- Wait for Miner 1 to think Miner 2 is Invalid -------------------------" + ); + // Make sure that miner 1 thinks miner 2 is invalid. + std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1))); - let mut miner_2_block_n_1 = None; + info!("------------------------- Wait for Miner 1's Block N+1' to be Proposed ------------------------"; + "stacks_height_before" => %stacks_height_before); + + let mut miner_1_block_n_1 = None; wait_for(30, || { let chunks = test_observer::get_stackerdb_chunks(); @@ -13857,16 +13872,98 @@ fn prev_miner_will_not_extend_if_incoming_miner_mines() { if block_stacks_height != stacks_height_before + 1 { continue; } - assert_eq!(miner_pk, mining_pk_2); - miner_2_block_n_1 = Some(proposal.block); - return Ok(true); + if miner_pk == mining_pk_1 { + miner_1_block_n_1 = Some(proposal.block); + return Ok(true); + } } Ok(false) }) - .expect("Timed out waiting for N+1 from miner 2"); + .expect("Timed out waiting for N+1 block proposals from miner 1"); + + let miner_1_block_n_1 = miner_1_block_n_1.expect("No block proposal from miner 1"); + + assert_eq!( + miner_1_block_n_1 + .try_get_tenure_change_payload() + .unwrap() + .cause, + TenureChangeCause::Extended + ); + + info!("------------------------- Verify that Miner 1's Block N+1' was Rejected ------------------------"); + + // Miner 1's proposed block should get rejected by the signers + let mut found_miner_1_rejections = HashSet::new(); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + 
}; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) = message + { + if signer_signature_hash == miner_1_block_n_1.header.signer_signature_hash() { + found_miner_1_rejections.insert(signature); + } + } + } + Ok(found_miner_1_rejections.len() >= num_signers * 3 / 10) + }) + .expect("Timed out waiting for expeceted block responses"); + + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + info!("------------------------- Wait for Miner 2's Block N+1 BlockFound to be Proposed ------------------------"; + "stacks_height_before" => %stacks_height_before + ); + + TEST_BROADCAST_STALL.set(vec![]); + + // Get miner 2's N+1 block proposal + let mut miner_2_block_n_1 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 { + continue; + } + if miner_pk == mining_pk_2 { + miner_2_block_n_1 = Some(proposal.block); + return Ok(true); + } + } + Ok(false) + }) + .expect("Timed out waiting for N+1 block proposals from miner 1"); let mut miner_2_block_n_1 = miner_2_block_n_1.expect("No block proposal from miner 2"); + info!("------------------------- Wait for Miner 2's Block N+1 to be Approved ------------------------"; + "stacks_height_before" => %stacks_height_before + ); + // Miner 2's proposed block should get approved and pushed wait_for(30, || { let chunks = test_observer::get_stackerdb_chunks(); @@ -13893,21 +13990,20 @@ fn 
prev_miner_will_not_extend_if_incoming_miner_mines() { .get_peer_info() .expect("Failed to get peer info") .stacks_tip; + assert_eq!(tip_block_header_hash, miner_2_block_n_1.header.block_hash()); assert_eq!( - tip_block_header_hash.to_string(), - miner_2_block_n_1.header.block_hash().to_string() + stacks_height_before + 1, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height ); info!( - "------------------------- Verify Tenure Change Block Found Tx in Miner 2's Block N+1 -------------------------" - ); - assert_eq!( - miner_2_block_n_1 - .get_tenure_change_tx_payload() - .unwrap() - .cause, - TenureChangeCause::BlockFound + "------------------------- Verify BlockFound in Miner 2's Block N+1 -------------------------" ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); let stacks_height_before = signer_test .stacks_client @@ -13915,14 +14011,58 @@ fn prev_miner_will_not_extend_if_incoming_miner_mines() { .expect("Failed to get peer info") .stacks_tip_height; - info!("------------------------- Ensure Miner 1 Never Isues a Tenure Extend -------------------------"; + assert_eq!( + get_chain_info(&conf).stacks_tip_height, + stacks_height_before + ); + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); + let stacks_height_before = get_chain_info(&conf).stacks_tip_height; + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's commits + rl2_skip_commit_op.set(false); + + // Ensure that both miners' commits point at the stacks tip + wait_for(30, || { + let last_committed_2 = rl2_commit_last_stacks_tip.load(Ordering::SeqCst); + Ok(last_committed_2 >= stacks_height_before + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from Miner 2"); + + let nmb_old_blocks = test_observer::get_blocks().len(); + let burn_height_before = get_burn_height(); + let block_before 
= SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + + info!("------------------------- Miner 2 Mines a Normal Tenure C -------------------------"; + "nmb_old_blocks" => %nmb_old_blocks, + "burn_height_before" => burn_height_before); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + > block_before) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Wait for Miner 2's Block N+2 -------------------------"; "stacks_height_before" => %stacks_height_before, "nmb_old_blocks" => %nmb_old_blocks); - // Ensure the tenure extend wait timeout is passed - std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1))); - - assert!(wait_for(30, || { + wait_for(30, || { let stacks_height = signer_test .stacks_client .get_peer_info() @@ -13930,7 +14070,440 @@ fn prev_miner_will_not_extend_if_incoming_miner_mines() { .stacks_tip_height; Ok(stacks_height > stacks_height_before) }) - .is_err()); + .expect("Timed out waiting for block N+2 to be mined and processed"); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 2's Block N+2 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + assert_eq!( + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height, + starting_peer_height + 3 + ); + + 
info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +/// Test a scenario where: +/// Two miners boot to Nakamoto. +/// Miner 1 wins the first tenure A. +/// Miner 1 proposes a block N with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B. +/// Miner 2 proposes block N+1 with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N+1 +/// Miner 1 never issues a TenureExtend transaction +/// Miner 2 wins the third tenure C +/// Miner 2 proposes block N+2 with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N+2 +/// +/// Asserts: +/// - Block N contains the TenureChangeCause::BlockFound +/// - Block N+1 contains the TenureChangeCause::BlockFound +/// - Block N+2 contains the TenureChangeCause::BlockFound +/// - The stacks tip advances to N+2 +/// - Miner 1 does not produce a tenure extend block at all +#[test] +#[ignore] +fn prev_miner_will_not_attempt_to_extend_if_incoming_miner_produces_a_block() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + debug!("Node 1 bound at (p2p={node_1_p2p}, rpc={node_1_rpc})"); + debug!("Node 2 bound at (p2p={node_2_p2p}, rpc={node_2_rpc})"); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = 
format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + let block_proposal_timeout = Duration::from_secs(100); + let tenure_extend_wait_timeout = Duration::from_secs(20); + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + signer_config.block_proposal_timeout = block_proposal_timeout; + }, + |config| { + config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout; + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + 
node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = 
blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 1's Block N -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + + info!("------------------------- Miner 2 Mines Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "stacks_height_before" 
=> stacks_height_before + ); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > stacks_height_before) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Get Miner 2's N+1' block -------------------------"); + + let mut miner_2_block_n_1 = None; + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 { + continue; + } + assert_eq!(miner_pk, mining_pk_2); + miner_2_block_n_1 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+1 from miner 2"); + + let mut miner_2_block_n_1 = miner_2_block_n_1.expect("No block proposal from miner 2"); + + // Miner 2's proposed block should get approved and pushed + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let 
SignerMessage::BlockPushed(pushed_block) = message { + if pushed_block.header.signer_signature_hash() + == miner_2_block_n_1.header.signer_signature_hash() + { + miner_2_block_n_1 = pushed_block; + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for expeceted block responses"); + + let tip_block_header_hash = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip; + assert_eq!( + tip_block_header_hash.to_string(), + miner_2_block_n_1.header.block_hash().to_string() + ); + + info!( + "------------------------- Verify Tenure Change Block Found Tx in Miner 2's Block N+1 -------------------------" + ); + assert_eq!( + miner_2_block_n_1 + .get_tenure_change_tx_payload() + .unwrap() + .cause, + TenureChangeCause::BlockFound + ); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + info!("------------------------- Ensure Miner 1 Never Isues a Tenure Extend -------------------------"; + "stacks_height_before" => %stacks_height_before, + "nmb_old_blocks" => %nmb_old_blocks); + + // Ensure the tenure extend wait timeout is passed so if a miner was going to extend, it would be now. 
+ std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1))); + + assert!(wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockProposal(proposed_block) = message { + if mining_pk_1 + .verify( + proposed_block + .block + .header + .miner_signature_hash() + .as_bytes(), + &proposed_block.block.header.miner_signature, + ) + .unwrap() + { + if let Some(payload) = proposed_block.block.try_get_tenure_change_payload() { + assert_ne!(payload.cause, TenureChangeCause::Extended) + } + } + } + } + Ok(false) + }) + .is_err()); + + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); info!( "------------------------- Confirm Burn and Stacks Block Heights -------------------------" From b2db43b522bdb7f2e869586485f311f9656a56be Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 6 Feb 2025 14:44:28 -0800 Subject: [PATCH 06/13] Add non_blocking_minority_configured_to_favour_incoming_miner and non_blocking_minority_configured_to_favour_prev_miner Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 1315 +++++++++++++++++++- 1 file changed, 1298 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1a008df25e..28bdca56fa 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13490,14 +13490,12 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine_success() { }) .expect("Timed out waiting for block commit from Miner 2"); - let nmb_old_blocks = test_observer::get_blocks().len(); let burn_height_before = get_burn_height(); let block_before = 
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; info!("------------------------- Miner 2 Mines a Normal Tenure C -------------------------"; - "nmb_old_blocks" => %nmb_old_blocks, "burn_height_before" => burn_height_before); next_block_and( @@ -13520,8 +13518,7 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine_success() { assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); info!("------------------------- Wait for Miner 2's Block N+2 -------------------------"; - "stacks_height_before" => %stacks_height_before, - "nmb_old_blocks" => %nmb_old_blocks); + "stacks_height_before" => %stacks_height_before); wait_for(30, || { let stacks_height = signer_test @@ -14005,18 +14002,12 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine_failure() { ); verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); let stacks_height_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - - assert_eq!( - get_chain_info(&conf).stacks_tip_height, - stacks_height_before - ); - info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); - let stacks_height_before = get_chain_info(&conf).stacks_tip_height; let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); // Unpause miner 2's commits rl2_skip_commit_op.set(false); @@ -14029,14 +14020,12 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine_failure() { }) .expect("Timed out waiting for block commit from Miner 2"); - let nmb_old_blocks = test_observer::get_blocks().len(); let burn_height_before = get_burn_height(); let block_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; info!("------------------------- Miner 2 Mines a Normal Tenure C -------------------------"; - "nmb_old_blocks" => %nmb_old_blocks, "burn_height_before" => 
burn_height_before); next_block_and( @@ -14059,8 +14048,7 @@ fn prev_miner_extends_if_incoming_miner_fails_to_mine_failure() { assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); info!("------------------------- Wait for Miner 2's Block N+2 -------------------------"; - "stacks_height_before" => %stacks_height_before, - "nmb_old_blocks" => %nmb_old_blocks); + "stacks_height_before" => %stacks_height_before); wait_for(30, || { let stacks_height = signer_test @@ -14461,8 +14449,7 @@ fn prev_miner_will_not_attempt_to_extend_if_incoming_miner_produces_a_block() { .stacks_tip_height; info!("------------------------- Ensure Miner 1 Never Isues a Tenure Extend -------------------------"; - "stacks_height_before" => %stacks_height_before, - "nmb_old_blocks" => %nmb_old_blocks); + "stacks_height_before" => %stacks_height_before); // Ensure the tenure extend wait timeout is passed so if a miner was going to extend, it would be now. std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1))); @@ -14527,3 +14514,1297 @@ fn prev_miner_will_not_attempt_to_extend_if_incoming_miner_produces_a_block() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +/// Test a scenario where a non-blocking minority of miners are configured to favour the incoming miner. +/// The previous miner should extend its tenure and succeed as a majority are configured to favour it +/// and its subsequent blocks should be be approved. +/// Two miners boot to Nakamoto. +/// Miner 1 wins the first tenure A. +/// Miner 1 proposes a block N with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B. +/// A majority of signers mark miner 2 as invalid. +/// Miner 2 proposes block N+1' with a TenureChangeCause::BlockFound +/// A majority fo signers rekect block N+1'. 
+/// Miner 1 proposes block N+1 with a TenureChangeCause::Extended +/// A majority of signers accept and the stacks tip advances to N+1 +/// Miner 1 proposes block N+2 with a transfer tx +/// ALL signers should accept block N+2. +/// Miner 2 wins the third tenure C. +/// Miner 2 proposes block N+3 with a TenureChangeCause::BlockFound +/// Signers accept and the stacks tip advances to N+3 +/// +/// Asserts: +/// - Block N contains the TenureChangeCause::BlockFound +/// - Block N+1' contains a TenureChangeCause::BlockFound and is rejected +/// - Block N+1 contains the TenureChangeCause::Extended +/// - Block N+2 is accepted. +/// - Block N+3 contains the TenureChangeCause::BlockFound. +/// - The stacks tip advances to N+3 +#[test] +#[ignore] +fn non_blocking_minority_configured_to_favour_incoming_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let non_block_minority = num_signers * 2 / 10; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + debug!("Node 1 bound at (p2p={node_1_p2p}, rpc={node_1_rpc})"); + debug!("Node 2 bound at (p2p={node_2_p2p}, rpc={node_2_rpc})"); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + let favour_prev_miner_block_proposal_timeout = Duration::from_secs(20); + let favour_incoming_miner_block_proposal_timeout = Duration::from_secs(500); + // Make sure the miner attempts to extend after the minority mark the incoming as invalid + let 
tenure_extend_wait_timeout = favour_prev_miner_block_proposal_timeout; + + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |signer_config| { + let port = signer_config.endpoint.port(); + let node_host = if port % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + // Note signer ports are based on the number of them, the first being 3000, the last being 3000 + num_signers - 1 + if port < 3000 + non_block_minority as u16 { + signer_config.block_proposal_timeout = favour_incoming_miner_block_proposal_timeout; + } else { + signer_config.block_proposal_timeout = favour_prev_miner_block_proposal_timeout; + } + }, + |config| { + config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout; + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + 
config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + 
naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_submitted_commit_last_stacks_tip: rl2_commit_last_stacks_tip, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. + rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + 
info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 1's Block N -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the 
bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + // Pause the block proposal broadcast so that miner 2 AND miner 1 are unable to propose + // a block BEFORE block_proposal_timeout + TEST_BROADCAST_STALL.set(vec![mining_pk_2, mining_pk_1]); + + info!("------------------------- Miner 2 Wins Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "stacks_height_before" => %stacks_height_before + ); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!( + "------------------------- Wait for Miner 2 to be Marked Invalid by a Majority of Signers -------------------------" + ); + // Make sure that miner 1 and a majority of signers thinks miner 2 is invalid. + std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1))); + + // Allow miner 2 to attempt to start their tenure. 
+ TEST_BROADCAST_STALL.set(vec![mining_pk_1]); + + info!("------------------------- Wait for Miner 2's Block N+1' to be Proposed ------------------------"; + "stacks_height_before" => %stacks_height_before); + + let mut miner_2_block_n_1 = None; + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 || miner_pk != mining_pk_2 { + continue; + } + miner_2_block_n_1 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+1 block proposals from miner 2"); + + let miner_2_block_n_1 = miner_2_block_n_1.expect("No block proposal from miner 2"); + + assert_eq!( + miner_2_block_n_1 + .try_get_tenure_change_payload() + .unwrap() + .cause, + TenureChangeCause::BlockFound + ); + + info!("------------------------- Verify that Miner 2's Block N+1' was Rejected ------------------------"); + + // Miner 1's proposed block should get rejected by the signers + let mut found_miner_1_rejections = HashSet::new(); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. 
+ })) = message + { + if signer_signature_hash == miner_2_block_n_1.header.signer_signature_hash() { + found_miner_1_rejections.insert(signature); + } + } + } + Ok(found_miner_1_rejections.len() >= num_signers * 3 / 10) + }) + .expect("Timed out waiting for expected block responses"); + + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + info!("------------------------- Wait for Miner 1's Block N+1 Extended to be Proposed ------------------------"; + "stacks_height_before" => %stacks_height_before + ); + + TEST_BROADCAST_STALL.set(vec![]); + + // Get miner 1's N+1 block proposal + let mut miner_1_block_n_1 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 || miner_pk != mining_pk_1 { + continue; + } + miner_1_block_n_1 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+1 block proposals from miner 1"); + + let mut miner_1_block_n_1 = miner_1_block_n_1.expect("No block proposal from miner 1"); + + info!("------------------------- Wait for Miner 1's Block N+1 to be Approved ------------------------"; + "stacks_height_before" => %stacks_height_before + ); + + // Miner 2's proposed block should get approved and pushed + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) 
+ else { + continue; + }; + if let SignerMessage::BlockPushed(pushed_block) = message { + if pushed_block.header.signer_signature_hash() + == miner_1_block_n_1.header.signer_signature_hash() + { + miner_1_block_n_1 = pushed_block; + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for expeceted block responses"); + + let tip_block_header_hash = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip; + assert_eq!(tip_block_header_hash, miner_1_block_n_1.header.block_hash()); + assert_eq!( + stacks_height_before + 1, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + info!( + "------------------------- Verify BlockFound in Miner 1's Block N+1 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + + info!("------------------------- Miner 1 Mines Block N+2 with Transfer Tx -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let txid = submit_tx(&http_origin, &transfer_tx); + + wait_for(30, || { + Ok(signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > stacks_height_before) + }) + .expect("Timed out waiting for transfer tx to be mined"); + + // Get miner 1's N+2 block proposal + let mut miner_1_block_n_2 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let 
SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 + || miner_pk != mining_pk_1 + || !proposal + .block + .txs + .iter() + .any(|tx| tx.txid().to_string() == txid) + { + continue; + } + miner_1_block_n_2 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+2 block proposals from miner 1"); + + let miner_1_block_n_2 = miner_1_block_n_2.expect("No block proposal from miner 1"); + let tip_block_header_hash = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip; + assert_eq!(tip_block_header_hash, miner_1_block_n_2.header.block_hash()); + assert_eq!( + stacks_height_before + 1, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's commits + rl2_skip_commit_op.set(false); + + // Ensure that both miners' commits point at the stacks tip + wait_for(30, || { + let last_committed_2 = rl2_commit_last_stacks_tip.load(Ordering::SeqCst); + Ok(last_committed_2 >= stacks_height_before + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for block commit from Miner 2"); + + let burn_height_before = get_burn_height(); + let block_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + + info!("------------------------- Miner 2 Mines a Normal Tenure C -------------------------"; + "burn_height_before" => burn_height_before); + + 
next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + > block_before) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("------------------------- Wait for Miner 2's Block N+3 -------------------------"; + "stacks_height_before" => %stacks_height_before); + + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }) + .expect("Timed out waiting for block N+3 to be mined and processed"); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 2's Block N+3 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + assert_eq!( + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height, + starting_peer_height + 4 + ); + + info!("------------------------- Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +/// Test a scenario where a non-blocking majority of miners are configured to favour the previous miner +/// extending their tenure when the incoming miner is slow to propose a block. 
The incoming miner should succeed
+/// and its subsequent blocks should be approved.
+/// Two miners boot to Nakamoto.
+/// Miner 1 wins the first tenure A.
+/// Miner 1 proposes a block N with a TenureChangeCause::BlockFound
+/// Signers accept and the stacks tip advances to N
+/// Miner 2 wins the second tenure B.
+/// A minority of signers mark miner 2 as invalid.
+/// Miner 1 proposes block N+1' with a TenureChangeCause::Extended
+/// A majority of signers reject block N+1'
+/// Miner 2 proposes block N+1 with a TenureChangeCause::BlockFound
+/// A majority of signers accept block N+1.
+/// Miner 2 proposes block N+2 with a transfer tx
+/// A majority of signers should accept block N+2.
+/// Miner 1 wins the third tenure C.
+/// Miner 1 proposes block N+3 with a TenureChangeCause::BlockFound
+/// Signers accept and the stacks tip advances to N+3
+///
+/// Asserts:
+/// - Block N contains the TenureChangeCause::BlockFound
+/// - Block N+1' contains a TenureChangeCause::Extended and is rejected
+/// - Block N+1 contains the TenureChangeCause::BlockFound
+/// - Block N+2 is accepted.
+/// - Block N+3 contains the TenureChangeCause::BlockFound.
+/// - The stacks tip advances to N+3 +#[test] +#[ignore] +fn non_blocking_minority_configured_to_favour_prev_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let non_block_minority = num_signers * 2 / 10; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + debug!("Node 1 bound at (p2p={node_1_p2p}, rpc={node_1_rpc})"); + debug!("Node 2 bound at (p2p={node_2_p2p}, rpc={node_2_rpc})"); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + let favour_prev_miner_block_proposal_timeout = Duration::from_secs(20); + let favour_incoming_miner_block_proposal_timeout = Duration::from_secs(500); + // Make sure the miner attempts to extend after the minority mark the incoming as invalid + let tenure_extend_wait_timeout = favour_prev_miner_block_proposal_timeout; + + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |signer_config| { + let port = signer_config.endpoint.port(); + let node_host = 
if port % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + // Note signer ports are based on the number of them, the first being 3000, the last being 3000 + num_signers - 1 + if port < 3000 + non_block_minority as u16 { + signer_config.block_proposal_timeout = favour_prev_miner_block_proposal_timeout; + } else { + signer_config.block_proposal_timeout = favour_incoming_miner_block_proposal_timeout; + } + }, + |config| { + config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout; + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = 
format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed; + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let rl1_counters = signer_test.running_nodes.counters.clone(); + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + .. + } = run_loop_2.counters(); + + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+ rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = 
blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 1's Block N -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + // Pause the block proposal broadcast so that miner 2 will be unable to broadcast its + // tenure change proposal BEFORE miner 1 attempts to extend. 
+ TEST_BROADCAST_STALL.set(vec![mining_pk_2]); + + info!("------------------------- Miner 2 Wins Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "stacks_height_before" => %stacks_height_before + ); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + // assure we have a successful sortition that miner 2 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!( + "------------------------- Wait for Miner 1 to think Miner 2 is Invalid -------------------------" + ); + // Make sure that miner 1 thinks miner 2 is invalid. + std::thread::sleep(tenure_extend_wait_timeout.add(Duration::from_secs(1))); + + info!("------------------------- Wait for Miner 1's Block N+1' to be Proposed ------------------------"; + "stacks_height_before" => %stacks_height_before); + + let mut miner_1_block_n_1 = None; + + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 || miner_pk != mining_pk_1 { + continue; + } + miner_1_block_n_1 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+1 block proposals from miner 1"); + + let miner_1_block_n_1 = 
miner_1_block_n_1.expect("No block proposal from miner 1"); + + assert_eq!( + miner_1_block_n_1 + .try_get_tenure_change_payload() + .unwrap() + .cause, + TenureChangeCause::Extended + ); + + info!("------------------------- Verify that Miner 1's Block N+1' was Rejected ------------------------"); + + // Miner 1's proposed block should get rejected by the signers + let mut found_miner_1_rejections = HashSet::new(); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) = message + { + if signer_signature_hash == miner_1_block_n_1.header.signer_signature_hash() { + found_miner_1_rejections.insert(signature); + } + } + } + Ok(found_miner_1_rejections.len() >= num_signers * 3 / 10) + }) + .expect("Timed out waiting for expeceted block responses"); + + assert_eq!( + stacks_height_before, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + info!("------------------------- Wait for Miner 2's Block N+1 BlockFound to be Proposed ------------------------"; + "stacks_height_before" => %stacks_height_before + ); + + TEST_BROADCAST_STALL.set(vec![]); + + // Get miner 2's N+1 block proposal + let mut miner_2_block_n_1 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; 
+ if block_stacks_height != stacks_height_before + 1 { + continue; + } + if miner_pk == mining_pk_2 { + miner_2_block_n_1 = Some(proposal.block); + return Ok(true); + } + } + Ok(false) + }) + .expect("Timed out waiting for N+1 block proposals from miner 1"); + + let mut miner_2_block_n_1 = miner_2_block_n_1.expect("No block proposal from miner 2"); + + info!("------------------------- Wait for Miner 2's Block N+1 to be Approved ------------------------"; + "stacks_height_before" => %stacks_height_before + ); + + // Miner 2's proposed block should get approved and pushed + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockPushed(pushed_block) = message { + if pushed_block.header.signer_signature_hash() + == miner_2_block_n_1.header.signer_signature_hash() + { + miner_2_block_n_1 = pushed_block; + return Ok(true); + } + } + } + Ok(false) + }) + .expect("Timed out waiting for expeceted block responses"); + + let tip_block_header_hash = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip; + assert_eq!(tip_block_header_hash, miner_2_block_n_1.header.block_hash()); + assert_eq!( + stacks_height_before + 1, + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + ); + + info!("------------------------- Verify 2 Signer's Rejected Miner 2's Block N+1 -------------------------"); + let mut found_miner_2_rejects = HashSet::new(); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let 
SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + .. + })) = message + { + if signer_signature_hash == miner_2_block_n_1.header.signer_signature_hash() { + found_miner_2_rejects.insert(signer_signature_hash); + } + } + } + Ok(found_miner_2_rejects.len() == non_block_minority) + }) + .expect("Timed out waiting for expected block responses"); + info!( + "------------------------- Verify BlockFound in Miner 2's Block N+1 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Miner 2 Proposes Block N+2 with Transfer Tx -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let txid = submit_tx(&http_origin, &transfer_tx); + // Get miner 2's N+2 block proposal + let mut miner_2_block_n_2 = None; + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockProposal(proposal) = message else { + continue; + }; + let miner_pk = proposal.block.header.recover_miner_pk().unwrap(); + let block_stacks_height = proposal.block.header.chain_length; + if block_stacks_height != stacks_height_before + 1 + || miner_pk != mining_pk_2 + || !proposal + .block + .txs + .iter() + .any(|tx| tx.txid().to_string() == txid) + { + continue; + } + miner_2_block_n_2 = Some(proposal.block); + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for N+2 block proposals from miner 2"); + let 
miner_2_block_n_2 = miner_2_block_n_2.expect("No block proposal from miner 2"); + info!( + "------------------------- Verify Miner 2's Block N+2 is Globally Accepted but still Rejected by Minority Signers -------------------------" + ); + let mut found_miner_2_accepts = HashSet::new(); + let mut found_miner_2_rejects = HashSet::new(); + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + signature, + .. + })) => { + if signer_signature_hash == miner_2_block_n_2.header.signer_signature_hash() { + found_miner_2_accepts.insert(signature); + } + } + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + signature, + .. + })) => { + if signer_signature_hash == miner_2_block_n_2.header.signer_signature_hash() { + found_miner_2_rejects.insert(signature); + } + } + _ => {} + } + } + Ok(found_miner_2_accepts.len() >= num_signers * 7 / 10 + && found_miner_2_rejects.len() == non_block_minority) + }) + .expect("Timed out waiting for expeceted block responses"); + + info!("------------------------- Unpause Miner 1's Block Commits -------------------------"); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + let rl1_commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // Unpause miner 1's commits + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(false); + + // Ensure that both miners' commits point at the stacks tip + wait_for(30, || { + let last_committed_1 = rl1_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + Ok(last_committed_1 >= 
stacks_height_before + && signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst) + > rl1_commits_before) + }) + .expect("Timed out waiting for block commit from Miner 1"); + + let burn_height_before = get_burn_height(); + let block_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height; + + info!("------------------------- Miner 1 Mines a Normal Tenure C -------------------------"; + "burn_height_before" => burn_height_before); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + > block_before) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 1 won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + info!("------------------------- Wait for Miner 1's Block N+3 -------------------------"; + "stacks_height_before" => %stacks_height_before); + + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }) + .expect("Timed out waiting for block N+3 to be mined and processed"); + + info!( + "------------------------- Verify Tenure Change Tx in Miner 1's Block N+3 -------------------------" + ); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + assert_eq!( + signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height, + starting_peer_height + 4 + ); + + info!("------------------------- 
Shutdown -------------------------"); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} From 0463c7bcd0de515d00300907cb5d827dfe2bad83 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 7 Feb 2025 10:13:44 -0800 Subject: [PATCH 07/13] Fix accidental copy pasta mistake in reorg_attempts_activity_timeout_exceeded Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 84 ++++++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 7 +- 2 files changed, 58 insertions(+), 33 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8b13a85252..4eddf2e9e4 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1760,9 +1760,11 @@ impl RelayerThread { } /// Try to start up a tenure-extend. - /// Only do this if: + /// Will check if the tenure-extend time was set and has expired and one of the following is true: /// - the miner won the highest valid sortition but the burn view has changed. /// - the subsequent miner appears to be offline. + /// If so, it will stop any existing tenure and attempt to start a new one with an Extended reason. + /// Otherwise, it will do nothing. fn try_continue_tenure(&mut self) { // Should begin a tenure-extend? if let Some(tenure_extend_time) = &self.tenure_extend_time { @@ -1778,6 +1780,13 @@ impl RelayerThread { // No tenure extend time set, so nothing to do. return; } + let Some(mining_pk) = self.get_mining_key_pkh() else { + // This shouldn't really ever hit, but just in case. + warn!("Will not tenure extend -- no mining key"); + // If we don't have a mining key set, don't bother checking again. 
+ self.tenure_extend_time = None; + return; + }; // reset timer so we can try again if for some reason a miner was already running (e.g. a // blockfound from earlier). self.tenure_extend_time = Some(TenureExtendTime::delayed( @@ -1785,61 +1794,57 @@ impl RelayerThread { )); // try to extend, but only if we aren't already running a thread for the current or newer // burnchain view - let Ok(sn) = - SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).inspect_err(|e| { + let Ok(burn_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) + .inspect_err(|e| { error!("Failed to read canonical burnchain sortition: {e:?}"); }) else { return; }; + let won_sortition = burn_tip.sortition && burn_tip.miner_pk_hash == Some(mining_pk); + + if won_sortition { + debug!("Will not tenure extend. Won current sortition"; + "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash + ); + self.tenure_extend_time = None; + return; + } + if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() { // a miner thread is already running. If its burn view is the same as the canonical // tip, then do nothing for now - if sn.consensus_hash == miner_thread_burn_view.consensus_hash { - info!("Will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %sn.consensus_hash); + if burn_tip.consensus_hash == miner_thread_burn_view.consensus_hash { + info!("Will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %burn_tip.consensus_hash); return; } } - let Some(mining_pk) = self.get_mining_key_pkh() else { - // This shouldn't really ever hit, but just in case. 
- warn!("Will not tenure extend -- no mining key"); - return; - }; - - let (canonical_stacks_tip_ch, _) = + let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); + let canonical_stacks_tip = + StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); let canonical_stacks_snapshot = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) .expect("FATAL: failed to query sortiiton DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); - let won_sortition = sn.sortition && sn.miner_pk_hash == Some(mining_pk); - - if won_sortition { - debug!("Will not tenure extend. Won current sortition"; - "burn_chain_sortition_tip_ch" => %sn.consensus_hash, - "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - ); - return; - } - let won_ongoing_tenure_sortition = canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); if !won_ongoing_tenure_sortition { // We did not win the ongoing tenure sortition, so nothing we can even do. debug!("Will not tenure extend. 
Did not win ongoing tenure sortition"; - "burn_chain_sortition_tip_ch" => %sn.consensus_hash, + "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - "burn_chain_sortition_tip_mining_pk" => ?sn.miner_pk_hash, + "burn_chain_sortition_tip_mining_pk" => ?burn_tip.miner_pk_hash, "mining_pk" => %mining_pk, ); return; } - let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &sn) + let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &burn_tip) .inspect_err(|e| { warn!("Failed to load last winning snapshot: {e:?}"); }) @@ -1854,14 +1859,31 @@ impl RelayerThread { return; } - if let Err(e) = self.continue_tenure(sn.consensus_hash.clone()) { - warn!( - "Failed to continue tenure for burn view {}: {e:?}", - &sn.consensus_hash - ); + if let Err(e) = self.stop_tenure() { + error!("Relayer: Failed to stop tenure: {e:?}"); + return; + } + let reason = MinerReason::Extended { + burn_view_consensus_hash: burn_tip.consensus_hash.clone(), + }; + debug!("Relayer: successfully stopped tenure; will try to continue."); + if let Err(e) = self.start_new_tenure( + canonical_stacks_tip.clone(), + canonical_stacks_snapshot.clone(), + burn_tip.clone(), + reason.clone(), + &burn_tip.consensus_hash, + ) { + error!("Relayer: Failed to start new tenure: {e:?}"); } else { - self.tenure_extend_time = None; + debug!("Relayer: successfully started new tenure."; + "parent_tenure_start" => %canonical_stacks_tip, + "burn_tip" => %burn_tip.consensus_hash, + "burn_view_snapshot" => %burn_tip.consensus_hash, + "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash, + "reason" => %reason); } + self.tenure_extend_time = None; } /// Main loop of the relayer. 
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 28bdca56fa..5e9e1e2edb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -11640,6 +11640,7 @@ fn reorg_attempts_activity_timeout_exceeded() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let block_proposal_timeout = Duration::from_secs(30); let reorg_attempts_activity_timeout = Duration::from_secs(20); + let tenure_extend_wait_timeout = Duration::from_secs(1000); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, send_amt + send_fee)], @@ -11647,7 +11648,9 @@ fn reorg_attempts_activity_timeout_exceeded() { config.block_proposal_timeout = block_proposal_timeout; config.reorg_attempts_activity_timeout = reorg_attempts_activity_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout; + }, None, None, ); @@ -11789,7 +11792,7 @@ fn reorg_attempts_activity_timeout_exceeded() { info!("------------------------- Wait for Block N+1 Proposal -------------------------"); test_observer::clear(); - TEST_BROADCAST_STALL.set(vec![miner_pk]); + TEST_BROADCAST_STALL.set(vec![]); wait_for(30, || { let block_proposal_n_1 = wait_for_block_proposal().expect("Failed to get block proposal N+1"); From 1dcd321133ea9a8a553339e1da75daf01b4901f9 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 7 Feb 2025 11:42:03 -0800 Subject: [PATCH 08/13] Cleanup need_block_found logic to be more clear Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 87 ++++++++++--------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 4eddf2e9e4..402e786d59 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ 
b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -632,37 +632,45 @@ impl RelayerThread { return None; }; - if self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { - info!( - "Relayer: will submit late BlockFound for {}", + let won_last_winning_snapshot = + mining_pkh_opt.is_some() && last_winning_snapshot.miner_pk_hash == mining_pkh_opt; + + if won_last_winning_snapshot { + debug!( + "Relayer: we won the last winning sortition {}", &last_winning_snapshot.consensus_hash ); - // prepare to immediately extend after our BlockFound gets mined. - self.tenure_extend_time = Some(TenureExtendTime::immediate()); - return Some(MinerDirective::BeginTenure { - parent_tenure_start: StacksBlockId( - last_winning_snapshot.winning_stacks_block_hash.clone().0, - ), - burnchain_tip: sn, - election_block: last_winning_snapshot, - late: true, - }); - } - let won_last_winning_snapshot = - mining_pkh_opt.is_some() && last_winning_snapshot.miner_pk_hash == mining_pkh_opt; - let tip_is_last_winning_snapshot = mining_pkh_opt.is_some() - && canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height - && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash; - - if won_last_winning_snapshot && tip_is_last_winning_snapshot { - // this is the ongoing tenure snapshot. A BlockFound has already been issued. We - // can instead opt to Extend immediately - info!("Relayer: BlockFound already issued for the last winning sortition. Will extend tenure."); - return Some(MinerDirective::ContinueTenure { - new_burn_view: sn.consensus_hash, - }); + if self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { + info!( + "Relayer: will submit late BlockFound for {}", + &last_winning_snapshot.consensus_hash + ); + // prepare to immediately extend after our BlockFound gets mined. 
+ self.tenure_extend_time = Some(TenureExtendTime::immediate()); + return Some(MinerDirective::BeginTenure { + parent_tenure_start: StacksBlockId( + last_winning_snapshot.winning_stacks_block_hash.clone().0, + ), + burnchain_tip: sn, + election_block: last_winning_snapshot, + late: true, + }); + } + let tip_is_last_winning_snapshot = mining_pkh_opt.is_some() + && canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height + && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash; + + if tip_is_last_winning_snapshot { + // this is the ongoing tenure snapshot. A BlockFound has already been issued. We + // can instead opt to Extend immediately + info!("Relayer: BlockFound already issued for the last winning sortition. Will extend tenure."); + return Some(MinerDirective::ContinueTenure { + new_burn_view: sn.consensus_hash, + }); + } } + if won_ongoing_tenure_sortition { info!("Relayer: No sortition, but we produced the canonical Stacks tip. Will extend tenure."); if !won_last_winning_snapshot { @@ -684,27 +692,16 @@ impl RelayerThread { return None; } - /// Determine if we need to issue a BlockFound. + /// Determine if we the current tenure winner needs to issue a BlockFound. + /// Assumes the caller has already checked that the last-winning snapshot was won by us. /// - /// Returns true if we won the last non-empty sortition, and the stacks tip's snapshot is an ancestor of the last-won sortition + /// Returns true if the stacks tip's snapshot is an ancestor of the last-won sortition /// Returns false otherwise. fn need_block_found( &mut self, canonical_stacks_snapshot: &BlockSnapshot, last_winning_snapshot: &BlockSnapshot, ) -> bool { - if last_winning_snapshot.miner_pk_hash != self.get_mining_key_pkh() { - debug!( - "Relayer: we did not win the last winning sortition {}", - &last_winning_snapshot.consensus_hash - ); - // We did not win the last winning snapshot, we do not need to issue a block found. 
- return false; - } - debug!( - "Relayer: we won the last winning sortition {}", - &last_winning_snapshot.consensus_hash - ); // we won the last non-empty sortition. Has there been a BlockFound issued for it? // This would be true if the stacks tip's tenure is at or descends from this snapshot. // If there has _not_ been a BlockFound, then we should issue one. @@ -1854,11 +1851,15 @@ impl RelayerThread { return; }; - if self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { + let won_last_winning_snapshot = last_winning_snapshot.miner_pk_hash == Some(mining_pk); + if won_last_winning_snapshot + && self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) + { info!("Will not extend tenure -- need to issue a BlockFound first"); return; } - + // If we reach this code, we have either won the last winning snapshot and have already issued a block found for it and should extend. + // OR we did not win the last snapshot, but the person who did has failed to produce a block and we should extend our old tenure. if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {e:?}"); return; From 4f5ecd9d48cc4434c55742a2940ab3642f763288 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 7 Feb 2025 12:16:58 -0800 Subject: [PATCH 09/13] Add further comments to try_continue_tenure Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 402e786d59..993fbb1798 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1756,12 +1756,16 @@ impl RelayerThread { )) } - /// Try to start up a tenure-extend. + /// Try to start up a tenure-extend if the tenure_extend_time has expired. 
+ /// /// Will check if the tenure-extend time was set and has expired and one of the following is true: - /// - the miner won the highest valid sortition but the burn view has changed. + /// - this miner won the highest valid sortition but the burn view has changed. /// - the subsequent miner appears to be offline. /// If so, it will stop any existing tenure and attempt to start a new one with an Extended reason. /// Otherwise, it will do nothing. + /// + /// Note: tenure_extend_time is only set to Some(_) if during sortition processing, the sortition + /// winner commit is corrupted or the winning miner has yet to produce a block. fn try_continue_tenure(&mut self) { // Should begin a tenure-extend? if let Some(tenure_extend_time) = &self.tenure_extend_time { From 9eb307b7b67cfe1e2d84e6b27dde1c2770cc50d5 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 7 Feb 2025 13:30:04 -0800 Subject: [PATCH 10/13] Cleanup try_continue_tenure to overwrite tenure_extend_time to None where appropriate Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 71 +++++++------------ 1 file changed, 25 insertions(+), 46 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 993fbb1798..28cc49315b 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1289,10 +1289,9 @@ impl RelayerThread { Ok(ih.get_last_snapshot_with_sortition(sort_tip.block_height)?) } - /// Is the given sortition a valid sortition? - /// I.e. whose winning commit's parent tenure ID is on the canonical Stacks history, - /// and whose consensus hash corresponds to the ongoing tenure or a confirmed tenure? - fn is_valid_sortition( + /// Returns true if the sortition `sn` commits to the tenure start block of the ongoing Stacks tenure `stacks_tip_sn`. + /// Returns false otherwise. 
+ fn sortition_commits_to_stacks_tip_tenure( chain_state: &mut StacksChainState, stacks_tip_id: &StacksBlockId, stacks_tip_sn: &BlockSnapshot, @@ -1303,22 +1302,9 @@ impl RelayerThread { debug!("Relayer: Sortition {} is empty", &sn.consensus_hash); return Ok(false); } - - // check that this commit's parent tenure ID is on the history tipped at - // `stacks_tip_id` + // The sortition must commit to the tenure start block of the ongoing Stacks tenure. let mut ic = chain_state.index_conn(); let parent_tenure_id = StacksBlockId(sn.winning_stacks_block_hash.clone().0); - let height_opt = ic.get_ancestor_block_height(&parent_tenure_id, stacks_tip_id)?; - if height_opt.is_none() { - // parent_tenure_id is not an ancestor of stacks_tip_id - debug!( - "Relayer: Sortition {} has winning commit hash {parent_tenure_id}, which is not canonical", - &sn.consensus_hash - ); - return Ok(false); - } - - // The sortition must commit to the tenure start block of the ongoing Stacks tenure. let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut ic, stacks_tip_id, @@ -1382,8 +1368,7 @@ impl RelayerThread { &cursor.consensus_hash ); - // is this a valid sortiton? - if Self::is_valid_sortition( + if Self::sortition_commits_to_stacks_tip_tenure( chain_state, &canonical_stacks_tip, &canonical_stacks_tip_sn, @@ -1803,21 +1788,12 @@ impl RelayerThread { return; }; - let won_sortition = burn_tip.sortition && burn_tip.miner_pk_hash == Some(mining_pk); - - if won_sortition { - debug!("Will not tenure extend. Won current sortition"; - "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash - ); - self.tenure_extend_time = None; - return; - } - if let Some(miner_thread_burn_view) = self.miner_thread_burn_view.as_ref() { // a miner thread is already running. 
If its burn view is the same as the canonical // tip, then do nothing for now if burn_tip.consensus_hash == miner_thread_burn_view.consensus_hash { info!("Will not try to start a tenure extend -- the current miner thread's burn view matches the sortition tip"; "sortition tip" => %burn_tip.consensus_hash); + // Do not reset the timer, as we may be able to extend later. return; } } @@ -1831,20 +1807,6 @@ impl RelayerThread { SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) .expect("FATAL: failed to query sortiiton DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); - - let won_ongoing_tenure_sortition = - canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); - - if !won_ongoing_tenure_sortition { - // We did not win the ongoing tenure sortition, so nothing we can even do. - debug!("Will not tenure extend. Did not win ongoing tenure sortition"; - "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash, - "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - "burn_chain_sortition_tip_mining_pk" => ?burn_tip.miner_pk_hash, - "mining_pk" => %mining_pk, - ); - return; - } let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &burn_tip) .inspect_err(|e| { warn!("Failed to load last winning snapshot: {e:?}"); @@ -1852,14 +1814,31 @@ impl RelayerThread { else { // this should be unreachable, but don't tempt fate. info!("No prior snapshots have a winning sortition. Will not try to mine."); + self.tenure_extend_time = None; return; }; - let won_last_winning_snapshot = last_winning_snapshot.miner_pk_hash == Some(mining_pk); if won_last_winning_snapshot && self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { info!("Will not extend tenure -- need to issue a BlockFound first"); + // We may manage to extend later, so don't set the timer to None. 
+ return; + } + let won_ongoing_tenure_sortition = + canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); + if !won_ongoing_tenure_sortition { + // We did not win the ongoing tenure sortition, so nothing we can even do. + // Make sure this check is done AFTER checking for the BlockFound so that + // we can set tenure_extend_time to None in this case without causing problems + // (If we need to issue a block found, we may not have won_ongoing_tenure_sortition) + debug!("Will not tenure extend. Did not win ongoing tenure sortition"; + "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash, + "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, + "burn_chain_sortition_tip_mining_pk" => ?burn_tip.miner_pk_hash, + "mining_pk" => %mining_pk, + ); + self.tenure_extend_time = None; return; } // If we reach this code, we have either won the last winning snapshot and have already issued a block found for it and should extend. @@ -1887,8 +1866,8 @@ impl RelayerThread { "burn_view_snapshot" => %burn_tip.consensus_hash, "block_election_snapshot" => %canonical_stacks_snapshot.consensus_hash, "reason" => %reason); + self.tenure_extend_time = None; } - self.tenure_extend_time = None; } /// Main loop of the relayer. From dbf0bd9c00794a6eddc2d33a1bf37b14dcf6c524 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 7 Feb 2025 15:44:54 -0800 Subject: [PATCH 11/13] CRC: break choose_miner_directive into sep fns for sn with winner vs no winner Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 170 ++++++++++-------- 1 file changed, 94 insertions(+), 76 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 28cc49315b..a7fe4a2357 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -504,13 +504,11 @@ impl RelayerThread { } } - /// Choose a miner directive based on the outcome of a sortition. 
+ /// Choose a miner directive for a sortition with a winner. /// /// The decision process is a little tricky, because the right decision depends on: /// * whether or not we won the _given_ sortition (`sn`) /// * whether or not we won the sortition that started the ongoing Stacks tenure - /// * whether or not we won the last sortition with a winner - /// * whether or not the last sortition winner has produced a Stacks block /// * whether or not the ongoing Stacks tenure is at or descended from the last-winning /// sortition /// @@ -520,31 +518,30 @@ impl RelayerThread { /// tenure-change. Otherwise, if we won the tenure which started the ongoing Stacks tenure /// (i.e. we're the active miner), then we _may_ start mining after a timeout _if_ the winning /// miner (not us) fails to submit a `BlockFound` tenure-change block for `sn`. - /// - /// Otherwise, if the given sortition `sn` has no winner, the find out who won the last sortition - /// with a winner. If it was us, and if we haven't yet submitted a `BlockFound` tenure-change - /// for it (which can happen if this given sortition is from a flash block), then start mining - /// immediately with a "late" `BlockFound` tenure, _and_ prepare to start mining right afterwards - /// with an `Extended` tenure-change so as to represent the given sortition `sn`'s burn view in - /// the Stacks chain. - /// - /// Otherwise, if this sortition has no winner, and we did not win the last-winning sortition, - /// then check to see if we're the ongoing Stack's tenure's miner. If so, then we _may_ start - /// mining after a timeout _if_ the winner of the last-good sortition (not us) fails to submit - /// a `BlockFound` tenure-change block. This can happen if `sn` was a flash block, and the - /// remote miner has yet to process it. - /// - /// We won't always be able to mine -- for example, this could be an empty sortition, but the - /// parent block could be an epoch 2 block. 
In this case, the right thing to do is to wait for - /// the next block-commit. - pub(crate) fn choose_miner_directive( + fn choose_directive_sortition_with_winner( &mut self, sn: BlockSnapshot, - won_sortition: bool, + mining_pk: Hash160, committed_index_hash: StacksBlockId, ) -> Option { - // Reset the tenure extend time as we need to process this new sortition to deterine if we should extend - self.tenure_extend_time = None; + let won_sortition = sn.miner_pk_hash == Some(mining_pk); + if won_sortition || self.config.get_node_config(false).mock_mining { + // a sortition happenend, and we won + info!("Won sortition; begin tenure."; + "winning_sortition" => %sn.consensus_hash); + return Some(MinerDirective::BeginTenure { + parent_tenure_start: committed_index_hash, + burnchain_tip: sn.clone(), + election_block: sn, + late: false, + }); + } + + // a sortition happened, but we didn't win. Check if we won the ongoing tenure. + debug!( + "Relayer: did not win sortition {}, so stopping tenure", + &sn.sortition + ); let (canonical_stacks_tip_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); @@ -553,56 +550,68 @@ impl RelayerThread { .expect("FATAL: failed to query sortiiton DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); - let mining_pkh_opt = self.get_mining_key_pkh(); - let won_ongoing_tenure_sortition = - mining_pkh_opt.is_some() && canonical_stacks_snapshot.miner_pk_hash == mining_pkh_opt; - - if sn.sortition { - // a sortition happened - if won_sortition || self.config.get_node_config(false).mock_mining { - // a sortition happenend, and we won - info!("Relayer: Won sortition; begin tenure."; - "winning_sortition" => %sn.consensus_hash); - return Some(MinerDirective::BeginTenure { - parent_tenure_start: committed_index_hash, - burnchain_tip: sn.clone(), - election_block: sn, - late: false, - }); - } - - // a sortition happened, but we didn't win. 
-            debug!(
-                "Relayer: did not win sortition {}, so stopping tenure",
-                &sn.sortition
-            );
-
-            if won_ongoing_tenure_sortition {
-                // we won the current ongoing tenure, but not the most recent sortition. Should we attempt to extend immediately or wait for the incoming miner?
-                if let Ok(result) = Self::find_highest_valid_sortition(
-                    &self.sortdb,
-                    &mut self.chainstate,
-                    &sn,
-                    &canonical_stacks_snapshot.consensus_hash,
-                ) {
-                    if result.is_some() {
-                        debug!("Relayer: Did not win current sortition but won the prior valid sortition. Will attempt to extend tenure after allowing the new miner some time to come online.";
-                            "tenure_extend_wait_timeout_ms" => self.config.miner.tenure_extend_wait_timeout.as_millis(),
-                        );
-                        self.tenure_extend_time = Some(TenureExtendTime::delayed(
-                            self.config.miner.tenure_extend_wait_timeout,
-                        ));
-                    } else {
-                        info!("Relayer: no valid sortition since our last winning sortition. Will extend tenure.");
-                        self.tenure_extend_time = Some(TenureExtendTime::immediate());
-                    }
+        let won_ongoing_tenure_sortition =
+            canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk);
+        if won_ongoing_tenure_sortition {
+            // we won the current ongoing tenure, but not the most recent sortition. Should we attempt to extend immediately or wait for the incoming miner?
+            if let Ok(result) = Self::find_highest_valid_sortition(
+                &self.sortdb,
+                &mut self.chainstate,
+                &sn,
+                &canonical_stacks_snapshot.consensus_hash,
+            ) {
+                if result.is_some() {
+                    debug!("Relayer: Did not win current sortition but won the prior valid sortition. Will attempt to extend tenure after allowing the new miner some time to come online.";
+                        "tenure_extend_wait_timeout_ms" => self.config.miner.tenure_extend_wait_timeout.as_millis(),
+                    );
+                    self.tenure_extend_time = Some(TenureExtendTime::delayed(
+                        self.config.miner.tenure_extend_wait_timeout,
+                    ));
+                } else {
+                    info!("Relayer: no valid sortition since our last winning sortition. 
Will extend tenure."); + self.tenure_extend_time = Some(TenureExtendTime::immediate()); } } - return Some(MinerDirective::StopTenure); } + return Some(MinerDirective::StopTenure); + } + + /// Choose a miner directive for a sortition with no winner. + /// + /// The decision process is a little tricky, because the right decision depends on: + /// * whether or not we won the sortition that started the ongoing Stacks tenure + /// * whether or not we won the last sortition with a winner + /// * whether or not the last sortition winner has produced a Stacks block + /// * whether or not the ongoing Stacks tenure is at or descended from the last-winning + /// sortition + /// + /// Find out who won the last sortition with a winner. If it was us, and if we haven't yet + /// submitted a `BlockFound` tenure-change for it (which can happen if this given sortition is + /// from a flash block), then start mining immediately with a "late" `BlockFound` tenure, _and_ + /// prepare to start mining right afterwards with an `Extended` tenure-change so as to represent + /// the given sortition `sn`'s burn view in the Stacks chain. + /// + /// Otherwise, if did not win the last-winning sortition, then check to see if we're the ongoing + /// Stack's tenure's miner. If so, then we _may_ start mining after a timeout _if_ the winner of + /// the last-good sortition (not us) fails to submit a `BlockFound` tenure-change block. + /// This can happen if `sn` was a flash block, and the remote miner has yet to process it. + /// + /// We won't always be able to mine -- for example, this could be an empty sortition, but the + /// parent block could be an epoch 2 block. In this case, the right thing to do is to wait for + /// the next block-commit. 
+ fn choose_directive_sortition_without_winner( + &mut self, + sn: BlockSnapshot, + mining_pk: Hash160, + ) -> Option { + let (canonical_stacks_tip_ch, _) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + let canonical_stacks_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .expect("FATAL: failed to query sortiiton DB for epoch") + .expect("FATAL: no sortition for canonical stacks tip"); - // no sortition happened. // find out what epoch the Stacks tip is in. // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so // right now since this sortition has no winner. @@ -632,9 +641,7 @@ impl RelayerThread { return None; }; - let won_last_winning_snapshot = - mining_pkh_opt.is_some() && last_winning_snapshot.miner_pk_hash == mining_pkh_opt; - + let won_last_winning_snapshot = last_winning_snapshot.miner_pk_hash == Some(mining_pk); if won_last_winning_snapshot { debug!( "Relayer: we won the last winning sortition {}", @@ -657,8 +664,8 @@ impl RelayerThread { late: true, }); } - let tip_is_last_winning_snapshot = mining_pkh_opt.is_some() - && canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height + let tip_is_last_winning_snapshot = canonical_stacks_snapshot.block_height + == last_winning_snapshot.block_height && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash; if tip_is_last_winning_snapshot { @@ -671,6 +678,8 @@ impl RelayerThread { } } + let won_ongoing_tenure_sortition = + canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); if won_ongoing_tenure_sortition { info!("Relayer: No sortition, but we produced the canonical Stacks tip. 
Will extend tenure.");
             if !won_last_winning_snapshot {
@@ -818,8 +827,17 @@ impl RelayerThread {
             .raise_initiative("process_sortition".to_string());
             return Ok(None);
         }
-
-        let directive_opt = self.choose_miner_directive(sn, won_sortition, committed_index_hash);
+        // Reset the tenure extend time
+        self.tenure_extend_time = None;
+        let Some(mining_pk) = self.get_mining_key_pkh() else {
+            debug!("No mining key, will not mine");
+            return Ok(None);
+        };
+        let directive_opt = if sn.sortition {
+            self.choose_directive_sortition_with_winner(sn, mining_pk, committed_index_hash)
+        } else {
+            self.choose_directive_sortition_without_winner(sn, mining_pk)
+        };
         debug!(
             "Relayer: Processed sortition {}: Miner directive is {:?}",
             &consensus_hash, &directive_opt

From 3144969192d199cb7e2e376d83316e338a9ca269 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Mon, 10 Feb 2025 12:38:33 -0800
Subject: [PATCH 12/13] CRC: add a TenureExtendReason to determine what to check in check_tenure_timers

Signed-off-by: Jacinta Ferrant

---
 .../stacks-node/src/nakamoto_node/relayer.rs | 205 +++++++++---------
 testnet/stacks-node/src/tests/signer/v0.rs |   6 +-
 2 files changed, 106 insertions(+), 105 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs
index a7fe4a2357..726c987bf2 100644
--- a/testnet/stacks-node/src/nakamoto_node/relayer.rs
+++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs
@@ -250,28 +250,44 @@ impl MinerStopHandle {
     }
 }
 
+/// The reason for issuing a tenure extend
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub enum TenureExtendReason {
+    /// There was an empty sortition
+    EmptySortition,
+    /// There was a bad sortition winner
+    BadSortitionWinner,
+    /// We are waiting for the current winner to produce a block. 
+ UnresponsiveWinner, +} + /// Information necessary to determine when to extend a tenure +#[derive(Clone)] pub struct TenureExtendTime { /// The time at which we determined that we should tenure-extend time: Instant, /// The amount of time we should wait before tenure-extending timeout: Duration, + /// The reason for tenure-extending + reason: TenureExtendReason, } impl TenureExtendTime { - /// Create a new `TenureExtendTime` with a delayed `timeout` - pub fn delayed(timeout: Duration) -> Self { + /// Create a new `TenureExtendTime` for an UnresponsiveWinner with the specified `timeout` + pub fn unresponsive_winner(timeout: Duration) -> Self { Self { time: Instant::now(), timeout, + reason: TenureExtendReason::UnresponsiveWinner, } } - /// Create a new `TenureExtendTime` with no `timeout` - pub fn immediate() -> Self { + /// Create a new `TenureExtendTime` with the provided `reason` and no `timeout` + pub fn immediate(reason: TenureExtendReason) -> Self { Self { time: Instant::now(), - timeout: Duration::from_secs(0), + timeout: Duration::from_millis(0), + reason, } } @@ -290,6 +306,17 @@ impl TenureExtendTime { pub fn timeout(&self) -> Duration { self.timeout } + + /// The reason for tenure-extending + pub fn reason(&self) -> &TenureExtendReason { + &self.reason + } + + /// Update the timeout for this `TenureExtendTime` and reset the time + pub fn refresh(&mut self, timeout: Duration) { + self.timeout = timeout; + self.time = Instant::now(); + } } /// Relayer thread @@ -521,10 +548,10 @@ impl RelayerThread { fn choose_directive_sortition_with_winner( &mut self, sn: BlockSnapshot, - mining_pk: Hash160, + mining_pkh: Hash160, committed_index_hash: StacksBlockId, ) -> Option { - let won_sortition = sn.miner_pk_hash == Some(mining_pk); + let won_sortition = sn.miner_pk_hash == Some(mining_pkh); if won_sortition || self.config.get_node_config(false).mock_mining { // a sortition happenend, and we won info!("Won sortition; begin tenure."; @@ -551,10 +578,10 @@ impl 
RelayerThread { .expect("FATAL: no sortition for canonical stacks tip"); let won_ongoing_tenure_sortition = - canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); + canonical_stacks_snapshot.miner_pk_hash == Some(mining_pkh); if won_ongoing_tenure_sortition { // we won the current ongoing tenure, but not the most recent sortition. Should we attempt to extend immediately or wait for the incoming miner? - if let Ok(result) = Self::find_highest_valid_sortition( + if let Ok(result) = Self::find_highest_sortition_commits_to_stacks_tip_tenure( &self.sortdb, &mut self.chainstate, &sn, @@ -564,12 +591,14 @@ impl RelayerThread { debug!("Relayer: Did not win current sortition but won the prior valid sortition. Will attempt to extend tenure after allowing the new miner some time to come online."; "tenure_extend_wait_timeout_ms" => self.config.miner.tenure_extend_wait_timeout.as_millis(), ); - self.tenure_extend_time = Some(TenureExtendTime::delayed( + self.tenure_extend_time = Some(TenureExtendTime::unresponsive_winner( self.config.miner.tenure_extend_wait_timeout, )); } else { info!("Relayer: no valid sortition since our last winning sortition. Will extend tenure."); - self.tenure_extend_time = Some(TenureExtendTime::immediate()); + self.tenure_extend_time = Some(TenureExtendTime::immediate( + TenureExtendReason::BadSortitionWinner, + )); } } } @@ -654,7 +683,9 @@ impl RelayerThread { &last_winning_snapshot.consensus_hash ); // prepare to immediately extend after our BlockFound gets mined. - self.tenure_extend_time = Some(TenureExtendTime::immediate()); + self.tenure_extend_time = Some(TenureExtendTime::immediate( + TenureExtendReason::EmptySortition, + )); return Some(MinerDirective::BeginTenure { parent_tenure_start: StacksBlockId( last_winning_snapshot.winning_stacks_block_hash.clone().0, @@ -687,7 +718,7 @@ impl RelayerThread { // by someone else -- there's a chance that this other miner will produce a // BlockFound in the interim. 
debug!("Relayer: Did not win last winning snapshot despite mining the ongoing tenure. Will attempt to extend tenure after allowing the new miner some time to produce a block."); - self.tenure_extend_time = Some(TenureExtendTime::delayed( + self.tenure_extend_time = Some(TenureExtendTime::unresponsive_winner( self.config.miner.tenure_extend_wait_timeout, )); return None; @@ -714,56 +745,26 @@ impl RelayerThread { // we won the last non-empty sortition. Has there been a BlockFound issued for it? // This would be true if the stacks tip's tenure is at or descends from this snapshot. // If there has _not_ been a BlockFound, then we should issue one. - let ih = self - .sortdb - .index_handle(&last_winning_snapshot.sortition_id); if canonical_stacks_snapshot.block_height > last_winning_snapshot.block_height { // stacks tip is ahead of this snapshot, so no BlockFound can be issued. - test_debug!("Relayer: stacks_tip_sn.block_height ({}) > last_winning_snapshot.block_height ({})", canonical_stacks_snapshot.block_height, last_winning_snapshot.block_height); + test_debug!( + "Stacks_tip_sn.block_height ({}) > last_winning_snapshot.block_height ({})", + canonical_stacks_snapshot.block_height, + last_winning_snapshot.block_height + ); false } else if canonical_stacks_snapshot.block_height == last_winning_snapshot.block_height && canonical_stacks_snapshot.consensus_hash == last_winning_snapshot.consensus_hash { // this is the ongoing tenure snapshot. A BlockFound has already been issued. test_debug!( - "Relayer: ongoing tenure {} already represents last-winning snapshot", + "Ongoing tenure {} already represents last-winning snapshot", &canonical_stacks_snapshot.consensus_hash ); false } else { - // stacks tip's snapshot may be an ancestor of the last-won sortition. - // If so, then we can issue a BlockFound. 
- SortitionDB::get_ancestor_snapshot( - &ih, - canonical_stacks_snapshot.block_height, - &last_winning_snapshot.sortition_id, - ) - .map_err(|e| { - error!("Relayer: Failed to load ancestor snapshot: {e:?}"); - e - }) - .ok() - .flatten() - .map(|sn| { - let need_blockfound = sn.consensus_hash == canonical_stacks_snapshot.consensus_hash; - if !need_blockfound { - test_debug!( - "Relayer: canonical_stacks_tip_ch ({}) != sn_consensus_hash ({})", - &canonical_stacks_snapshot.consensus_hash, - &sn.consensus_hash - ); - } - need_blockfound - }) - .unwrap_or_else(|| { - test_debug!( - "Relayer: no ancestor at height {} off of sortition {} height {}", - canonical_stacks_snapshot.block_height, - &last_winning_snapshot.consensus_hash, - last_winning_snapshot.block_height - ); - false - }) + // The stacks tip is behind the last-won sortition, so a BlockFound is still needed. + true } } @@ -1345,18 +1346,15 @@ impl RelayerThread { Ok(true) } - /// Determine the highest valid sortition higher than `elected_tenure_id`, but no higher than - /// `sort_tip`. - /// - /// This is the highest non-empty sortition (up to and including `sort_tip`) - /// whose winning commit's parent tenure ID matches the - /// Stacks tip, and whose consensus hash matches the Stacks tip's tenure ID. + /// Determine the highest sortition higher than `elected_tenure_id`, but no higher than + /// `sort_tip` whose winning commit's parent tenure ID matches the `stacks_tip`, + /// and whose consensus hash matches the `stacks_tip`'s tenure ID. /// /// Returns Ok(Some(..)) if such a sortition is found, and is higher than that of /// `elected_tenure_id`. /// Returns Ok(None) if no such sortition is found. /// Returns Err(..) on DB errors. 
- fn find_highest_valid_sortition( + fn find_highest_sortition_commits_to_stacks_tip_tenure( sortdb: &SortitionDB, chain_state: &mut StacksChainState, sort_tip: &BlockSnapshot, @@ -1761,30 +1759,28 @@ impl RelayerThread { /// Try to start up a tenure-extend if the tenure_extend_time has expired. /// - /// Will check if the tenure-extend time was set and has expired and one of the following is true: - /// - this miner won the highest valid sortition but the burn view has changed. - /// - the subsequent miner appears to be offline. - /// If so, it will stop any existing tenure and attempt to start a new one with an Extended reason. - /// Otherwise, it will do nothing. + /// Will check if the tenure-extend time was set and has expired. If so, will + /// check if the current miner thread needs to issue a BlockFound or if it can + /// immediately tenure-extend. /// /// Note: tenure_extend_time is only set to Some(_) if during sortition processing, the sortition /// winner commit is corrupted or the winning miner has yet to produce a block. - fn try_continue_tenure(&mut self) { + fn check_tenure_timers(&mut self) { // Should begin a tenure-extend? - if let Some(tenure_extend_time) = &self.tenure_extend_time { - if !tenure_extend_time.should_extend() { - test_debug!( - "Relayer: will not try to tenure-extend yet ({} <= {})", - tenure_extend_time.elapsed().as_secs(), - tenure_extend_time.timeout().as_secs() - ); - return; - } - } else { + let Some(tenure_extend_time) = self.tenure_extend_time.clone() else { // No tenure extend time set, so nothing to do. return; + }; + if !tenure_extend_time.should_extend() { + test_debug!( + "Relayer: will not try to tenure-extend yet ({} <= {})", + tenure_extend_time.elapsed().as_secs(), + tenure_extend_time.timeout().as_secs() + ); + return; } - let Some(mining_pk) = self.get_mining_key_pkh() else { + + let Some(mining_pkh) = self.get_mining_key_pkh() else { // This shouldn't really ever hit, but just in case. 
warn!("Will not tenure extend -- no mining key"); // If we don't have a mining key set, don't bother checking again. @@ -1793,9 +1789,9 @@ impl RelayerThread { }; // reset timer so we can try again if for some reason a miner was already running (e.g. a // blockfound from earlier). - self.tenure_extend_time = Some(TenureExtendTime::delayed( - self.config.miner.tenure_extend_poll_timeout, - )); + self.tenure_extend_time + .as_mut() + .map(|t| t.refresh(self.config.miner.tenure_extend_poll_timeout)); // try to extend, but only if we aren't already running a thread for the current or newer // burnchain view let Ok(burn_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) @@ -1825,36 +1821,41 @@ impl RelayerThread { SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) .expect("FATAL: failed to query sortiiton DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); - let Ok(last_winning_snapshot) = Self::get_last_winning_snapshot(&self.sortdb, &burn_tip) - .inspect_err(|e| { - warn!("Failed to load last winning snapshot: {e:?}"); - }) - else { - // this should be unreachable, but don't tempt fate. - info!("No prior snapshots have a winning sortition. Will not try to mine."); - self.tenure_extend_time = None; - return; - }; - let won_last_winning_snapshot = last_winning_snapshot.miner_pk_hash == Some(mining_pk); - if won_last_winning_snapshot - && self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) - { - info!("Will not extend tenure -- need to issue a BlockFound first"); - // We may manage to extend later, so don't set the timer to None. 
- return; + + match tenure_extend_time.reason() { + TenureExtendReason::BadSortitionWinner | TenureExtendReason::EmptySortition => { + // Before we try to extend, check if we need to issue a BlockFound + let Ok(last_winning_snapshot) = + Self::get_last_winning_snapshot(&self.sortdb, &burn_tip).inspect_err(|e| { + warn!("Failed to load last winning snapshot: {e:?}"); + }) + else { + // this should be unreachable, but don't tempt fate. + info!("No prior snapshots have a winning sortition. Will not try to mine."); + self.tenure_extend_time = None; + return; + }; + let won_last_winning_snapshot = + last_winning_snapshot.miner_pk_hash == Some(mining_pkh); + if won_last_winning_snapshot + && self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) + { + info!("Will not tenure extend yet -- need to issue a BlockFound first"); + // We may manage to extend later, so don't set the timer to None. + return; + } + } + TenureExtendReason::UnresponsiveWinner => {} } + let won_ongoing_tenure_sortition = - canonical_stacks_snapshot.miner_pk_hash == Some(mining_pk); + canonical_stacks_snapshot.miner_pk_hash == Some(mining_pkh); if !won_ongoing_tenure_sortition { - // We did not win the ongoing tenure sortition, so nothing we can even do. - // Make sure this check is done AFTER checking for the BlockFound so that - // we can set tenure_extend_time to None in this case without causing problems - // (If we need to issue a block found, we may not have won_ongoing_tenure_sortition) debug!("Will not tenure extend. 
Did not win ongoing tenure sortition";
                 "burn_chain_sortition_tip_ch" => %burn_tip.consensus_hash,
                 "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch,
                 "burn_chain_sortition_tip_mining_pk" => ?burn_tip.miner_pk_hash,
-                "mining_pk" => %mining_pk,
+                "mining_pk" => %mining_pkh
             );
             self.tenure_extend_time = None;
             return;
@@ -1902,7 +1903,7 @@ impl RelayerThread {
         let poll_frequency_ms = 1_000;
 
         while self.globals.keep_running() {
-            self.try_continue_tenure();
+            self.check_tenure_timers();
             let raised_initiative = self.globals.take_initiative();
             let timed_out = Instant::now() >= self.next_initiative;
             let mut initiative_directive = if raised_initiative.is_some() || timed_out {
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 0c11f47133..e35a8b0de0 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -14955,7 +14955,7 @@ fn prev_miner_will_not_attempt_to_extend_if_incoming_miner_produces_a_block() {
     signer_test.shutdown();
 }
 
-/// Test a scenario where a non-blocking minority of miners are configured to favour the incoming miner.
+/// Test a scenario where a non-blocking minority of signers are configured to favour the incoming miner.
 /// The previous miner should extend its tenure and succeed as a majority are configured to favour it
 /// and its subsequent blocks should be be approved.
 /// Two miners boot to Nakamoto.
@@ -14965,7 +14965,7 @@ fn prev_miner_will_not_attempt_to_extend_if_incoming_miner_produces_a_block() {
 /// Miner 2 wins the second tenure B.
 /// A majority of signers mark miner 2 as invalid.
 /// Miner 2 proposes block N+1' with a TenureChangeCause::BlockFound
-/// A majority fo signers rekect block N+1'.
+/// A majority of signers reject block N+1'. 
/// Miner 1 proposes block N+1 with a TenureChangeCause::Extended /// A majority of signers accept and the stacks tip advances to N+1 /// Miner 1 proposes block N+2 with a transfer tx @@ -15579,7 +15579,7 @@ fn non_blocking_minority_configured_to_favour_incoming_miner() { signer_test.shutdown(); } -/// Test a scenario where a non-blocking majority of miners are configured to favour the previous miner +/// Test a scenario where a non-blocking majority of signers are configured to favour the previous miner /// extending their tenure when the incoming miner is slow to propose a block. The incoming miner should succeed /// and its subsequent blocks should be be approved. /// Two miners boot to Nakamoto. From 0e694a5e479e7a879f5ec996d623b5e648b92562 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 10 Feb 2025 13:53:16 -0800 Subject: [PATCH 13/13] CRC: add a unit test for need_block_found Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 85 ++++++++++++++++++- 1 file changed, 82 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 726c987bf2..e5bc3efd07 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -677,7 +677,7 @@ impl RelayerThread { &last_winning_snapshot.consensus_hash ); - if self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { + if Self::need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { info!( "Relayer: will submit late BlockFound for {}", &last_winning_snapshot.consensus_hash @@ -738,7 +738,6 @@ impl RelayerThread { /// Returns true if the stacks tip's snapshot is an ancestor of the last-won sortition /// Returns false otherwise. 
fn need_block_found( - &mut self, canonical_stacks_snapshot: &BlockSnapshot, last_winning_snapshot: &BlockSnapshot, ) -> bool { @@ -1838,7 +1837,7 @@ impl RelayerThread { let won_last_winning_snapshot = last_winning_snapshot.miner_pk_hash == Some(mining_pkh); if won_last_winning_snapshot - && self.need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) + && Self::need_block_found(&canonical_stacks_snapshot, &last_winning_snapshot) { info!("Will not tenure extend yet -- need to issue a BlockFound first"); // We may manage to extend later, so don't set the timer to None. @@ -2059,6 +2058,10 @@ pub mod test { use std::io::Write; use std::path::Path; + use rand::{thread_rng, Rng}; + use stacks::burnchains::Txid; + use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash, OpsHash, SortitionHash}; + use stacks::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, TrieHash}; use stacks::util::hash::Hash160; use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::util::vrf::VRFPublicKey; @@ -2169,4 +2172,80 @@ pub mod test { std::fs::remove_file(path).expect("Failed to delete test file"); } + + #[test] + fn check_need_block_found() { + let consensus_hash_byte = thread_rng().gen(); + let canonical_stacks_snapshot = BlockSnapshot { + block_height: thread_rng().gen::().wrapping_add(1), // Add one to ensure we can always decrease by 1 without underflowing. 
+            burn_header_timestamp: thread_rng().gen(),
+            burn_header_hash: BurnchainHeaderHash([thread_rng().gen(); 32]),
+            consensus_hash: ConsensusHash([consensus_hash_byte; 20]),
+            parent_burn_header_hash: BurnchainHeaderHash([thread_rng().gen(); 32]),
+            ops_hash: OpsHash([thread_rng().gen(); 32]),
+            total_burn: thread_rng().gen(),
+            sortition: true,
+            sortition_hash: SortitionHash([thread_rng().gen(); 32]),
+            winning_block_txid: Txid([thread_rng().gen(); 32]),
+            winning_stacks_block_hash: BlockHeaderHash([thread_rng().gen(); 32]),
+            index_root: TrieHash([thread_rng().gen(); 32]),
+            num_sortitions: thread_rng().gen(),
+            stacks_block_accepted: true,
+            stacks_block_height: thread_rng().gen(),
+            arrival_index: thread_rng().gen(),
+            canonical_stacks_tip_consensus_hash: ConsensusHash([thread_rng().gen(); 20]),
+            canonical_stacks_tip_hash: BlockHeaderHash([thread_rng().gen(); 32]),
+            canonical_stacks_tip_height: thread_rng().gen(),
+            sortition_id: SortitionId([thread_rng().gen(); 32]),
+            parent_sortition_id: SortitionId([thread_rng().gen(); 32]),
+            pox_valid: true,
+            accumulated_coinbase_ustx: thread_rng().gen::<u64>() as u128,
+            miner_pk_hash: Some(Hash160([thread_rng().gen(); 20])),
+        };
+
+        // The consensus_hashes are the same, and the block heights are the same. Therefore, don't need a block found.
+        let last_winning_block_snapshot = canonical_stacks_snapshot.clone();
+        assert!(!RelayerThread::need_block_found(
+            &canonical_stacks_snapshot,
+            &last_winning_block_snapshot
+        ));
+
+        // The block height of the canonical tip is higher than the last winning snapshot. We already issued a block found. 
+ let mut canonical_stacks_snapshot_is_higher_than_last_winning_snapshot = + last_winning_block_snapshot.clone(); + canonical_stacks_snapshot_is_higher_than_last_winning_snapshot.block_height = + canonical_stacks_snapshot.block_height.saturating_sub(1); + assert!(!RelayerThread::need_block_found( + &canonical_stacks_snapshot, + &canonical_stacks_snapshot_is_higher_than_last_winning_snapshot + )); + + // The block height is the same, but we have different consensus hashes. We need to issue a block found. + let mut tip_consensus_hash_mismatch = last_winning_block_snapshot.clone(); + tip_consensus_hash_mismatch.consensus_hash = + ConsensusHash([consensus_hash_byte.wrapping_add(1); 20]); + assert!(RelayerThread::need_block_found( + &canonical_stacks_snapshot, + &tip_consensus_hash_mismatch + )); + + // The block height is the same, but we have different consensus hashes. We need to issue a block found. + let mut tip_consensus_hash_mismatch = last_winning_block_snapshot.clone(); + tip_consensus_hash_mismatch.consensus_hash = + ConsensusHash([consensus_hash_byte.wrapping_add(1); 20]); + assert!(RelayerThread::need_block_found( + &canonical_stacks_snapshot, + &tip_consensus_hash_mismatch + )); + + // The block height of the canonical tip is lower than the last winning snapshot blockheight. We need to issue a block found. + let mut canonical_stacks_snapshot_is_lower_than_last_winning_snapshot = + last_winning_block_snapshot.clone(); + canonical_stacks_snapshot_is_lower_than_last_winning_snapshot.block_height = + canonical_stacks_snapshot.block_height.saturating_add(1); + assert!(RelayerThread::need_block_found( + &canonical_stacks_snapshot, + &canonical_stacks_snapshot_is_lower_than_last_winning_snapshot + )); + } }