1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -21,6 +21,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
- `block-time`
- `to-ascii?`
- Added `contract_cost_limit_percentage` to the miner config file — sets the percentage of a block’s execution cost at which, if a large non-boot contract call would cause a BlockTooBigError, the miner will stop adding further non-boot contract calls and only include STX transfers and boot contract calls for the remainder of the block.
- Fixed a bug where a miner that won a sortition with a block commit pointing to a previous tip would attempt to reorg its own tenure. [#6481](https://github.com/stacks-network/stacks-core/issues/6481)

### Changed

99 changes: 87 additions & 12 deletions stacks-node/src/nakamoto_node/relayer.rs
@@ -50,6 +50,7 @@ use stacks::net::db::LocalPeer;
use stacks::net::p2p::NetworkHandle;
use stacks::net::relay::Relayer;
use stacks::net::NetworkResult;
use stacks::util_lib::db::Error as DbError;
use stacks_common::types::chainstate::{
BlockHeaderHash, BurnchainHeaderHash, StacksBlockId, StacksPublicKey, VRFSeed,
};
@@ -82,6 +83,11 @@ pub static TEST_MINER_THREAD_STALL: LazyLock<TestFlag<bool>> = LazyLock::new(Tes
pub static TEST_MINER_THREAD_START_STALL: LazyLock<TestFlag<bool>> =
LazyLock::new(TestFlag::default);

#[cfg(test)]
/// Test flag to set the tip for the miner to commit to
pub static TEST_MINER_COMMIT_TIP: LazyLock<TestFlag<Option<(ConsensusHash, BlockHeaderHash)>>> =
LazyLock::new(TestFlag::default);

/// Command types for the Nakamoto relayer thread, issued to it by other threads
#[allow(clippy::large_enum_variant)]
pub enum RelayerDirective {
@@ -616,7 +622,10 @@ impl RelayerThread {
/// Specifically:
///
/// If we won the given sortition `sn`, then we can start mining immediately with a `BlockFound`
/// tenure-change. Otherwise, if we won the tenure which started the ongoing Stacks tenure
/// tenure-change. The exception is if we won the sortition, but the sortition's winning commit
/// does not commit to the ongoing tenure. In this case, we instead extend the current tenure.
///
/// Otherwise (i.e. we did not win `sn`), if we won the tenure which started the ongoing Stacks tenure
/// (i.e. we're the active miner), then we _may_ start mining after a timeout _if_ the winning
/// miner (not us) fails to submit a `BlockFound` tenure-change block for `sn`.
fn choose_directive_sortition_with_winner(
@@ -626,6 +635,57 @@
committed_index_hash: StacksBlockId,
) -> MinerDirective {
let won_sortition = sn.miner_pk_hash.as_ref() == Some(mining_pkh);

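// Load the canonical Stacks tip and the snapshot of the sortition that started its tenure;
// both the stale-commit check below and the not-won path further down rely on them.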
let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) =
SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
.expect("FATAL: failed to query sortition DB for stacks tip");
let canonical_stacks_snapshot =
SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
.expect("FATAL: failed to query sortiiton DB for epoch")
.expect("FATAL: no sortition for canonical stacks tip");

// If we won the sortition, ensure that the sortition's winning commit actually commits to
// the ongoing tenure. If it does not (i.e. commit is "stale" and points to N-1 when we are
// currently in N), and if we are also the ongoing tenure's miner, then we must not attempt
// a tenure change (which would reorg our own signed blocks). Instead, we should immediately
// extend the tenure.
if won_sortition && !self.config.get_node_config(false).mock_mining {
let canonical_stacks_tip =
StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh);

let commits_to_tip_tenure = Self::sortition_commits_to_stacks_tip_tenure(
&mut self.chainstate,
&canonical_stacks_tip,
&canonical_stacks_snapshot,
&sn,
).unwrap_or_else(|e| {
warn!(
"Relayer: Failed to determine if winning sortition commits to current tenure: {e:?}";
"sortition_ch" => %sn.consensus_hash,
"stacks_tip_ch" => %canonical_stacks_tip_ch
);
false
});

if !commits_to_tip_tenure {
let won_ongoing_tenure_sortition =
canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh);

if won_ongoing_tenure_sortition {
info!(
"Relayer: Won sortition, but commit does not target ongoing tenure. Will extend instead of starting a new tenure.";
"winning_sortition" => %sn.consensus_hash,
"ongoing_tenure" => %canonical_stacks_snapshot.consensus_hash,
"commits_to_tip_tenure?" => commits_to_tip_tenure
);
// Extend tenure to the new burn view instead of attempting BlockFound
return MinerDirective::ContinueTenure {
new_burn_view: sn.consensus_hash,
};
}
}
}

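// At this point, any sortition we won has a commit that is safe to act on: it either targets
// the ongoing tenure, or we are not the ongoing tenure's miner.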
if won_sortition || self.config.get_node_config(false).mock_mining {
// a sortition happened, and we won
info!("Won sortition; begin tenure.";
@@ -643,13 +703,6 @@
"Relayer: did not win sortition {}, so stopping tenure",
&sn.sortition
);
let (canonical_stacks_tip_ch, _) =
SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
.expect("FATAL: failed to query sortition DB for stacks tip");
let canonical_stacks_snapshot =
SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch)
.expect("FATAL: failed to query sortiiton DB for epoch")
.expect("FATAL: no sortition for canonical stacks tip");

let won_ongoing_tenure_sortition =
canonical_stacks_snapshot.miner_pk_hash.as_ref() == Some(mining_pkh);
@@ -1633,6 +1686,31 @@ impl RelayerThread {
false
}

/// Get the canonical tip for the miner to commit to.
/// This is provided as a separate function so that it can be overridden for testing.
#[cfg(not(test))]
fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> {
None
}

#[cfg(test)]
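/// Test-only variant: returns the tip injected via `TEST_MINER_COMMIT_TIP`, if any.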
fn fault_injection_get_tip_for_commit(&self) -> Option<(ConsensusHash, BlockHeaderHash)> {
TEST_MINER_COMMIT_TIP.get()
}

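/// Get the Stacks tip (consensus hash and block header hash) that the next block-commit
/// should target, honoring the test override when it is set.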
fn get_commit_for_tip(&mut self) -> Result<(ConsensusHash, BlockHeaderHash), DbError> {
if let Some((consensus_hash, block_header_hash)) = self.fault_injection_get_tip_for_commit()
{
info!("Relayer: using test tip for commit";
"consensus_hash" => %consensus_hash,
"block_header_hash" => %block_header_hash,
);
Ok((consensus_hash, block_header_hash))
} else {
SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn())
}
}

/// Generate and submit the next block-commit, and record it locally
fn issue_block_commit(&mut self) -> Result<(), NakamotoNodeError> {
if self.fault_injection_skip_block_commit() {
@@ -1641,10 +1719,7 @@ impl RelayerThread {
);
return Ok(());
}
let (tip_block_ch, tip_block_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(
self.sortdb.conn(),
)
.unwrap_or_else(|e| {
let (tip_block_ch, tip_block_bh) = self.get_commit_for_tip().unwrap_or_else(|e| {
panic!("Failed to load canonical stacks tip: {e:?}");
});
let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?;
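A minimal sketch of how an integration test might drive the new override (illustrative only, not part of this PR; it assumes `TestFlag` exposes a `set` counterpart to the `get` used in `fault_injection_get_tip_for_commit`, and the helper names are hypothetical):

use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash};

use crate::nakamoto_node::relayer::TEST_MINER_COMMIT_TIP;

/// Hypothetical test helper: make subsequent block-commits target an older Stacks tip.
fn force_stale_commit(tip_ch: ConsensusHash, tip_bh: BlockHeaderHash) {
    TEST_MINER_COMMIT_TIP.set(Some((tip_ch, tip_bh)));
}

/// Hypothetical test helper: clear the override so commits track the canonical tip again.
fn clear_commit_override() {
    TEST_MINER_COMMIT_TIP.set(None);
}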
78 changes: 41 additions & 37 deletions stacks-node/src/tests/nakamoto_integrations.rs
@@ -5300,8 +5300,8 @@ fn burn_ops_integration_test() {
/// Miner B starts its tenure, Miner B produces a Stacks block b_0, but miner C submits its block commit before b_0 is broadcasted.
/// Bitcoin block C, containing Miner C's block commit, is mined BEFORE miner C has a chance to update their block commit with b_0's information.
/// This test asserts:
/// * tenure C ignores b_0, and correctly builds off of block a_x.
fn forked_tenure_is_ignored() {
/// * tenure C extends the ongoing tenure, building off of b_0 despite miner C's block commit being submitted before b_0 was broadcasted.
fn bad_commit_does_not_trigger_fork() {
if env::var("BITCOIND_TEST") != Ok("1".into()) {
return;
}
@@ -5493,27 +5493,46 @@ fn forked_tenure_is_ignored() {
.lock()
.expect("Mutex poisoned")
.get_stacks_blocks_processed();
let block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_some();
// We don't expect a block in this tenure, because the miner should instead be building off
// of a previous tenure
let no_block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none();
Ok(commits_count > commits_before
&& blocks_count > blocks_before
&& blocks_processed > blocks_processed_before
&& block_in_tenure)
&& no_block_in_tenure)
})
.unwrap();
.unwrap_or_else(|_| {
let commits_count = commits_submitted.load(Ordering::SeqCst);
let blocks_count = mined_blocks.load(Ordering::SeqCst);
let blocks_processed = coord_channel
.lock()
.expect("Mutex poisoned")
.get_stacks_blocks_processed();
let no_block_in_tenure = get_last_block_in_current_tenure(&sortdb, &chainstate).is_none();
error!("Tenure C shouldn't have produced a block";
"commits_count" => commits_count,
"commits_before" => commits_before,
"blocks_count" => blocks_count,
"blocks_before" => blocks_before,
"blocks_processed" => blocks_processed,
"blocks_processed_before" => blocks_processed_before,
"no_block_in_tenure" => no_block_in_tenure,
);
panic!("Tenure C shouldn't have produced a block");
});

info!("Tenure C produced a block!");
info!("Tenure C did not produce a block");

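// There should still be no block that belongs to tenure C itself: the miner extended tenure B,
// so newly mined blocks keep tenure B's consensus hash.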
let block_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate).unwrap();
let block_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate);
assert!(block_tenure_c.is_none());
let blocks = test_observer::get_mined_nakamoto_blocks();
let block_c = blocks.last().unwrap();
info!("Tenure C tip block: {}", &block_tenure_c.index_block_hash());
info!("Tenure C last block: {}", &block_c.block_id);
assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash());

// Block C was built AFTER Block B was built, but BEFORE it was broadcasted (processed), so it should be built off of Block B
assert_eq!(
block_tenure_c.stacks_block_height,
block_tenure_a.stacks_block_height + 1
block_c.stacks_height,
block_tenure_b.header.chain_length + 1
);

// Now let's produce a second block for tenure C and ensure it builds off of block C.
@@ -5556,14 +5575,11 @@

info!("Tenure C produced a second block!");

let block_2_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate).unwrap();
let block_2_tenure_c = get_last_block_in_current_tenure(&sortdb, &chainstate);
assert!(block_2_tenure_c.is_none());
let blocks = test_observer::get_mined_nakamoto_blocks();
let block_2_c = blocks.last().unwrap();

info!(
"Tenure C tip block: {}",
&block_2_tenure_c.index_block_hash()
);
info!("Tenure C last block: {}", &block_2_c.block_id);

info!("Starting tenure D.");
@@ -5595,8 +5611,6 @@
info!("Tenure D last block: {}", block_d.block_id);

assert_ne!(block_tenure_b.block_id(), block_tenure_a.index_block_hash());
assert_ne!(block_tenure_b.block_id(), block_tenure_c.index_block_hash());
assert_ne!(block_tenure_c, block_tenure_a);

// Block B was built atop block A
assert_eq!(
@@ -5608,39 +5622,29 @@
block_tenure_a.index_block_hash().to_string()
);

// Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block A
// Block C was built AFTER Block B was built, but BEFORE it was broadcasted, so it should be built off of Block B
assert_eq!(
block_tenure_c.stacks_block_height,
block_tenure_a.stacks_block_height + 1
block_c.stacks_height,
block_tenure_b.header.chain_length + 1
);
assert_eq!(
block_c.parent_block_id,
block_tenure_a.index_block_hash().to_string()
block_tenure_b.header.block_id().to_string()
);

assert_ne!(block_tenure_c, block_2_tenure_c);
assert_ne!(block_2_tenure_c, block_tenure_d);
assert_ne!(block_tenure_c, block_tenure_d);

// Second block of tenure C builds off of block C
assert_eq!(
block_2_tenure_c.stacks_block_height,
block_tenure_c.stacks_block_height + 1,
);
assert_eq!(
block_2_c.parent_block_id,
block_tenure_c.index_block_hash().to_string()
block_2_c.stacks_height,
block_tenure_b.header.chain_length + 2,
);
assert_eq!(block_2_c.parent_block_id, block_c.block_id);

// Tenure D builds off of the second block of tenure C
assert_eq!(
block_tenure_d.stacks_block_height,
block_2_tenure_c.stacks_block_height + 1,
);
assert_eq!(
block_d.parent_block_id,
block_2_tenure_c.index_block_hash().to_string()
block_2_c.stacks_height + 1,
);
assert_eq!(block_d.parent_block_id, block_2_c.block_id);

coord_channel
.lock()