diff --git a/crates/p2p/src/consensus.rs b/crates/p2p/src/consensus.rs index 575704509b..da492a83b9 100644 --- a/crates/p2p/src/consensus.rs +++ b/crates/p2p/src/consensus.rs @@ -234,7 +234,6 @@ mod tests { use libp2p::identity::Keypair; use p2p_proto::common::{Address, Hash, L1DataAvailabilityMode}; use p2p_proto::consensus::{ - BlockInfo, ProposalFin, ProposalInit, ProposalPart, @@ -260,8 +259,11 @@ mod tests { let height_and_round: HeightAndRound = (1, 2).into(); // Create a sample proposal - let block_info = BlockInfo { + let proposal_init = ProposalInit { height: 100, + round: 2, + valid_round: Some(1), + proposer: Address(Felt::from_hex_str("0x123").unwrap()), timestamp: 1234567890, builder: Address(Felt::from_hex_str("0x456").unwrap()), l1_da_mode: L1DataAvailabilityMode::Calldata, @@ -270,8 +272,10 @@ mod tests { l1_data_gas_price_fri: 3000, l1_gas_price_wei: 4000, l1_data_gas_price_wei: 5000, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; - let proposal = ProposalPart::BlockInfo(block_info); + let proposal = ProposalPart::Init(proposal_init); // Create messages let messages = @@ -301,8 +305,11 @@ mod tests { let height_and_round: HeightAndRound = (1, 2).into(); // Create a sample proposal - let block_info = BlockInfo { + let proposal_init = ProposalInit { height: 100, + round: 2, + valid_round: Some(1), + proposer: Address(Felt::from_hex_str("0x123").unwrap()), timestamp: 1234567890, builder: Address(Felt::from_hex_str("0x456").unwrap()), l1_da_mode: L1DataAvailabilityMode::Calldata, @@ -311,8 +318,10 @@ mod tests { l1_data_gas_price_fri: 3000, l1_gas_price_wei: 4000, l1_data_gas_price_wei: 5000, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; - let proposal = ProposalPart::BlockInfo(block_info); + let proposal = ProposalPart::Init(proposal_init); // Create a message let message = StreamMessage { @@ -496,6 +505,7 @@ mod tests { round: 1, proposal_commitment: 
Some(Hash(Felt::from_hex_str("0x123").unwrap())), voter: Address(Felt::from_hex_str("0x456").unwrap()), + signature: Default::default(), }, Vote { vote_type: VoteType::Precommit, @@ -503,6 +513,7 @@ mod tests { round: 1, proposal_commitment: Some(Hash(Felt::from_hex_str("0x789").unwrap())), voter: Address(Felt::from_hex_str("0xabc").unwrap()), + signature: Default::default(), }, Vote { vote_type: VoteType::Prevote, @@ -510,6 +521,7 @@ mod tests { round: 2, proposal_commitment: None, // NIL vote voter: Address(Felt::from_hex_str("0xdef").unwrap()), + signature: Default::default(), }, ]; let mut rxs = Vec::new(); @@ -584,13 +596,8 @@ mod tests { stream.push(ProposalPart::Init(ProposalInit { height, round, - proposer: p2p_proto::common::Address(Felt::from_hex_str("0x123").unwrap()), valid_round: None, - })); - - // BlockInfo - stream.push(ProposalPart::BlockInfo(BlockInfo { - height, + proposer: p2p_proto::common::Address(Felt::from_hex_str("0x123").unwrap()), timestamp: 1234567890 + base, builder: p2p_proto::common::Address(Felt::from_hex_str("0x456").unwrap()), l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -599,6 +606,8 @@ mod tests { l1_data_gas_price_fri: 3000 + base as u128, l1_gas_price_wei: 4000 + base as u128, l1_data_gas_price_wei: 5000 + base as u128, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), })); // TransactionBatch (send a few) @@ -621,6 +630,8 @@ mod tests { // ProposalFin stream.push(ProposalPart::Fin(ProposalFin { proposal_commitment: p2p_proto::common::Hash(Felt::from_hex_str("0x69420abc").unwrap()), + executed_transaction_count: 3, + fin_payload: None, })); ((height, round), stream) diff --git a/crates/p2p/src/consensus/stream.rs b/crates/p2p/src/consensus/stream.rs index b7c9db896f..355bb2e7cf 100644 --- a/crates/p2p/src/consensus/stream.rs +++ b/crates/p2p/src/consensus/stream.rs @@ -140,8 +140,11 @@ mod tests { #[test] fn test_encode_decode() { // Create a sample ProposalPart - let 
block_info = p2p_proto::consensus::BlockInfo { + let proposal_init = p2p_proto::consensus::ProposalInit { height: 100, + round: 2, + valid_round: Some(1), + proposer: Address(Felt::from_hex_str("0x123").unwrap()), timestamp: 1234567890, builder: Address(Felt::from_hex_str("0x456").unwrap()), l1_da_mode: L1DataAvailabilityMode::Calldata, @@ -150,8 +153,10 @@ mod tests { l1_data_gas_price_fri: 3000, l1_gas_price_wei: 4000, l1_data_gas_price_wei: 5000, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; - let proposal = p2p_proto::consensus::ProposalPart::BlockInfo(block_info); + let proposal = p2p_proto::consensus::ProposalPart::Init(proposal_init); // Create a StreamMessage with the ProposalPart let stream_message = StreamMessage { diff --git a/crates/p2p_proto/proto/consensus/consensus.proto b/crates/p2p_proto/proto/consensus/consensus.proto index ba280e3dcc..8c578c53a4 100644 --- a/crates/p2p_proto/proto/consensus/consensus.proto +++ b/crates/p2p_proto/proto/consensus/consensus.proto @@ -33,6 +33,7 @@ message Vote { // This is optional since a vote can be NIL. 
optional starknet.common.Hash proposal_commitment = 5; starknet.common.Address voter = 6; + starknet.common.Hashes signature = 7; } message StreamMessage { @@ -45,49 +46,62 @@ message StreamMessage { } message ProposalInit { - uint64 height = 1; - uint32 round = 2; - optional uint32 valid_round = 3; - starknet.common.Address proposer = 4; -} - -message BlockInfo { uint64 height = 1; - uint64 timestamp = 2; - starknet.common.Address builder = 3; - starknet.common.L1DataAvailabilityMode l1_da_mode = 4; - starknet.common.Uint128 l2_gas_price_fri = 5; - starknet.common.Uint128 l1_gas_price_fri = 6; - starknet.common.Uint128 l1_data_gas_price_fri = 7; - starknet.common.Uint128 l1_gas_price_wei = 8; - starknet.common.Uint128 l1_data_gas_price_wei = 9; + uint32 round = 2; + optional uint32 valid_round = 3; + starknet.common.Address proposer = 4; + uint64 timestamp = 5; + starknet.common.Address builder = 6; + starknet.common.L1DataAvailabilityMode l1_da_mode = 7; + starknet.common.Uint128 l2_gas_price_fri = 8; + starknet.common.Uint128 l1_gas_price_fri = 9; + starknet.common.Uint128 l1_data_gas_price_fri = 10; + starknet.common.Uint128 l1_gas_price_wei = 11; + starknet.common.Uint128 l1_data_gas_price_wei = 12; + string starknet_version = 13; + starknet.common.Hash version_constant_commitment = 14; } message TransactionBatch { repeated ConsensusTransaction transactions = 1; } +message CommitmentParts { + starknet.common.Felt252 concatenated_counts = 1; + optional starknet.common.Hash parent_commitment = 2; + starknet.common.Hash transaction_commitment = 3; + starknet.common.Hash event_commitment = 4; + starknet.common.Hash receipt_commitment = 5; +} + +// L2 gas info for the block (next price and gas used). +message L2GasInfo { + starknet.common.Uint128 next_l2_gas_price_fri = 1; + uint64 l2_gas_used = 2; +} + +// Optional payload carried in ProposalFin: commitment parts and L2 gas. 
+message ProposalFinPayload { + CommitmentParts commitment_parts = 1; + optional L2GasInfo l2_gas_info = 2; +} + message ProposalFin { // Identifies a Starknet block based on the content streamed in the proposal. starknet.common.Hash proposal_commitment = 1; + // Number of executed transactions in the proposal. + uint64 executed_transaction_count = 2; + optional ProposalFinPayload fin_payload = 3; } // Network format: -// 1. First message is ProposalInit -// 2. Last message is ProposalFin -// -// Empty block - no other messages sent. -// -// Block with transactions: -// 3. block_info is sent once -// 4. transactions is sent repeatedly -// 5. executed_transaction_count is sent once +// 1. First message is ProposalInit (init, includes all block metadata) +// 2. transactions is sent repeatedly (for non-empty blocks) +// 3. Last message is ProposalFin message ProposalPart { oneof message { - ProposalInit init = 1; - ProposalFin fin = 2; - BlockInfo block_info = 3; - TransactionBatch transactions = 4; - uint64 executed_transaction_count = 5; + ProposalInit init = 1; + ProposalFin fin = 2; + TransactionBatch transactions = 3; } } \ No newline at end of file diff --git a/crates/p2p_proto/src/common.rs b/crates/p2p_proto/src/common.rs index ec35d2f954..0889f07013 100644 --- a/crates/p2p_proto/src/common.rs +++ b/crates/p2p_proto/src/common.rs @@ -44,7 +44,9 @@ impl Dummy for Hash256 { } } -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[derive( + Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, ToProtobuf, TryFromProtobuf, Dummy, +)] #[protobuf(name = "crate::proto::common::Hashes")] pub struct Hashes { pub items: Vec, diff --git a/crates/p2p_proto/src/consensus.rs b/crates/p2p_proto/src/consensus.rs index a020734f5e..c7325ee231 100644 --- a/crates/p2p_proto/src/consensus.rs +++ b/crates/p2p_proto/src/consensus.rs @@ -1,8 +1,9 @@ -use fake::{Dummy, Fake as _}; +use fake::Dummy; +use pathfinder_crypto::Felt; use prost::Message; use 
proto::consensus::consensus as consensus_proto; -use crate::common::{Address, Hash, L1DataAvailabilityMode}; +use crate::common::{Address, Hash, Hashes, L1DataAvailabilityMode}; use crate::transaction::{DeclareV3WithClass, DeployAccountV3, InvokeV3WithProof, L1HandlerV0}; use crate::{proto, proto_field, ProtobufSerializable, ToProtobuf, TryFromProtobuf}; @@ -39,6 +40,7 @@ pub struct Vote { #[optional] pub proposal_commitment: Option, pub voter: Address, + pub signature: Hashes, } impl ProtobufSerializable for Vote { @@ -57,9 +59,7 @@ impl ProtobufSerializable for Vote { pub enum ProposalPart { Init(ProposalInit), Fin(ProposalFin), - BlockInfo(BlockInfo), TransactionBatch(Vec), - ExecutedTransactionCount(u64), } #[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] @@ -70,43 +70,50 @@ pub struct ProposalInit { #[optional] pub valid_round: Option, pub proposer: Address, -} - -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] -#[protobuf(name = "consensus_proto::ProposalFin")] -pub struct ProposalFin { - pub proposal_commitment: Hash, -} - -#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf)] -#[protobuf(name = "consensus_proto::BlockInfo")] -pub struct BlockInfo { - pub height: u64, - pub builder: Address, pub timestamp: u64, + pub builder: Address, + pub l1_da_mode: L1DataAvailabilityMode, pub l2_gas_price_fri: u128, pub l1_gas_price_fri: u128, pub l1_data_gas_price_fri: u128, pub l1_gas_price_wei: u128, pub l1_data_gas_price_wei: u128, - pub l1_da_mode: L1DataAvailabilityMode, + pub starknet_version: String, + pub version_constant_commitment: Hash, } -impl Dummy for BlockInfo { - fn dummy_with_rng(_: &T, rng: &mut R) -> Self { - Self { - height: rng.gen_range(0..i64::MAX) as u64, - builder: fake::Faker.fake_with_rng(rng), - timestamp: rng.gen_range(0..i64::MAX) as u64, - // Keep the prices low enough to avoid overflow when converting between fri and wei - l2_gas_price_fri: rng.gen_range(1..i64::MAX) as 
u128, - l1_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, - l1_data_gas_price_fri: rng.gen_range(1..i64::MAX) as u128, - l1_gas_price_wei: rng.gen_range(1..i64::MAX) as u128, - l1_data_gas_price_wei: rng.gen_range(1..i64::MAX) as u128, - l1_da_mode: fake::Faker.fake_with_rng(rng), - } - } +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[protobuf(name = "consensus_proto::CommitmentParts")] +pub struct CommitmentParts { + pub concatenated_counts: Felt, + #[optional] + pub parent_commitment: Option<Hash>, + pub transaction_commitment: Hash, + pub event_commitment: Hash, + pub receipt_commitment: Hash, +} + +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[protobuf(name = "consensus_proto::L2GasInfo")] +pub struct L2GasInfo { + pub next_l2_gas_price_fri: u128, + pub l2_gas_used: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[protobuf(name = "consensus_proto::ProposalFinPayload")] +pub struct ProposalFinPayload { + pub commitment_parts: CommitmentParts, + #[optional] pub l2_gas_info: Option<L2GasInfo>, +} + +#[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] +#[protobuf(name = "consensus_proto::ProposalFin")] +pub struct ProposalFin { + pub proposal_commitment: Hash, + pub executed_transaction_count: u64, + #[optional] + pub fin_payload: Option<ProposalFinPayload>, } #[derive(Debug, Clone, PartialEq, Eq, ToProtobuf, TryFromProtobuf, Dummy)] @@ -159,17 +166,10 @@ pub enum StreamMessageVariant { impl ToProtobuf for ProposalPart { fn to_protobuf(self) -> consensus_proto::ProposalPart { - use consensus_proto::proposal_part::Message::{ - BlockInfo, - ExecutedTransactionCount, - Fin, - Init, - Transactions, - }; + use consensus_proto::proposal_part::Message::{Fin, Init, Transactions}; let msg = match self { ProposalPart::Init(proposal_init) => Init(proposal_init.to_protobuf()), ProposalPart::Fin(proposal_fin) => Fin(proposal_fin.to_protobuf()), - ProposalPart::BlockInfo(block_info) =>
BlockInfo(block_info.to_protobuf()), ProposalPart::TransactionBatch(transactions) => { Transactions(consensus_proto::TransactionBatch { transactions: transactions @@ -178,9 +178,6 @@ impl ToProtobuf for ProposalPart { .collect(), }) } - ProposalPart::ExecutedTransactionCount(count) => { - ExecutedTransactionCount(count.to_protobuf()) - } }; consensus_proto::ProposalPart { message: Some(msg) } } @@ -191,29 +188,16 @@ impl TryFromProtobuf for ProposalPart { input: consensus_proto::ProposalPart, field_name: &'static str, ) -> Result { - use consensus_proto::proposal_part::Message::{ - BlockInfo, - ExecutedTransactionCount, - Fin, - Init, - Transactions, - }; + use consensus_proto::proposal_part::Message::{Fin, Init, Transactions}; match proto_field(input.message, field_name)? { Init(init) => TryFromProtobuf::try_from_protobuf(init, field_name).map(Self::Init), Fin(fin) => TryFromProtobuf::try_from_protobuf(fin, field_name).map(Self::Fin), - BlockInfo(bi) => { - TryFromProtobuf::try_from_protobuf(bi, field_name).map(Self::BlockInfo) - } Transactions(transactions) => transactions .transactions .into_iter() .map(|txn| TryFromProtobuf::try_from_protobuf(txn, field_name)) .collect::, _>>() .map(Self::TransactionBatch), - ExecutedTransactionCount(count) => { - TryFromProtobuf::try_from_protobuf(count, field_name) - .map(Self::ExecutedTransactionCount) - } } } } @@ -262,17 +246,9 @@ impl ProposalPart { matches!(self, Self::Fin(_)) } - pub fn is_block_info(&self) -> bool { - matches!(self, Self::BlockInfo(_)) - } - pub fn is_transaction_batch(&self) -> bool { matches!(self, Self::TransactionBatch(_)) } - - pub fn is_executed_transaction_count(&self) -> bool { - matches!(self, Self::ExecutedTransactionCount(_)) - } } impl ToProtobuf for TransactionVariant { diff --git a/crates/pathfinder/src/config/integration_testing.rs b/crates/pathfinder/src/config/integration_testing.rs index 0e6c934fce..713c583cd7 100644 --- a/crates/pathfinder/src/config/integration_testing.rs +++ 
b/crates/pathfinder/src/config/integration_testing.rs @@ -17,9 +17,7 @@ pub use enabled::*; pub enum InjectFailureTrigger { ProposalInitRx, ProposalFinRx, - BlockInfoRx, TransactionBatchRx, - ExecutedTransactionCountRx, ProposalFinalized, PrevoteRx, PrecommitRx, @@ -34,9 +32,7 @@ impl InjectFailureTrigger { match self { InjectFailureTrigger::ProposalInitRx => "proposal_init_rx", InjectFailureTrigger::ProposalFinRx => "proposal_fin_rx", - InjectFailureTrigger::BlockInfoRx => "block_info_rx", InjectFailureTrigger::TransactionBatchRx => "txn_batch_rx", - InjectFailureTrigger::ExecutedTransactionCountRx => "executed_txn_count_rx", InjectFailureTrigger::ProposalFinalized => "proposal_finalized", InjectFailureTrigger::PrevoteRx => "prevote_rx", InjectFailureTrigger::PrecommitRx => "precommit_rx", @@ -55,9 +51,7 @@ impl FromStr for InjectFailureTrigger { match s { "proposal_init_rx" => Ok(InjectFailureTrigger::ProposalInitRx), "proposal_fin_rx" => Ok(InjectFailureTrigger::ProposalFinRx), - "block_info_rx" => Ok(InjectFailureTrigger::BlockInfoRx), "txn_batch_rx" => Ok(InjectFailureTrigger::TransactionBatchRx), - "executed_txn_count_rx" => Ok(InjectFailureTrigger::ExecutedTransactionCountRx), "proposal_finalized" => Ok(InjectFailureTrigger::ProposalFinalized), "prevote_rx" => Ok(InjectFailureTrigger::PrevoteRx), "precommit_rx" => Ok(InjectFailureTrigger::PrecommitRx), diff --git a/crates/pathfinder/src/consensus/inner/batch_execution.rs b/crates/pathfinder/src/consensus/inner/batch_execution.rs index 8d4524ec8b..09c4ca457f 100644 --- a/crates/pathfinder/src/consensus/inner/batch_execution.rs +++ b/crates/pathfinder/src/consensus/inner/batch_execution.rs @@ -1,9 +1,9 @@ -//! Batch execution manager with rollback support for ExecutedTransactionCount +//! Batch execution manager with rollback support for executed transaction count //! -//! This module provides functionality to handle optimistic execution of -//! transaction batches with the ability to rollback when -//! 
ExecutedTransactionCount indicates fewer transactions were actually executed -//! by the proposer. +//! This module provides functionality to handle optimistic execution +//! of transaction batches with the ability to rollback when the +//! executed transaction count indicates fewer transactions were +//! actually executed by the proposer. use std::collections::{HashMap, HashSet}; @@ -22,17 +22,13 @@ use pathfinder_validator::{ ValidatorWorkerPool, }; -/// Manages batch execution with rollback support for ExecutedTransactionCount +/// Manages batch execution with rollback support for executed transaction count #[derive(Clone)] pub struct BatchExecutionManager { /// Tracks which proposals (height/round) have started execution. /// An entry exists here if at least one batch has been executed (not /// deferred). executing: HashSet, - /// Tracks which proposals (height/round) have had ExecutedTransactionCount - /// processed. An entry exists here if ExecutedTransactionCount has been - /// successfully processed for this height/round. - executed_transaction_count_processed: HashSet, /// Gas price provider for block info validation. gas_price_provider: Option, l2_gas_price_provider: Option, @@ -53,7 +49,6 @@ impl BatchExecutionManager { ) -> Self { Self { executing: HashSet::new(), - executed_transaction_count_processed: HashSet::new(), gas_price_provider, l2_gas_price_provider, worker_pool, @@ -66,35 +61,11 @@ impl BatchExecutionManager { /// /// Returns `true` if at least one batch has been executed (not deferred) /// for this height/round. + #[cfg(test)] pub fn is_executing(&self, height_and_round: &HeightAndRound) -> bool { self.executing.contains(height_and_round) } - /// Check if ExecutedTransactionCount has been processed for the given - /// height and round - /// - /// Returns `true` if ExecutedTransactionCount has been successfully - /// processed for this height/round. 
- pub fn is_executed_transaction_count_processed( - &self, - height_and_round: &HeightAndRound, - ) -> bool { - self.executed_transaction_count_processed - .contains(height_and_round) - } - - /// Check if ProposalFin should be deferred for the given height and round - /// - /// ProposalFin should be deferred if execution has started but - /// ExecutedTransactionCount hasn't been processed yet. This ensures that we - /// don't finalize a proposal before we know the final transaction count. - /// - /// Note: This is in its own method to prevent drift with tests. - pub fn should_defer_proposal_fin(&self, height_and_round: &HeightAndRound) -> bool { - self.is_executing(height_and_round) - && !self.is_executed_transaction_count_processed(height_and_round) - } - /// Process a transaction batch with deferral support /// /// This is the main method that should be used by the P2P task @@ -125,13 +96,6 @@ impl BatchExecutionManager { "🖧 ⚙️ transaction batch execution for height and round {height_and_round} is \ deferred" ); - assert!( - deferred_executions - .get(&height_and_round) - .is_some_and(|dex| dex.block_info.is_some()), - "If TransactionBatch execution is deferred, a BlockInfo must have been added too \ - (because it arrives first according to message order)" - ); // Defer execution - add to deferred_executions deferred_executions @@ -152,7 +116,6 @@ impl BatchExecutionManager { let mut all_transactions = transactions; let mut validator = if let Some(DeferredExecution { transactions: mut deferred_txns, - block_info: deferred_block_info, .. }) = deferred { @@ -160,24 +123,18 @@ impl BatchExecutionManager { // Prepend them to the new transactions. deferred_txns.extend(all_transactions); all_transactions = deferred_txns; - if let Some(block_info) = deferred_block_info { - validator_stage - .try_into_block_info_stage() - .map_err(|e| ProposalHandlingError::Recoverable(e.into()))? 
- .validate_block_info( - block_info, + match validator_stage { + ValidatorStage::BlockInfo(stage) => { + stage.validate_block_info( main_db.clone(), decided_blocks, self.gas_price_provider.clone(), None, // TODO: Add L1ToFriValidator when oracle is available self.l2_gas_price_provider.as_ref(), self.worker_pool.clone(), - ) - .map(Box::new)? - } else { - validator_stage - .try_into_transaction_batch_stage() - .map_err(|e| ProposalHandlingError::Recoverable(e.into()))? + )? + } + ValidatorStage::TransactionBatch(stage) => stage, } } else { validator_stage @@ -200,18 +157,19 @@ impl BatchExecutionManager { additionally {deferred_txns_len} previously deferred transactions were executed", ); - // If ExecutedTransactionCount was deferred (arrived before execution started, - // e.g., because batches were deferred), process it now that execution - // has started. - // Assuming message ordering is guaranteed... - // (see p2p::consensus::handle_incoming_proposal_message) - // ...if ExecutedTransactionCount is deferred, all batches are also in the - // deferred entry, so we can safely process ExecutedTransactionCount - // here. + // If executed transaction count was deferred (execution could + // not start before ProposalFin arrived, because the parent + // block wasn't finished yet), process it now that execution + // has started. Assuming message ordering is guaranteed... + // (see p2p::consensus::handle_incoming_proposal_message) + // ...if executed transaction count is set (by ProposalFin + // processing), all transaction batches are also in the + // deferred entry, so we can safely process executed + // transaction count here. 
if let Some(executed_txn_count) = deferred_executed_transaction_count { tracing::debug!( - "Processing deferred ExecutedTransactionCount for {height_and_round} after batch \ - execution started" + "Processing deferred executed transaction count for {height_and_round} after \ + batch execution started" ); self.process_executed_transaction_count::( height_and_round, @@ -235,9 +193,8 @@ impl BatchExecutionManager { transactions: Vec, validator: &mut ValidatorTransactionBatchStage, ) -> Result<(), ProposalHandlingError> { - // Mark that execution has started for this height/round, even if batch is - // empty. This is necessary because ExecutedTransactionCount may arrive later - // and requires execution to have started. + // Mark that execution has started for this height/round, even + // if batch is empty. self.executing.insert(height_and_round); if transactions.is_empty() { @@ -262,9 +219,8 @@ impl BatchExecutionManager { Ok(()) } - /// Process ExecutedTransactionCount message + /// Processes executed transaction count immediately with rollback support. /// - /// Processes ExecutedTransactionCount immediately with rollback support. /// Assumes execution has already started (at least one batch executed). /// If transactions are deferred, deferral should be handled by the /// caller before calling this function. @@ -279,7 +235,7 @@ impl BatchExecutionManager { if !self.executing.contains(&height_and_round) { return Err(ProposalHandlingError::Fatal(anyhow::anyhow!( "No execution state found for {height_and_round}. Execution should have started \ - before processing ExecutedTransactionCount." + before processing executed transaction count." 
))); } @@ -287,8 +243,10 @@ impl BatchExecutionManager { let current_transaction_count = validator.transaction_count(); tracing::debug!( - "Processing ExecutedTransactionCount for {height_and_round}: \ - target={target_transaction_count}, current={current_transaction_count}" + height_and_round = ?height_and_round, + target = target_transaction_count, + current = current_transaction_count, + "Processing executed transaction count" ); if target_transaction_count < current_transaction_count { @@ -301,11 +259,11 @@ impl BatchExecutionManager { } else if target_transaction_count > current_transaction_count { // This shouldn't happen with proper message ordering and no protocol errors. // Ordering is guaranteed by p2p::consensus::handle_incoming_proposal_message. - // ExecutedTransactionCount should arrive after all TransactionBatches, so we - // should have at least as many transactions as - // ExecutedTransactionCount indicates. + // ProposalFin should arrive after all TransactionBatches, so we + // should have at least as many transactions as its + // executed transaction count indicates. tracing::warn!( - "ExecutedTransactionCount for {height_and_round} indicates {} transactions, but \ + "Executed transaction count for {height_and_round} indicates {} transactions, but \ we only have {} transactions. 
This may indicate a protocol violation or missing \ batches.", target_transaction_count, @@ -318,33 +276,25 @@ impl BatchExecutionManager { "Finalized {height_and_round} with {final_transaction_count} executed transactions" ); - // Mark ExecutedTransactionCount as processed for this height/round - self.executed_transaction_count_processed - .insert(height_and_round); - Ok(()) } /// Clean up completed executions pub fn cleanup(&mut self, height_and_round: &HeightAndRound) { let had_execution = self.executing.remove(height_and_round); - let had_transactions_fin = self - .executed_transaction_count_processed - .remove(height_and_round); - if had_execution || had_transactions_fin { + if had_execution { tracing::debug!("Cleaned up execution state for {height_and_round}"); } } } -/// Represents transactions received from the network that are waiting for -/// previous block to be committed before they can be executed. Also holds -/// optional proposal commitment and proposer address in case that the entire -/// proposal has been received. May also store ExecutedTransactionCount if it -/// arrives while transactions are deferred. +/// Represents transactions received from the network that are waiting +/// for previous block to be committed before they can be +/// executed. Also holds optional proposal commitment and executed +/// transaction count in case ProposalFin arrives while transactions +/// are deferred. 
#[derive(Debug, Clone, Default)] pub struct DeferredExecution { - pub block_info: Option, pub transactions: Vec, pub commitment: Option, pub executed_transaction_count: Option, @@ -418,20 +368,12 @@ mod tests { Ok(()) } - fn create_test_proposal( - height: u64, - ) -> ( - p2p_proto::consensus::ProposalInit, - p2p_proto::consensus::BlockInfo, - ) { - let init = p2p_proto::consensus::ProposalInit { + fn create_test_proposal(height: u64) -> p2p_proto::consensus::ProposalInit { + p2p_proto::consensus::ProposalInit { height, round: 1, valid_round: None, proposer: p2p_proto::common::Address::default(), - }; - let block_info = p2p_proto::consensus::BlockInfo { - height, timestamp: 1000, builder: p2p_proto::common::Address::default(), l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -440,29 +382,23 @@ mod tests { l1_data_gas_price_wei: 0, l1_gas_price_fri: 0, l1_data_gas_price_fri: 0, - }; - (init, block_info) + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), + } } - /// Test that BatchExecutionManager correctly tracks execution state and - /// ExecutedTransactionCount processing. This verifies the tracking methods - /// that are used by defer_or_execute_proposal_fin to determine whether - /// ProposalFin should be deferred. + /// Test that BatchExecutionManager correctly tracks execution + /// state. 
#[tokio::test] async fn test_execution_state_tracking() { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|v| { - v.skip_validation( - block_info, - storage, - Arc::clone(&worker_pool), - DecidedBlocks::default(), - ) + v.skip_validation(storage, Arc::clone(&worker_pool), DecidedBlocks::default()) }) .expect("Failed to create validator stage"); let mut batch_execution_manager = BatchExecutionManager::new( @@ -479,10 +415,6 @@ mod tests { !batch_execution_manager.is_executing(&height_and_round), "Execution should not have started initially" ); - assert!( - !batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should not be processed initially" - ); // Execute a batch to start execution let transactions = create_transaction_batch(0, 0, 5, chain_id); @@ -500,20 +432,7 @@ mod tests { "Execution should have started after execute_batch" ); - // Verify ExecutedTransactionCount has NOT been processed yet - assert!( - !batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should not be processed yet" - ); - - // Verify that ProposalFin should be deferred - assert!( - batch_execution_manager.should_defer_proposal_fin(&height_and_round), - "ProposalFin should be deferred when execution started but ExecutedTransactionCount \ - not processed" - ); - - // Now process ExecutedTransactionCount + // Now process executed transaction count let executed_transaction_count = 5; batch_execution_manager .process_executed_transaction_count::( @@ -521,24 +440,9 @@ mod tests { executed_transaction_count, &mut validator_stage, ) - .expect("Failed to process 
ExecutedTransactionCount"); - - // Verify ExecutedTransactionCount is now marked as processed - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should be marked as processed after process_transactions_fin" - ); - - // Now ProposalFin should NOT be deferred - assert!( - !batch_execution_manager.should_defer_proposal_fin(&height_and_round), - "ProposalFin should NOT be deferred after ExecutedTransactionCount is processed" - ); + .expect("Failed to process executed transaction count"); } - /// Test that ExecutedTransactionCount arriving before any TransactionBatch - /// is handled gracefully. ExecutedTransactionCount should be stored in - /// deferred entry even if no batches have been deferred yet. #[tokio::test] async fn test_executed_transaction_count_before_any_batch() { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); @@ -568,9 +472,6 @@ mod tests { round: height_and_round.round(), valid_round: None, proposer: proposer_address, - }; - let proposal_block_info = proto_consensus::BlockInfo { - height: height_and_round.height(), timestamp: 2000, builder: proposer_address, l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -579,6 +480,8 @@ mod tests { l1_data_gas_price_fri: 0, l1_gas_price_wei: 0, l1_data_gas_price_wei: 0, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; let worker_pool = create_test_worker_pool(); @@ -603,35 +506,24 @@ mod tests { "No deferred entry should exist initially" ); - // Step 1: ExecutedTransactionCount arrives when execution hasn't started yet - // (Note: With P2P message ordering guarantees, ExecutedTransactionCount will - // always arrive after all TransactionBatches, but execution may not have - // started if batches were deferred. This test simulates the case where - // ExecutedTransactionCount arrives before execution starts, e.g., because - // batches were deferred). 
+ // Step 1: executed transaction count arrives when execution + // hasn't started yet (Note: With P2P message ordering + // guarantees, ProposalFin will always arrive after all + // TransactionBatches, but execution would not have started if + // batches were deferred.) let executed_transaction_count = 5; - // Simulate the fix: create deferred entry and store ExecutedTransactionCount. - // Also store BlockInfo because it should have arrived before any - // batches or ExecutedTransactionCount (according to message ordering). + // Simulate the fix: create deferred entry and store executed + // transaction count. let deferred = deferred_executions.entry(height_and_round).or_default(); - deferred.block_info = Some(proposal_block_info); deferred.executed_transaction_count = Some(executed_transaction_count); - - // Verify BlockInfo was stored - assert!( - deferred_executions - .get(&height_and_round) - .is_some_and(|d| d.block_info.is_some()), - "BlockInfo should be stored in deferred entry" - ); - // Verify ExecutedTransactionCount was stored + // Verify executed transaction count was stored assert!( deferred_executions .get(&height_and_round) .and_then(|d| d.executed_transaction_count.as_ref()) .is_some(), - "ExecutedTransactionCount should be stored in deferred entry" + "Executed transaction count should be stored in deferred entry" ); let validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) @@ -657,19 +549,13 @@ mod tests { "Execution should have started after batch execution" ); - // Verify ExecutedTransactionCount was processed (marked as processed) - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should be processed after batch execution" - ); - - // Verify validator state matches ExecutedTransactionCount count + // Verify validator state matches executed transaction count assert!( matches!( next_stage, ValidatorStage::TransactionBatch(stage) if 
stage.transaction_count() == 5 ), - "Validator should have 5 transactions matching ExecutedTransactionCount" + "Validator should have 5 transactions matching executed transaction count" ); } @@ -687,7 +573,7 @@ mod tests { let height_and_round = HeightAndRound::new(2, 1); let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let (proposal_init, proposal_block_info) = create_test_proposal_init( + let proposal_init = create_test_proposal_init( chain_id, height_and_round.height(), height_and_round.round(), @@ -707,10 +593,7 @@ mod tests { let mut deferred_executions: std::collections::HashMap = std::collections::HashMap::new(); - deferred_executions - .entry(height_and_round) - .or_default() - .block_info = Some(proposal_block_info); + deferred_executions.insert(height_and_round, DeferredExecution::default()); // Test 1: Deferral when parent not committed let next_stage = { @@ -790,17 +673,15 @@ mod tests { // with the blockifier's ConcurrentTransactionExecutor and shared worker pools. let worker_pool_2 = create_test_worker_pool(); let height_and_round_2 = HeightAndRound::new(3, 1); - let (proposal_init, block_info) = create_test_proposal(height_and_round_2.height()); + let proposal_init = create_test_proposal(height_and_round_2.height()); let validator_stage_2 = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|validator| { validator.skip_validation( - block_info, storage.clone(), worker_pool_2.clone(), DecidedBlocks::default(), ) }) - .map(Box::new) .map(ValidatorStage::TransactionBatch) .expect("Failed to create validator stage"); @@ -834,22 +715,17 @@ mod tests { } } - /// Test ExecutedTransactionCount processing with rollback support. + /// Test executed transaction count processing with rollback support. 
#[tokio::test] async fn test_executed_transaction_count_rollback() { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|v| { - v.skip_validation( - block_info, - storage, - Arc::clone(&worker_pool), - DecidedBlocks::default(), - ) + v.skip_validation(storage, Arc::clone(&worker_pool), DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -880,11 +756,11 @@ mod tests { assert_eq!( validator_stage.transaction_count(), 14, - "Should have 14 transactions before ExecutedTransactionCount" + "Should have 14 transactions total" ); - // Test 1: Normal case - no rollback (ExecutedTransactionCount matches current - // count) + // Test 1: Normal case - no rollback (executed transaction + // count matches current count) { let executed_transaction_count = 14; @@ -894,12 +770,8 @@ mod tests { executed_transaction_count, &mut validator_stage, ) - .expect("Failed to process ExecutedTransactionCount"); + .expect("Failed to process executed transaction count"); - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should be marked as processed" - ); assert_eq!( validator_stage.transaction_count(), 14, @@ -907,22 +779,18 @@ mod tests { ); } - // Test 2: Rollback case - ExecutedTransactionCount indicates fewer transactions - // Create a new worker pool for the second validator to avoid issues with - // blockifier's ConcurrentTransactionExecutor and shared worker pools. + // Test 2: Rollback case - executed transaction count + // indicates fewer transactions. 
Create a new worker pool for + // the second validator to avoid issues with blockifier's + // ConcurrentTransactionExecutor and shared worker pools. let worker_pool_2 = create_test_worker_pool(); // Re-execute batches to get back to 14 transactions let storage_2 = StorageBuilder::in_tempdir().expect("Failed to create temp database"); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage_2 = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|validator| { - validator.skip_validation( - block_info, - storage_2, - worker_pool_2, - DecidedBlocks::default(), - ) + validator.skip_validation(storage_2, worker_pool_2, DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -961,16 +829,12 @@ mod tests { executed_transaction_count, &mut validator_stage_2, ) - .expect("Failed to process ExecutedTransactionCount with rollback"); + .expect("Failed to process executed transaction count with rollback"); - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round_2), - "ExecutedTransactionCount should be marked as processed after rollback" - ); assert_eq!( validator_stage_2.transaction_count(), 7, - "Transaction count should be rolled back to 7 (matching ExecutedTransactionCount)" + "Transaction count should be rolled back to 7 (matching executed transaction count)" ); } @@ -980,16 +844,11 @@ mod tests { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|v| { - v.skip_validation( - block_info, - storage, - Arc::clone(&worker_pool), - DecidedBlocks::default(), - ) + v.skip_validation(storage, Arc::clone(&worker_pool), 
DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -1017,7 +876,7 @@ mod tests { "No transactions should be executed" ); - // ExecutedTransactionCount can be processed after empty batch + // executed transaction count can be processed after empty batch let executed_transaction_count = 0; batch_execution_manager @@ -1026,32 +885,23 @@ mod tests { executed_transaction_count, &mut validator_stage, ) - .expect("Failed to process ExecutedTransactionCount after empty batch"); - - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should be processed after empty batch" - ); + .expect("Failed to process executed transaction count after empty batch"); } - /// Test that ExecutedTransactionCount == 0 rolls back all transactions to - /// zero. This covers the edge case where the proposer executed no - /// transactions but the validator optimistically executed some. + /// Test that executed transaction count == 0 rolls back all + /// transactions to zero. This covers the edge case where the + /// proposer executed no transactions but the validator + /// optimistically executed some. 
#[tokio::test] async fn test_executed_transaction_count_zero_rollback() { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|v| { - v.skip_validation( - block_info, - storage, - Arc::clone(&worker_pool), - DecidedBlocks::default(), - ) + v.skip_validation(storage, Arc::clone(&worker_pool), DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -1077,7 +927,7 @@ mod tests { assert_eq!( validator_stage.transaction_count(), 5, - "Should have 5 transactions before ExecutedTransactionCount" + "Should have 5 transactions before executed transaction count" ); // ETC == 0 should roll back all transactions @@ -1087,37 +937,28 @@ mod tests { 0, &mut validator_stage, ) - .expect("Failed to process ExecutedTransactionCount with zero rollback"); + .expect("Failed to process executed transaction count with zero rollback"); assert_eq!( validator_stage.transaction_count(), 0, "All transactions should be rolled back when ETC is 0" ); - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should be marked as processed" - ); } - /// Test that ExecutedTransactionCount > actual transaction count does not - /// error or inflate the count. The validator continues with the - /// transactions it has. + /// Test that executed transaction count > actual transaction + /// count does not error or inflate the count. The validator + /// continues with the transactions it has. 
#[tokio::test] async fn test_executed_transaction_count_exceeds_actual() { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|v| { - v.skip_validation( - block_info, - storage, - Arc::clone(&worker_pool), - DecidedBlocks::default(), - ) + v.skip_validation(storage, Arc::clone(&worker_pool), DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -1143,7 +984,7 @@ mod tests { assert_eq!( validator_stage.transaction_count(), 5, - "Should have 5 transactions before ExecutedTransactionCount" + "Should have 5 transactions before executed transaction count" ); // ETC == 10 exceeds the 5 we have; should warn but not error @@ -1160,9 +1001,5 @@ mod tests { 5, "Transaction count should remain unchanged when ETC exceeds actual" ); - assert!( - batch_execution_manager.is_executed_transaction_count_processed(&height_and_round), - "ExecutedTransactionCount should be marked as processed" - ); } } diff --git a/crates/pathfinder/src/consensus/inner/consensus_task.rs b/crates/pathfinder/src/consensus/inner/consensus_task.rs index 063e848143..b616d9694c 100644 --- a/crates/pathfinder/src/consensus/inner/consensus_task.rs +++ b/crates/pathfinder/src/consensus/inner/consensus_task.rs @@ -165,6 +165,8 @@ pub fn spawn( Ok((wire_proposal, finalized_block)) => { let ProposalFin { proposal_commitment, + executed_transaction_count: _, + fin_payload: _, } = wire_proposal .last() .and_then(ProposalPart::as_fin) diff --git a/crates/pathfinder/src/consensus/inner/dummy_proposal.rs b/crates/pathfinder/src/consensus/inner/dummy_proposal.rs index 2fdc3892a2..2f51f81f6a 100644 --- a/crates/pathfinder/src/consensus/inner/dummy_proposal.rs +++ 
b/crates/pathfinder/src/consensus/inner/dummy_proposal.rs @@ -9,7 +9,7 @@ use std::time::Duration; use anyhow::Context; use p2p_proto::common::{Address, Hash, L1DataAvailabilityMode}; -use p2p_proto::consensus::{BlockInfo, ProposalFin, ProposalInit, ProposalPart}; +use p2p_proto::consensus::{ProposalFin, ProposalInit, ProposalPart}; use pathfinder_common::{ BlockId, BlockNumber, @@ -272,11 +272,10 @@ pub(crate) fn create_from_bootstrapped_devnet_db( worker_pool.join(); parts.extend(batches.into_iter().map(ProposalPart::TransactionBatch)); - parts.push(ProposalPart::ExecutedTransactionCount( - num_executed_txns as u64, - )); parts.push(ProposalPart::Fin(ProposalFin { proposal_commitment: Hash(block.header.state_diff_commitment.0), + executed_transaction_count: num_executed_txns as u64, + fin_payload: None, })); Ok((parts, block)) @@ -345,32 +344,24 @@ pub(crate) fn create_with_invalid_l1_handler_transactions( round, valid_round: None, proposer: Address(proposer.0), - }; - - let mut parts = vec![ProposalPart::Init(proposal_init.clone())]; - - let block_info = BlockInfo { - height, - builder: Address(proposer.0), timestamp: strictly_increasing_timestamp(latest_timestamp).get(), + builder: Address(proposer.0), + l1_da_mode: L1DataAvailabilityMode::Calldata, l2_gas_price_fri: 1_000_000, l1_gas_price_fri: 1_000_000, l1_data_gas_price_fri: 1_000_000, l1_gas_price_wei: 1_000_000, l1_data_gas_price_wei: 1_000_000, - l1_da_mode: L1DataAvailabilityMode::Calldata, + starknet_version: "".to_string(), + version_constant_commitment: Hash::ZERO, }; - parts.push(ProposalPart::BlockInfo(block_info.clone())); + let mut parts = vec![ProposalPart::Init(proposal_init.clone())]; let validator = ValidatorBlockInfoStage::new(ChainId::SEPOLIA_TESTNET, proposal_init)?; let worker_pool = ExecutorWorkerPool::::new(1).get(); - let mut validator = validator.skip_validation( - block_info.clone(), - main_storage, - worker_pool.clone(), - DecidedBlocks::default(), - )?; + let mut validator = + 
validator.skip_validation(main_storage, worker_pool.clone(), DecidedBlocks::default())?; let num_executed_txns = config .as_ref() @@ -385,9 +376,6 @@ pub(crate) fn create_with_invalid_l1_handler_transactions( .collect(); parts.extend(batches.into_iter().map(ProposalPart::TransactionBatch)); - parts.push(ProposalPart::ExecutedTransactionCount( - num_executed_txns as u64, - )); validator .execute_batch::( @@ -401,6 +389,8 @@ pub(crate) fn create_with_invalid_l1_handler_transactions( parts.push(ProposalPart::Fin(ProposalFin { proposal_commitment: Hash(block.header.state_diff_commitment.0), + executed_transaction_count: num_executed_txns as u64, + fin_payload: None, })); let worker_pool = Arc::into_inner(worker_pool).context("Failed join worker pool")?; @@ -469,22 +459,18 @@ pub(crate) fn create_test_proposal_init( height: u64, round: u32, proposer: ContractAddress, -) -> (ProposalInit, BlockInfo) { +) -> ProposalInit { let proposer_address = Address(proposer.0); let timestamp = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_default() .as_secs(); - let proposal_init = ProposalInit { + ProposalInit { height, round, valid_round: None, proposer: proposer_address, - }; - - let block_info = BlockInfo { - height, timestamp, builder: proposer_address, l1_da_mode: L1DataAvailabilityMode::default(), @@ -493,9 +479,9 @@ pub(crate) fn create_test_proposal_init( l1_data_gas_price_fri: 1, l1_gas_price_wei: 1_000_000_000, l1_data_gas_price_wei: 1, - }; - - (proposal_init, block_info) + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), + } } #[cfg(test)] diff --git a/crates/pathfinder/src/consensus/inner/integration_testing.rs b/crates/pathfinder/src/consensus/inner/integration_testing.rs index 3886456b52..a883c8625e 100644 --- a/crates/pathfinder/src/consensus/inner/integration_testing.rs +++ b/crates/pathfinder/src/consensus/inner/integration_testing.rs @@ -107,19 +107,11 @@ pub fn debug_fail_on_proposal_part( 
matches!( (proposal_part, trigger), (ProposalPart::Init(_), InjectFailureTrigger::ProposalInitRx) - | ( - ProposalPart::BlockInfo(_), - InjectFailureTrigger::BlockInfoRx - ) | (ProposalPart::Fin(_), InjectFailureTrigger::ProposalFinRx) | ( ProposalPart::TransactionBatch(_), InjectFailureTrigger::TransactionBatchRx ) - | ( - ProposalPart::ExecutedTransactionCount(_), - InjectFailureTrigger::ExecutedTransactionCountRx - ) ) }, height, diff --git a/crates/pathfinder/src/consensus/inner/p2p_task.rs b/crates/pathfinder/src/consensus/inner/p2p_task.rs index de74562e3f..c851ce5767 100644 --- a/crates/pathfinder/src/consensus/inner/p2p_task.rs +++ b/crates/pathfinder/src/consensus/inner/p2p_task.rs @@ -473,6 +473,8 @@ pub fn spawn( ) => { let ProposalFin { proposal_commitment, + executed_transaction_count: _, + fin_payload: _, } = proposal_parts.last().and_then(ProposalPart::as_fin).expect( "Proposals produced by our node are always coherent and complete", ); @@ -851,23 +853,18 @@ fn execute_deferred_for_next_height( if let Some((hnr, deferred)) = deferred.into_iter().next_back() { tracing::debug!("🖧 ⚙️ executing deferred proposal for height and round {hnr}"); - let block_info = deferred.block_info.expect( - "BlockInfo must be present if a deferred execution exists for height and round", - ); let validator_stage = validator_cache.remove(&hnr)?; let mut validator = validator_stage .try_into_block_info_stage() .map_err(|e| ProposalHandlingError::Recoverable(e.into()))? .validate_block_info( - block_info, main_db, decided_blocks, gas_price_provider, None, // TODO: Add L1ToFriValidator when oracle is available l2_gas_price_provider.as_ref(), worker_pool, - ) - .map(Box::new)?; + )?; // Execute deferred transactions first. 
let opt_commitment = { @@ -881,15 +878,16 @@ fn execute_deferred_for_next_height( )?; } - // Process deferred ExecutedTransactionCount + // Process deferred executed transaction count if let Some(executed_transaction_count) = deferred.executed_transaction_count { tracing::debug!( - "🖧 ⚙️ processing deferred ExecutedTransactionCount for height and round {hnr}" + "🖧 ⚙️ processing deferred executed transaction count for height and round \ + {hnr}" ); // Execution has started at this point (from execute_batch above, if // transactions were non-empty). If transactions were empty, // execute_batch handles marking execution as started, so we can - // process ExecutedTransactionCount immediately. + // process executed transactioncount immediately. batch_execution_manager.process_executed_transaction_count::( hnr, executed_transaction_count, @@ -1033,11 +1031,10 @@ async fn send_proposal_to_consensus( /// We enforce the following order of proposal parts via /// [ProposalPartsValidator] /// 1. Proposal Init -/// 2. Block Info for non-empty proposals (or Proposal Fin for empty proposals) -/// 3. In random order: at least one Transaction Batch, ExecutedTransactionCount -/// 4. Proposal Fin +/// 2. Transaction Batch (zero or more times) +/// 3. Proposal Fin /// -/// The [spec](https://github.com/starknet-io/starknet-p2p-specs/blob/main/p2p/proto/consensus/consensus.md#order-of-messages) is more restrictive. +/// according to the [spec](https://github.com/starknet-io/starknet-p2p-specs/blob/main/p2p/proto/consensus/consensus.md#order-of-messages). 
#[allow(clippy::too_many_arguments)] fn handle_incoming_proposal_part( chain_id: ChainId, @@ -1073,16 +1070,6 @@ fn handle_incoming_proposal_part( match (result, proposal_part) { (ValidationResult::Accepted, ProposalPart::Init(init)) => { let validator = ValidatorBlockInfoStage::new(chain_id, init)?; - validator_cache.insert(height_and_round, ValidatorStage::BlockInfo(validator)); - Ok(None) - } - (ValidationResult::Accepted, ProposalPart::BlockInfo(block_info)) => { - let validator_stage = validator_cache.remove(&height_and_round)?; - - let validator = validator_stage - .try_into_block_info_stage() - .map_err(|e| ProposalHandlingError::Recoverable(e.into()))?; - let defer = { let mut db_conn = main_readonly_storage.connection().context( "Creating database connection for deferral check in block info validation", @@ -1090,22 +1077,22 @@ fn handle_incoming_proposal_part( let db_tx = db_conn.transaction().context( "Creating DB transaction for deferral check in block info validation", )?; - should_defer_validation(block_info.height, decided_blocks.clone(), &db_tx)? + should_defer_validation( + validator.proposal_height(), + decided_blocks.clone(), + &db_tx, + )? }; if defer { tracing::debug!( "🖧 ⚙️ deferring block info validation for height and round \ {height_and_round}..." 
); - let mut dex = deferred_executions.lock().unwrap(); - let deferred = dex.entry(height_and_round).or_default(); - deferred.block_info = Some(block_info); validator_cache.insert(height_and_round, ValidatorStage::BlockInfo(validator)); return Ok(None); } let new_validator = validator.validate_block_info( - block_info, main_readonly_storage, decided_blocks, gas_price_provider, @@ -1115,7 +1102,7 @@ fn handle_incoming_proposal_part( )?; validator_cache.insert( height_and_round, - ValidatorStage::TransactionBatch(Box::new(new_validator)), + ValidatorStage::TransactionBatch(new_validator), ); Ok(None) } @@ -1138,72 +1125,12 @@ fn handle_incoming_proposal_part( Ok(None) } - ( - ValidationResult::Accepted, - ProposalPart::ExecutedTransactionCount(executed_txn_count), - ) => { - tracing::debug!( - "🖧 ⚙️ handling ExecutedTransactionCount for height and round \ - {height_and_round}..." - ); - - let execution_started = batch_execution_manager.is_executing(&height_and_round); - - if !execution_started { - // Execution hasn't started - store ExecutedTransactionCount for later - // processing. 
This can happen if: - // - Transactions are deferred (deferred entry already exists) - // - ExecutedTransactionCount arrives before execution starts - let mut dex = deferred_executions.lock().unwrap(); - - let deferred = dex.entry(height_and_round).or_default(); - deferred.executed_transaction_count = Some(executed_txn_count); - tracing::debug!( - "ExecutedTransactionCount for {height_and_round} is deferred - storing for \ - later processing (execution not started yet)" - ); - } else { - let validator_stage = validator_cache.remove(&height_and_round)?; - let mut validator = validator_stage - .try_into_transaction_batch_stage() - .map_err(|e| ProposalHandlingError::Recoverable(e.into()))?; - - batch_execution_manager.process_executed_transaction_count::( - height_and_round, - executed_txn_count, - &mut validator, - )?; - - // Check if ProposalFin was deferred and should now be finalized - let mut dex = deferred_executions.lock().unwrap(); - if let Some(deferred) = dex.get_mut(&height_and_round) { - if let Some(deferred_commitment) = deferred.commitment.take() { - drop(dex); - let block = validator - .consensus_finalize(deferred_commitment.proposal_commitment)?; - tracing::debug!( - "🖧 ⚙️ finalizing deferred ProposalFin for height and round \ - {height_and_round} after ExecutedTransactionCount was processed" - ); - - finalized_blocks.insert(height_and_round, block); - - return Ok(Some(deferred_commitment)); - } - } - - validator_cache.insert( - height_and_round, - ValidatorStage::TransactionBatch(validator), - ); - } - - Ok(None) - } ( ValidationResult::EmptyProposal, ProposalPart::Fin(ProposalFin { proposal_commitment, + executed_transaction_count: _, + fin_payload: _, }), ) => { tracing::debug!( @@ -1236,6 +1163,8 @@ fn handle_incoming_proposal_part( ValidationResult::NonEmptyProposal, ProposalPart::Fin(ProposalFin { proposal_commitment, + executed_transaction_count, + fin_payload: _, }), ) => { tracing::debug!( @@ -1254,6 +1183,7 @@ fn 
handle_incoming_proposal_part( proposal_commitment, proposer_address, valid_round, + executed_transaction_count, main_readonly_storage.clone(), deferred_executions, batch_execution_manager, @@ -1283,6 +1213,7 @@ fn defer_or_execute_proposal_fin( proposal_commitment: Hash, proposer_address: ContractAddress, valid_round: Option, + executed_transaction_count: u64, main_db: Storage, deferred_executions: Arc>>, batch_execution_manager: &mut BatchExecutionManager, @@ -1314,10 +1245,9 @@ fn defer_or_execute_proposal_fin( ); let mut deferred_executions = deferred_executions.lock().unwrap(); - deferred_executions - .entry(height_and_round) - .or_default() - .commitment = Some(commitment); + let deferred = deferred_executions.entry(height_and_round).or_default(); + deferred.commitment = Some(commitment); + deferred.executed_transaction_count = Some(executed_transaction_count); Ok(None) } else { // The proposal can be finalized now, because the previous @@ -1330,26 +1260,19 @@ fn defer_or_execute_proposal_fin( let deferred_txns_len = deferred.as_ref().map_or(0, |d| d.transactions.len()); let validator = if let Some(deferred) = deferred { - let mut validator = if let Some(block_info) = deferred.block_info { - validator_cache - .remove(&height_and_round)? - .try_into_block_info_stage() - .expect("ValidatorStage to be BlockInfo if BlockInfo is deferred") - .validate_block_info( - block_info, + let validator_stage = validator_cache.remove(&height_and_round)?; + let mut validator = match validator_stage { + ValidatorStage::BlockInfo(stage) => { + stage.validate_block_info( main_db.clone(), decided_blocks, gas_price_provider, None, // TODO: Add L1ToFriValidator when oracle is available l2_gas_price_provider.as_ref(), worker_pool, - ) - .map(Box::new)? - } else { - validator_cache - .remove(&height_and_round)? - .try_into_transaction_batch_stage() - .map_err(|e| ProposalHandlingError::Recoverable(e.into()))? + )? 
+ } + ValidatorStage::TransactionBatch(stage) => stage, }; // Execute deferred transactions first. @@ -1358,9 +1281,7 @@ fn defer_or_execute_proposal_fin( "🖧 ⚙️ executing {deferred_txns_len} deferred transactions for height and \ round {height_and_round} before finalizing proposal..." ); - } - if !deferred.transactions.is_empty() { batch_execution_manager.execute_batch::( height_and_round, deferred.transactions, @@ -1368,20 +1289,18 @@ fn defer_or_execute_proposal_fin( )?; } - // Process deferred ExecutedTransactionCount if it was stored - if let Some(executed_transaction_count) = deferred.executed_transaction_count { - tracing::debug!( - "🖧 ⚙️ processing deferred ExecutedTransactionCount for height and round \ - {height_and_round}" - ); - // Execution has started at this point (from execute_batch), - // so we can process ExecutedTransactionCount immediately - batch_execution_manager.process_executed_transaction_count::( - height_and_round, - executed_transaction_count, - &mut validator, - )?; - } + // Process deferred executed transaction count + tracing::debug!( + "🖧 ⚙️ processing executed transaction count for height and round \ + {height_and_round}" + ); + // Execution has started at this point (from execute_batch), + // so we can proceed immediately + batch_execution_manager.process_executed_transaction_count::( + height_and_round, + executed_transaction_count, + &mut validator, + )?; // Process deferred commitment if it was stored (use it instead of the new one) // (they should match, but the deferred one was received earlier) @@ -1404,32 +1323,17 @@ fn defer_or_execute_proposal_fin( validator } else { - validator_cache + let mut validator = validator_cache .remove(&height_and_round)? .try_into_transaction_batch_stage() - .map_err(|e| ProposalHandlingError::Recoverable(e.into()))? 
- }; - - // Check if execution has started but ExecutedTransactionCount hasn't been - // processed yet If so, defer ProposalFin until ExecutedTransactionCount - // arrives - if batch_execution_manager.should_defer_proposal_fin(&height_and_round) { - tracing::debug!( - "🖧 ⚙️ consensus finalize for height and round {height_and_round} is deferred \ - because ExecutedTransactionCount hasn't been processed yet" - ); - - let mut deferred_executions = deferred_executions.lock().unwrap(); - deferred_executions - .entry(height_and_round) - .or_default() - .commitment = Some(commitment); - validator_cache.insert( + .map_err(|e| ProposalHandlingError::Recoverable(e.into()))?; + batch_execution_manager.process_executed_transaction_count::( height_and_round, - ValidatorStage::TransactionBatch(validator), - ); - return Ok(None); - } + executed_transaction_count, + &mut validator, + )?; + validator + }; let block = validator.consensus_finalize(commitment.proposal_commitment)?; @@ -1475,6 +1379,7 @@ fn consensus_vote_to_p2p_vote( round: vote.round.as_u32().expect("Round not to be Nil"), proposal_commitment: vote.value.map(|v| Hash(v.0 .0)), voter: Address(vote.validator_address.0), + signature: Default::default(), } } diff --git a/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs b/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs index 7481d78e7e..12bd2973fb 100644 --- a/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs +++ b/crates/pathfinder/src/consensus/inner/p2p_task/p2p_task_tests.rs @@ -1,9 +1,9 @@ //! End-to-end tests for p2p_task //! -//! These tests verify the full integration flow of p2p_task, including proposal -//! processing, deferral logic (when ExecutedTransactionCount or ProposalFin -//! arrive out of order), rollback scenarios. They test the complete path from -//! receiving P2P events to sending consensus commands. +//! These tests verify the full integration flow of p2p_task, +//! 
including proposal processing, deferral logic and rollback +//! scenarios. They test the complete path from receiving P2P events +//! to sending consensus commands. use std::collections::HashMap; use std::path::PathBuf; @@ -367,8 +367,7 @@ fn verify_proposal_event( /// [test_proposal_fin_deferred_until_parent_block_committed]), then /// finalization can proceed. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch → -/// ExecutedTransactionCount → ProposalFin → +/// **Test**: Send Init → TransactionBatch → ProposalFin → /// MarkBlockAsDecidedAndCleanUp(parent). /// /// Verify ProposalFin is deferred (no proposal event), then verify @@ -424,7 +423,7 @@ async fn test_proposal_fin_deferred_until_parent_block_decided( let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); let h2r1 = HeightAndRound::new(2, 1); let transactions = create_transaction_batch(0, 0, 5, chain_id); - let (proposal_init, block_info) = + let proposal_init = create_test_proposal_init(chain_id, h2r1.height(), h2r1.round(), proposer_address); // Step 1: Send ProposalInit @@ -436,16 +435,7 @@ async fn test_proposal_fin_deferred_until_parent_block_decided( .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(h2r1, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch (execution should start) + // Step 2: Send TransactionBatch (execution should start) env.p2p_tx .send(Event { source: PeerId::random(), @@ -457,16 +447,7 @@ async fn test_proposal_fin_deferred_until_parent_block_decided( // Verify: No proposal event yet (execution started, but not finalized) verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - // Step 4: Send ExecutedTransactionCount - env.p2p_tx - .send(Event { - source: PeerId::random(), - 
kind: EventKind::Proposal(h2r1, ProposalPart::ExecutedTransactionCount(5)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - // Step 5: Send ProposalFin + // Step 3: Send ProposalFin env.p2p_tx .send(Event { source: PeerId::random(), @@ -474,6 +455,8 @@ async fn test_proposal_fin_deferred_until_parent_block_decided( h2r1, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(expected_proposal_commitment2.0), + executed_transaction_count: 5, + fin_payload: None, }), ), }) @@ -523,7 +506,7 @@ async fn test_proposal_fin_deferred_until_parent_block_decided( // Verify: Proposal event should be sent now let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(3)) .await - .expect("Expected proposal event after ExecutedTransactionCount"); + .expect("Expected proposal event after ConfirmBlockCommitted"); verify_proposal_event(proposal_cmd, 2, expected_proposal_commitment2); } else { // Step 8: It turns out that the feeder gateway was faster to get the @@ -539,8 +522,8 @@ async fn test_proposal_fin_deferred_until_parent_block_decided( /// Execution has started (TransactionBatch received), so ProposalFin must be /// deferred until the parent block is committed, then finalization can proceed. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch → -/// ExecutedTransactionCount → ProposalFin → CommitBlock(parent). +/// **Test**: Send Init → TransactionBatch → ProposalFin → +/// CommitBlock(parent). /// /// Verify ProposalFin is deferred (no proposal event), then verify /// finalization occurs after parent block is committed. 
@@ -577,7 +560,7 @@ async fn test_proposal_fin_deferred_until_parent_block_committed( let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); let h2r1 = HeightAndRound::new(2, 1); let transactions = create_transaction_batch(0, 0, 5, chain_id); - let (proposal_init, block_info) = + let proposal_init = create_test_proposal_init(chain_id, h2r1.height(), h2r1.round(), proposer_address); // Focus is on batch execution and deferral logic, not commitment validation. @@ -593,16 +576,7 @@ async fn test_proposal_fin_deferred_until_parent_block_committed( .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(h2r1, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch (execution should start) + // Step 2: Send TransactionBatch (execution should start) env.p2p_tx .send(Event { source: PeerId::random(), @@ -614,16 +588,7 @@ async fn test_proposal_fin_deferred_until_parent_block_committed( // Verify: No proposal event yet (execution started, but not finalized) verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - // Step 4: Send ExecutedTransactionCount - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(h2r1, ProposalPart::ExecutedTransactionCount(5)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - // Step 5: Send ProposalFin + // Step 3: Send ProposalFin env.p2p_tx .send(Event { source: PeerId::random(), @@ -631,6 +596,8 @@ async fn test_proposal_fin_deferred_until_parent_block_committed( h2r1, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment2.0), + executed_transaction_count: 5, + fin_payload: None, }), ), }) @@ -682,7 +649,7 @@ async fn 
test_proposal_fin_deferred_until_parent_block_committed( // Verify: Proposal event should be sent now let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(3)) .await - .expect("Expected proposal event after ExecutedTransactionCount"); + .expect("Expected proposal event after ConfirmBlockCommitted"); verify_proposal_event(proposal_cmd, 2, proposal_commitment2); } else { // Step 8: It turns out that the feeder gateway was faster to get the @@ -695,11 +662,9 @@ async fn test_proposal_fin_deferred_until_parent_block_committed( /// Full proposal flow in normal order. /// /// **Scenario**: Complete proposal flow with all parts arriving in the -/// expected order. ExecutedTransactionCount arrives before ProposalFin, so no -/// deferral is needed. +/// expected order, no deferral is needed. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch → -/// ExecutedTransactionCount → ProposalFin. +/// **Test**: Send Init → TransactionBatch → ProposalFin. /// /// Verify proposal event is sent immediately after ProposalFin (no /// deferral). @@ -714,7 +679,7 @@ async fn test_full_proposal_flow_normal_order() { let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); let height_and_round = HeightAndRound::new(2, 1); let transactions = create_transaction_batch(0, 0, 5, chain_id); - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); // Focus is on batch execution and deferral logic, not commitment validation. // Using a dummy commitment... 
@@ -729,16 +694,7 @@ async fn test_full_proposal_flow_normal_order() { .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch + // Step 2: Send TransactionBatch env.p2p_tx .send(Event { source: PeerId::random(), @@ -751,23 +707,10 @@ async fn test_full_proposal_flow_normal_order() { env.verify_task_alive().await; // Verify: No proposal event yet (execution started, but - // ExecutedTransactionCount not processed) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: Send ExecutedTransactionCount - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::ExecutedTransactionCount(5)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (ExecutedTransactionCount processed, but - // ProposalFin not received) + // ProposalFin not processed) verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - // Step 5: Send ProposalFin + // Step 3: Send ProposalFin env.p2p_tx .send(Event { source: PeerId::random(), @@ -775,6 +718,8 @@ async fn test_full_proposal_flow_normal_order() { height_and_round, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + executed_transaction_count: 5, + fin_payload: None, }), ), }) @@ -789,18 +734,16 @@ async fn test_full_proposal_flow_normal_order() { env.verify_task_alive().await; } -/// ExecutedTransactionCount deferred when execution not started. +/// TransactionBatch deferred when execution not started. 
/// /// **Scenario**: Parent block is not committed initially, so -/// TransactionBatch and ExecutedTransactionCount are both deferred. After -/// parent is committed, execution starts and deferred messages are processed. +/// TransactionBatch is deferred. After parent is committed, execution +/// starts and deferred messages are processed. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch → -/// ExecutedTransactionCount (without committing parent). +/// **Test**: Send Init → TransactionBatch (without committing parent). /// /// Verify no execution occurs. Then commit parent block and send another -/// TransactionBatch. Verify deferred ExecutedTransactionCount is processed when -/// execution starts. +/// TransactionBatch. Verify execution starts. #[test_log::test(tokio::test(flavor = "multi_thread"))] async fn test_executed_transaction_count_deferred_when_execution_not_started() { let chain_id = ChainId::SEPOLIA_TESTNET; @@ -813,7 +756,7 @@ async fn test_executed_transaction_count_deferred_when_execution_not_started() { let height_and_round = HeightAndRound::new(2, 1); let transactions_batch1 = create_transaction_batch(0, 0, 3, chain_id); let transactions_batch2 = create_transaction_batch(0, 3, 2, chain_id); // Total: 5 - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); // Step 1: Send ProposalInit env.p2p_tx @@ -824,16 +767,7 @@ async fn test_executed_transaction_count_deferred_when_execution_not_started() { .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - // Step 2: Send BlockInfo (should be deferred - parent not committed) - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send first TransactionBatch (should be deferred - 
parent not + // Step 2: Send first TransactionBatch (should be deferred - parent not // committed) env.p2p_tx .send(Event { @@ -849,26 +783,12 @@ async fn test_executed_transaction_count_deferred_when_execution_not_started() { // Verify: No proposal event (execution deferred) verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - // Step 4: Send ExecutedTransactionCount (should be deferred - execution not - // started) - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::ExecutedTransactionCount(5)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - // Verify: Still no proposal event (ExecutedTransactionCount deferred) - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 5: Now we commit the parent block + // Step 3: Now we commit the parent block env.create_committed_block(1); tokio::time::sleep(Duration::from_millis(100)).await; - // Step 6: Send another TransactionBatch - // This should trigger execution of deferred batches + process deferred - // ExecutedTransactionCount + // Step 4: Send another TransactionBatch + // This should trigger execution of deferred batches env.p2p_tx .send(Event { source: PeerId::random(), @@ -880,17 +800,15 @@ async fn test_executed_transaction_count_deferred_when_execution_not_started() { .expect("Failed to send second TransactionBatch"); env.verify_task_alive().await; - // At this point, execution should have started and ExecutedTransactionCount - // should be processed... - - // To verify this, we send ProposalFin, then verify that a proposal event is - // sent (which confirms ExecutedTransactionCount was processed). + // At this point, execution should have started. To verify this, + // we send ProposalFin, then verify that a proposal event is sent + // (which confirms executed transaction count was processed). 
// Once again, using a dummy commitment... let proposal_commitment = ProposalCommitment(Felt::ZERO); - // Step 7: Send ProposalFin - // This should trigger finalization since ExecutedTransactionCount was processed + // Step 5: Send ProposalFin + // This should trigger finalization env.p2p_tx .send(Event { source: PeerId::random(), @@ -898,17 +816,18 @@ async fn test_executed_transaction_count_deferred_when_execution_not_started() { height_and_round, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + executed_transaction_count: 5, + fin_payload: None, }), ), }) .expect("Failed to send ProposalFin"); env.verify_task_alive().await; - // Verify: Proposal event should be sent (confirms ExecutedTransactionCount was - // processed) + // Verify: Proposal event should be sent let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, Duration::from_secs(2)) .await - .expect("Expected proposal event after deferred ExecutedTransactionCount was processed"); + .expect("Expected proposal event after deferred execution"); verify_proposal_event(proposal_cmd, 2, proposal_commitment); env.verify_task_alive().await; } @@ -917,11 +836,10 @@ async fn test_executed_transaction_count_deferred_when_execution_not_started() { /// /// **Scenario**: A proposal contains multiple TransactionBatch messages /// that must all be executed in order. All batches should be executed -/// before ExecutedTransactionCount is processed. +/// before executed transaction count is processed. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch 1 → -/// TransactionBatch 2 → TransactionBatch 3 → ExecutedTransactionCount → -/// ProposalFin. +/// **Test**: Send Init → TransactionBatch 1 → TransactionBatch 2 → +/// TransactionBatch 3 → ProposalFin. /// /// Verify proposal event is sent after ProposalFin. 
#[test_log::test(tokio::test(flavor = "multi_thread"))] @@ -937,7 +855,7 @@ async fn test_multiple_batches_execution() { let transactions_batch1 = create_transaction_batch(0, 0, 2, chain_id); let transactions_batch2 = create_transaction_batch(0, 2, 3, chain_id); let transactions_batch3 = create_transaction_batch(0, 5, 2, chain_id); // Total: 7 - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); // Focus is on batch execution and deferral logic, not commitment validation. // Using a dummy commitment... @@ -952,16 +870,7 @@ async fn test_multiple_batches_execution() { .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send multiple TransactionBatches + // Step 2: Send multiple TransactionBatches env.p2p_tx .send(Event { source: PeerId::random(), @@ -995,16 +904,7 @@ async fn test_multiple_batches_execution() { .expect("Failed to send TransactionBatch3"); env.verify_task_alive().await; - // Step 4: Send ExecutedTransactionCount (total count = 7) - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::ExecutedTransactionCount(7)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - // Step 5: Send ProposalFin + // Step 3: Send ProposalFin env.p2p_tx .send(Event { source: PeerId::random(), @@ -1012,6 +912,8 @@ async fn test_multiple_batches_execution() { height_and_round, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + executed_transaction_count: 7, + fin_payload: None, }), ), }) @@ 
-1026,16 +928,17 @@ async fn test_multiple_batches_execution() { env.verify_task_alive().await; } -/// ExecutedTransactionCount triggers rollback when count is less than executed. +/// Executed transaction count triggers rollback when count is less than +/// executed. /// /// **Scenario**: We execute 10 transactions (2 batches of 5), but -/// ExecutedTransactionCount indicates only 7 transactions were executed by the -/// proposer. The validator must rollback from 10 to 7 transactions to -/// match the proposer's state. +/// executed transaction count indicates only 7 transactions were +/// executed by the proposer. The validator must rollback from 10 to 7 +/// transactions to match the proposer's state. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch1 (5 txs) → -/// TransactionBatch2 (5 txs) → ExecutedTransactionCount (count=7) → -/// ProposalFin. +/// **Test**: Send Init → TransactionBatch1 (5 txs) → +/// TransactionBatch2 (5 txs) → ProposalFin (executed transaction +/// count=7). /// /// Verify proposal event is sent successfully after rollback, confirming /// the rollback mechanism works correctly. @@ -1051,7 +954,7 @@ async fn test_executed_transaction_count_rollback() { let height_and_round = HeightAndRound::new(2, 1); let transactions_batch1 = create_transaction_batch(0, 0, 5, chain_id); let transactions_batch2 = create_transaction_batch(0, 5, 5, chain_id); // Total: 10 - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); // Focus is on batch execution and deferral logic, not commitment validation. // Using a dummy commitment... 
@@ -1066,16 +969,7 @@ async fn test_executed_transaction_count_rollback() { .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - // Step 2: Send BlockInfo - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - // Step 3: Send TransactionBatch 1 (5 transactions) + // Step 2: Send TransactionBatch 1 (5 transactions) env.p2p_tx .send(Event { source: PeerId::random(), @@ -1087,7 +981,7 @@ async fn test_executed_transaction_count_rollback() { .expect("Failed to send TransactionBatch1"); env.verify_task_alive().await; - // Step 4: Send TransactionBatch 2 (5 more transactions, total = 10) + // Step 3: Send TransactionBatch 2 (5 more transactions, total = 10) env.p2p_tx .send(Event { source: PeerId::random(), @@ -1099,17 +993,7 @@ async fn test_executed_transaction_count_rollback() { .expect("Failed to send TransactionBatch2"); env.verify_task_alive().await; - // Step 5: Send ExecutedTransactionCount with count=7 (should trigger rollback - // from 10 to 7) - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::ExecutedTransactionCount(7)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - // Step 6: Send ProposalFin + // Step 4: Send ProposalFin env.p2p_tx .send(Event { source: PeerId::random(), @@ -1117,6 +1001,8 @@ async fn test_executed_transaction_count_rollback() { height_and_round, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + executed_transaction_count: 7, // should trigger rollback from 10 to 7 + fin_payload: None, }), ), }) @@ -1141,16 +1027,17 @@ async fn test_executed_transaction_count_rollback() { /// Empty TransactionBatch execution (non-spec edge case). 
/// -/// **Scenario**: A proposal contains an empty TransactionBatch. Per the -/// [Starknet consensus spec](https://raw.githubusercontent.com/starknet-io/starknet-p2p-specs/refs/heads/main/p2p/proto/consensus/consensus.md), -/// if a proposer has no transactions, they should send an empty proposal -/// (skipping BlockInfo, TransactionBatch and ExecutedTransactionCount -/// entirely). However, this test covers the case where a non-empty proposal -/// includes an empty TransactionBatch. Such a proposal is invalid per the spec, -/// so we reject it. +/// **Scenario**: A proposal contains an empty TransactionBatch. Per +/// the [Starknet consensus +/// spec](https://raw.githubusercontent.com/starknet-io/starknet-p2p-specs/refs/heads/main/p2p/proto/consensus/consensus.md), +/// if a proposer has no transactions, they should send an empty +/// proposal (skipping TransactionBatch entirely). However, this test +/// covers the case where a non-empty proposal includes an empty +/// TransactionBatch. Such a proposal is invalid per the spec, so we +/// reject it. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch (empty) → -/// ExecutedTransactionCount (count=0) → ProposalFin. +/// **Test**: Send Init → TransactionBatch (empty) → ProposalFin +/// (executed transaction count=0). /// /// Verify that the proposal is rejected and no proposal event is sent. 
#[test_log::test(tokio::test(flavor = "multi_thread"))] @@ -1164,7 +1051,7 @@ async fn test_empty_batch_is_rejected() { let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); let height_and_round = HeightAndRound::new(2, 1); let empty_transactions = create_transaction_batch(0, 0, 0, chain_id); - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); env.p2p_tx .send(Event { @@ -1174,14 +1061,6 @@ async fn test_empty_batch_is_rejected() { .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - env.p2p_tx .send(Event { source: PeerId::random(), @@ -1197,15 +1076,16 @@ async fn test_empty_batch_is_rejected() { env.verify_task_alive().await; } -/// ExecutedTransactionCount indicates more transactions than actually executed. +/// Executed transaction count indicates more transactions than actually +/// executed. /// -/// **Scenario**: We execute 5 transactions, but ExecutedTransactionCount -/// indicates -/// 10. This shouldn't happen with proper message ordering, but the code -/// handles it by logging a warning and continuing. +/// **Scenario**: We execute 5 transactions, but executed transaction +/// count indicates 10. This shouldn't happen with proper message +/// ordering, but the code handles it by logging a warning and +/// continuing. /// -/// **Test**: Send Init → BlockInfo → TransactionBatch (5 txs) → -/// ExecutedTransactionCount (count=10) → ProposalFin. +/// **Test**: Send Init → TransactionBatch (5 txs) → ProposalFin +/// (executed transaction count=10). /// /// Verify processing continues and proposal event is sent (with 5 transactions, /// not 10). 
@@ -1224,7 +1104,7 @@ async fn test_executed_transaction_count_exceeds_actually_executed() { let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); let height_and_round = HeightAndRound::new(2, 1); let transactions = create_transaction_batch(0, 0, 5, chain_id); - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); let proposal_commitment = ProposalCommitment(Felt::ZERO); @@ -1236,112 +1116,6 @@ async fn test_executed_transaction_count_exceeds_actually_executed() { .expect("Failed to send ProposalInit"); env.verify_task_alive().await; - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal( - height_and_round, - ProposalPart::TransactionBatch(transactions), - ), - }) - .expect("Failed to send TransactionBatch"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::ExecutedTransactionCount(10)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal( - height_and_round, - ProposalPart::Fin(p2p_proto::consensus::ProposalFin { - proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), - }), - ), - }) - .expect("Failed to send ProposalFin"); - tokio::time::sleep(Duration::from_millis(500)).await; - - let proposal_cmd = wait_for_proposal_event(&mut env.rx_from_p2p, 
Duration::from_secs(2)) - .await - .expect("Expected proposal event after ProposalFin"); - verify_proposal_event(proposal_cmd, 2, proposal_commitment); - env.verify_task_alive().await; -} - -/// ExecutedTransactionCount arrives before any TransactionBatch. -/// -/// **Scenario**: ExecutedTransactionCount arrives before execution starts (no -/// batches received yet). It should be deferred until execution starts, -/// then processed. -/// -/// **Test**: Send Init → BlockInfo → ExecutedTransactionCount → -/// TransactionBatch → ProposalFin. -/// -/// Verify ExecutedTransactionCount is deferred, then processed when execution -/// starts, and proposal event is sent. -#[test_log::test(tokio::test(flavor = "multi_thread"))] -async fn test_executed_transaction_count_before_any_batch() { - let chain_id = ChainId::SEPOLIA_TESTNET; - let validator_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x123").unwrap()); - let mut env = TestEnvironment::new(chain_id, validator_address); - env.create_committed_block(1); - env.wait_for_task_initialization().await; - - let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); - let height_and_round = HeightAndRound::new(2, 1); - let transactions = create_transaction_batch(0, 0, 5, chain_id); - let (proposal_init, block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); - - let proposal_commitment = ProposalCommitment(Felt::ZERO); - - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::Init(proposal_init)), - }) - .expect("Failed to send ProposalInit"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, ProposalPart::BlockInfo(block_info)), - }) - .expect("Failed to send BlockInfo"); - env.verify_task_alive().await; - - env.p2p_tx - .send(Event { - source: PeerId::random(), - kind: EventKind::Proposal(height_and_round, 
ProposalPart::ExecutedTransactionCount(5)), - }) - .expect("Failed to send ExecutedTransactionCount"); - env.verify_task_alive().await; - - verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; - - // Step 4: Send TransactionBatch - // This should trigger execution start and process the deferred - // ExecutedTransactionCount env.p2p_tx .send(Event { source: PeerId::random(), @@ -1353,11 +1127,6 @@ async fn test_executed_transaction_count_before_any_batch() { .expect("Failed to send TransactionBatch"); env.verify_task_alive().await; - // Verify: Still no proposal event (ExecutedTransactionCount processed, but - // ProposalFin not received) - // Note: We verify that deferred ExecutedTransactionCount was processed - // indirectly by sending ProposalFin below and confirming the proposal event - // is sent (which requires ExecutedTransactionCount to be processed first). verify_no_proposal_event(&mut env.rx_from_p2p, Duration::from_millis(200)).await; env.p2p_tx @@ -1367,6 +1136,8 @@ async fn test_executed_transaction_count_before_any_batch() { height_and_round, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + executed_transaction_count: 10, + fin_payload: None, }), ), }) @@ -1380,15 +1151,14 @@ async fn test_executed_transaction_count_before_any_batch() { env.verify_task_alive().await; } -/// Empty proposal per spec (no TransactionBatch, no ExecutedTransactionCount). +/// Empty proposal per spec (no TransactionBatch). /// /// **Scenario**: A proposer cannot offer a valid proposal, so the height is -/// agreed to be empty. Per the spec, empty proposals skip -/// TransactionBatch and ExecutedTransactionCount entirely. The order is: +/// agreed to be empty. Per the spec, empty proposals skip TransactionBatch +/// entirely. The order is: /// ProposalInit → ProposalFin. 
/// -/// **Test**: Send ProposalInit → ProposalFin (no TransactionBatch, no -/// ExecutedTransactionCount). +/// **Test**: Send ProposalInit → ProposalFin (no TransactionBatch). /// /// Verify ProposalFin proceeds immediately (not deferred, since execution /// never started), proposal event is sent. @@ -1403,10 +1173,7 @@ async fn test_empty_proposal_per_spec() { let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); let height_and_round = HeightAndRound::new(2, 1); - // For empty proposals, we still need BlockInfo to transition to - // TransactionBatch stage, but we don't send any TransactionBatch or - // ExecutedTransactionCount - let (proposal_init, _block_info) = create_test_proposal_init(chain_id, 2, 1, proposer_address); + let proposal_init = create_test_proposal_init(chain_id, 2, 1, proposer_address); // Using a dummy commitment... let proposal_commitment = ProposalCommitment(Felt::ZERO); @@ -1425,8 +1192,7 @@ async fn test_empty_proposal_per_spec() { // Step 2: Send ProposalFin // Since execution never started (no TransactionBatch), ProposalFin should - // proceed immediately without deferral. This is different from first test - // where execution started but ExecutedTransactionCount wasn't processed yet. + // proceed immediately without deferral. env.p2p_tx .send(Event { source: PeerId::random(), @@ -1434,6 +1200,8 @@ async fn test_empty_proposal_per_spec() { height_and_round, ProposalPart::Fin(p2p_proto::consensus::ProposalFin { proposal_commitment: p2p_proto::common::Hash(proposal_commitment.0), + executed_transaction_count: 0, + fin_payload: None, }), ), }) @@ -1467,7 +1235,7 @@ async fn recv_outdated_event_changes_peer_score() { let proposer_address = ContractAddress::new_or_panic(Felt::from_hex_str("0x456").unwrap()); // We'll use an empty proposal, the content isn't important. 
- let (proposal_init, _) = create_test_proposal_init( + let proposal_init = create_test_proposal_init( chain_id, proposal_height_and_round.height(), proposal_height_and_round.round(), diff --git a/crates/pathfinder/src/consensus/inner/proposal_validator.rs b/crates/pathfinder/src/consensus/inner/proposal_validator.rs index 067dc7fa1e..cf4cbfead2 100644 --- a/crates/pathfinder/src/consensus/inner/proposal_validator.rs +++ b/crates/pathfinder/src/consensus/inner/proposal_validator.rs @@ -7,19 +7,15 @@ use pathfinder_validator::error::{ProposalError, ProposalHandlingError}; /// /// Enforces the following order: /// 1. Proposal Init (must be first) -/// 2. For non-empty proposals: Block Info (must be second) -/// 3. In any order: Transaction Batches (non-empty), ExecutedTransactionCount -/// (at most once) -/// 4. Proposal Fin (must be last) +/// 2. Transaction Batch (zero or more times) +/// 3. Proposal Fin (must be last) /// /// Empty proposals consist of Init + Fin only. pub struct ProposalPartsValidator { height_and_round: HeightAndRound, parts: Vec, has_init: bool, - has_block_info: bool, has_fin: bool, - has_executed_txn_count: bool, transaction_batch_count: usize, proposer_address: Option, valid_round: Option, @@ -32,8 +28,7 @@ pub enum ValidationResult { Accepted, /// Fin received: this is an empty proposal (Init + Fin only). EmptyProposal, - /// Fin received: this is a non-empty proposal (Init + BlockInfo + content + - /// Fin). + /// Fin received: this is a non-empty proposal (Init + content + Fin). 
NonEmptyProposal, } @@ -43,9 +38,7 @@ impl ProposalPartsValidator { height_and_round, parts: Vec::new(), has_init: false, - has_block_info: false, has_fin: false, - has_executed_txn_count: false, transaction_batch_count: 0, proposer_address: None, valid_round: None, @@ -59,11 +52,9 @@ impl ProposalPartsValidator { ) -> Result { match part { ProposalPart::Init(init) => self.accept_init(init), - ProposalPart::BlockInfo(_) => self.accept_block_info(part), ProposalPart::TransactionBatch(tx_batch) => { self.accept_transaction_batch(tx_batch, part) } - ProposalPart::ExecutedTransactionCount(_) => self.accept_executed_txn_count(part), ProposalPart::Fin(_) => self.accept_fin(part), } } @@ -107,39 +98,17 @@ impl ProposalPartsValidator { Ok(ValidationResult::Accepted) } - fn accept_block_info( - &mut self, - part: &ProposalPart, - ) -> Result { - if self.parts.len() != 1 { - return Err(ProposalHandlingError::Recoverable( - ProposalError::UnexpectedProposalPart { - message: format!( - "Unexpected proposal BlockInfo for {} at position {}", - self.height_and_round, - self.parts.len() - ), - }, - )); - } - - self.has_block_info = true; - self.parts.push(part.clone()); - Ok(ValidationResult::Accepted) - } - fn accept_transaction_batch( &mut self, tx_batch: &[p2p_proto::consensus::Transaction], part: &ProposalPart, ) -> Result { - if self.parts.len() < 2 { + if self.parts.is_empty() { return Err(ProposalHandlingError::Recoverable( ProposalError::UnexpectedProposalPart { message: format!( - "Unexpected proposal TransactionBatch for {} at position {}", + "Unexpected proposal TransactionBatch for {} at initial position", self.height_and_round, - self.parts.len() ), }, )); @@ -162,38 +131,6 @@ impl ProposalPartsValidator { Ok(ValidationResult::Accepted) } - fn accept_executed_txn_count( - &mut self, - part: &ProposalPart, - ) -> Result { - if !self.has_block_info { - return Err(ProposalHandlingError::Recoverable( - ProposalError::UnexpectedProposalPart { - message: format!( - 
"Unexpected proposal ExecutedTransactionCount for {} at position {}", - self.height_and_round, - self.parts.len() - ), - }, - )); - } - - if self.has_executed_txn_count { - return Err(ProposalHandlingError::Recoverable( - ProposalError::UnexpectedProposalPart { - message: format!( - "Duplicate ExecutedTransactionCount for {}", - self.height_and_round, - ), - }, - )); - } - - self.has_executed_txn_count = true; - self.parts.push(part.clone()); - Ok(ValidationResult::Accepted) - } - fn accept_fin( &mut self, part: &ProposalPart, @@ -213,8 +150,8 @@ impl ProposalPartsValidator { return Ok(ValidationResult::EmptyProposal); } - // Non-empty proposal: at least Init + BlockInfo + 2 content parts before Fin - if self.parts.len() >= 4 && self.has_block_info { + // Non-empty proposal: at least Init + transaction batch before Fin + if self.parts.len() >= 2 && self.has_init { self.has_fin = true; self.parts.push(part.clone()); return Ok(ValidationResult::NonEmptyProposal); @@ -268,18 +205,10 @@ mod tests { RemoveInit, /// Init must appear exactly once, first. DuplicateInit, - /// BlockInfo must appear exactly once, second (for non-empty - /// proposals). - RemoveBlockInfo, - /// BlockInfo must appear exactly once, second (for non-empty - /// proposals). - DuplicateBlockInfo, /// Non-empty proposals need at least one transaction batch. RemoveAllTransactionBatches, /// Empty batches are rejected. AddEmptyTransactionBatch, - /// At most one ExecutedTransactionCount allowed. - DuplicateExecutedTransactionCount, /// Fin must appear at most once. DuplicateFin, /// Random reordering breaks the required part sequence. 
@@ -289,11 +218,8 @@ mod tests { const ALL_MUTATIONS: &[Mutation] = &[ Mutation::RemoveInit, Mutation::DuplicateInit, - Mutation::RemoveBlockInfo, - Mutation::DuplicateBlockInfo, Mutation::RemoveAllTransactionBatches, Mutation::AddEmptyTransactionBatch, - Mutation::DuplicateExecutedTransactionCount, Mutation::DuplicateFin, Mutation::ShuffleAll, ]; @@ -311,17 +237,16 @@ mod tests { } /// Generates a valid non-empty proposal: - /// `[Init, BlockInfo, , Fin]` + /// `[Init, , Fin]` /// /// Content parts are 1..50 transactions split into randomly-sized batches, - /// plus one `ExecutedTransactionCount`, shuffled among each other. + /// shuffled among each other. fn create_valid_non_empty_proposal(seed: u64) -> Vec { let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(seed); let mut parts = Vec::new(); parts.push(ProposalPart::Init(fake::Faker.fake_with_rng(&mut rng))); - parts.push(ProposalPart::BlockInfo(fake::Faker.fake_with_rng(&mut rng))); // Content parts in random order let num_txns: usize = rng.gen_range(1..50); @@ -334,8 +259,6 @@ mod tests { .map(ProposalPart::TransactionBatch) .collect(); - let executed_count: u64 = rng.gen_range(1..=num_txns).try_into().unwrap(); - content_parts.push(ProposalPart::ExecutedTransactionCount(executed_count)); content_parts.shuffle(&mut rng); parts.extend(content_parts); @@ -387,15 +310,6 @@ mod tests { parts.insert(pos, init); } } - Mutation::RemoveBlockInfo => { - parts.retain(|p| !p.is_block_info()); - } - Mutation::DuplicateBlockInfo => { - if let Some(bi) = parts.iter().find(|p| p.is_block_info()).cloned() { - let pos = rng.gen_range(0..=parts.len()); - parts.insert(pos, bi); - } - } Mutation::RemoveAllTransactionBatches => { parts.retain(|p| !p.is_transaction_batch()); } @@ -403,16 +317,6 @@ mod tests { let pos = rng.gen_range(0..=parts.len()); parts.insert(pos, ProposalPart::TransactionBatch(vec![])); } - Mutation::DuplicateExecutedTransactionCount => { - if let Some(etc) = parts - .iter() - .find(|p| 
p.is_executed_transaction_count()) - .cloned() - { - let pos = rng.gen_range(0..=parts.len()); - parts.insert(pos, etc); - } - } Mutation::DuplicateFin => { if let Some(fin) = parts.iter().find(|p| p.is_proposal_fin()).cloned() { let pos = rng.gen_range(0..=parts.len()); @@ -449,9 +353,7 @@ mod tests { match part { ProposalPart::Init(_) => "Init", ProposalPart::Fin(_) => "Fin", - ProposalPart::BlockInfo(_) => "BlockInfo", ProposalPart::TransactionBatch(_) => "TransactionBatch", - ProposalPart::ExecutedTransactionCount(_) => "ExecutedTransactionCount", } } @@ -476,10 +378,10 @@ mod tests { proptest! { #![proptest_config(ProptestConfig::with_cases(200))] - // A non-empty proposal [Init, BlockInfo, , Fin] with - // randomly-sized transaction batches and ExecutedTransactionCount in - // random order must be fully accepted: all intermediate parts return - // Accepted, Fin returns NonEmptyProposal, and all parts are stored. + // A non-empty proposal [Init, , Fin] with + // randomly-sized transaction batches in random order must be + // fully accepted: all intermediate parts return Accepted, Fin + // returns NonEmptyProposal, and all parts are stored. 
#[test] fn valid_non_empty_proposals_are_accepted(seed in any::()) { let parts = create_valid_non_empty_proposal(seed); diff --git a/crates/pathfinder/src/devnet.rs b/crates/pathfinder/src/devnet.rs index f15db4fbff..5e0001c7b4 100644 --- a/crates/pathfinder/src/devnet.rs +++ b/crates/pathfinder/src/devnet.rs @@ -12,7 +12,7 @@ use std::time::{Instant, SystemTime}; use anyhow::{Context, Ok}; use p2p::sync::client::conv::ToDto as _; use p2p_proto::common::{Address, Hash}; -use p2p_proto::consensus::{BlockInfo, ProposalInit, ProposalPart}; +use p2p_proto::consensus::{ProposalInit, ProposalPart}; use p2p_proto::sync::transaction::DeclareV3WithoutClass; use pathfinder_block_commitments::compute_final_hash; use pathfinder_common::state_update::StateUpdateData; @@ -374,13 +374,21 @@ pub fn init_proposal_and_validator( round, valid_round: None, proposer, + timestamp: strictly_increasing_timestamp(prev_timestamp).get(), + builder: proposer, + l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, + l2_gas_price_fri: GAS_PRICE.0, + l1_gas_price_fri: GAS_PRICE.0, + l1_data_gas_price_fri: GAS_PRICE.0, + l1_gas_price_wei: GAS_PRICE.0, + l1_data_gas_price_wei: GAS_PRICE.0, + starknet_version: "".to_string(), + version_constant_commitment: Hash::ZERO, }; - let block_info = block_info(height, proposer, prev_timestamp); let validator = ValidatorBlockInfoStage::new(ChainId::SEPOLIA_TESTNET, proposal_init.clone()) .expect("valid block height"); let validator = validator.validate_block_info( - block_info.clone(), storage.clone(), DecidedBlocks::default(), None, @@ -388,33 +396,7 @@ pub fn init_proposal_and_validator( None, worker_pool.clone(), )?; - Ok(( - validator, - vec![ - ProposalPart::Init(proposal_init), - ProposalPart::BlockInfo(block_info), - ], - )) -} - -/// Block info for devnet blocks, sufficient for execution, provided that gas -/// prices are not validated against any oracle. 
-fn block_info( - height: BlockNumber, - proposer: Address, - prev_timestamp: Option, -) -> BlockInfo { - BlockInfo { - height: height.get(), - builder: proposer, - timestamp: strictly_increasing_timestamp(prev_timestamp).get(), - l1_gas_price_fri: GAS_PRICE.0, - l1_data_gas_price_fri: GAS_PRICE.0, - l2_gas_price_fri: GAS_PRICE.0, - l1_gas_price_wei: GAS_PRICE.0, - l1_data_gas_price_wei: GAS_PRICE.0, - l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, - } + Ok((validator, vec![ProposalPart::Init(proposal_init)])) } #[cfg(test)] diff --git a/crates/pathfinder/tests/consensus.rs b/crates/pathfinder/tests/consensus.rs index 709c26ba13..eede7e57f0 100644 --- a/crates/pathfinder/tests/consensus.rs +++ b/crates/pathfinder/tests/consensus.rs @@ -65,9 +65,7 @@ mod test { #[case::happy_path(None)] // Bootstrap DB, none of the transactions should get reverted #[case::fail_on_proposal_init_rx(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::ProposalInitRx }))] - #[case::fail_on_block_info_rx(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::BlockInfoRx }))] #[case::fail_on_transaction_batch_rx(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::TransactionBatchRx }))] - #[case::fail_on_executed_transaction_count_rx(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::ExecutedTransactionCountRx }))] #[case::fail_on_proposal_fin_rx(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::ProposalFinRx }))] #[case::fail_on_proposal_finalized(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::ProposalFinalized }))] #[case::fail_on_prevote_rx(Some(InjectFailureConfig { height: 4, trigger: InjectFailureTrigger::PrevoteRx }))] diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs index 7b3c0a3c16..030222be38 100644 --- a/crates/validator/src/lib.rs +++ b/crates/validator/src/lib.rs @@ -7,7 +7,7 @@ use anyhow::Context; use 
p2p::sync::client::conv::TryFromDto; use p2p_proto::class::Cairo1Class; use p2p_proto::common::Hash; -use p2p_proto::consensus::{BlockInfo, ProposalInit, TransactionVariant as ConsensusVariant}; +use p2p_proto::consensus::{ProposalInit, TransactionVariant as ConsensusVariant}; use p2p_proto::sync::transaction::{DeclareV3WithoutClass, TransactionVariant as SyncVariant}; use p2p_proto::transaction::DeclareV3WithClass; use pathfinder_class_hash::compute_sierra_class_hash; @@ -139,7 +139,7 @@ pub fn new( #[derive(Debug)] pub struct ValidatorBlockInfoStage { chain_id: ChainId, - proposal_height: BlockNumber, + proposal_init: ProposalInit, } impl ValidatorBlockInfoStage { @@ -148,11 +148,12 @@ impl ValidatorBlockInfoStage { proposal_init: ProposalInit, ) -> Result { // TODO(validator) how can we validate the proposal init? + let _proposal_height = BlockNumber::new(proposal_init.height) + .context("ProposalInit height exceeds i64::MAX") + .map_err(ProposalHandlingError::recoverable)?; Ok(ValidatorBlockInfoStage { chain_id, - proposal_height: BlockNumber::new(proposal_init.height) - .context("ProposalInit height exceeds i64::MAX") - .map_err(ProposalHandlingError::recoverable)?, + proposal_init, }) } @@ -161,7 +162,7 @@ impl ValidatorBlockInfoStage { } pub fn proposal_height(&self) -> u64 { - self.proposal_height.get() + self.proposal_init.height } /// Validate the block info against the parent block and L1 gas price data, @@ -169,7 +170,6 @@ impl ValidatorBlockInfoStage { #[allow(clippy::too_many_arguments)] pub fn validate_block_info( self, - block_info: BlockInfo, main_storage: Storage, decided_blocks: DecidedBlocks, gas_price_provider: Option, @@ -177,69 +177,55 @@ impl ValidatorBlockInfoStage { l2_gas_price_provider: Option<&L2GasPriceProvider>, worker_pool: ValidatorWorkerPool, ) -> Result { + let Self { + chain_id, + proposal_init, + } = self; + let ProposalInit { + height, + round: _, + valid_round: _, + proposer: _, + timestamp, + builder, + l1_da_mode, + 
l2_gas_price_fri, + l1_gas_price_fri, + l1_data_gas_price_fri, + l1_gas_price_wei, + l1_data_gas_price_wei, + starknet_version: _, + version_constant_commitment: _, + } = proposal_init; let _span = tracing::debug_span!( "Validator::validate_block_info", - height = %block_info.height, - timestamp = %block_info.timestamp, - builder = %block_info.builder.0, + %height, + %timestamp, + builder = %builder.0, ) .entered(); - let Self { - chain_id, - proposal_height, - } = self; - - if proposal_height != block_info.height { - return Err(ProposalHandlingError::recoverable_msg(format!( - "ProposalInit height does not match BlockInfo height: {} != {}", - proposal_height, block_info.height, - ))); - } - - validate_block_info_timestamp( - block_info.height, - block_info.timestamp, - &main_storage, - decided_blocks.clone(), - )?; + validate_block_info_timestamp(height, timestamp, &main_storage, decided_blocks.clone())?; // Validate L1 gas prices if a provider is available if let Some(ref provider) = gas_price_provider { - validate_l1_gas_prices( - block_info.timestamp, - block_info.l1_gas_price_wei, - block_info.l1_data_gas_price_wei, - provider, - )?; + validate_l1_gas_prices(timestamp, l1_gas_price_wei, l1_data_gas_price_wei, provider)?; } // Validate L1 gas prices in FRI terms if let Some(validator) = l1_to_fri_validator { validate_l1_to_fri_prices( - block_info.timestamp, - block_info.l1_gas_price_fri, - block_info.l1_data_gas_price_fri, + timestamp, + l1_gas_price_fri, + l1_data_gas_price_fri, validator, )?; } if let Some(provider) = l2_gas_price_provider { - validate_l2_gas_price(block_info.l2_gas_price_fri, provider)?; + validate_l2_gas_price(l2_gas_price_fri, provider)?; } - let BlockInfo { - height, - timestamp, - builder, - l1_da_mode, - l2_gas_price_fri, - l1_gas_price_fri, - l1_data_gas_price_fri, - l1_gas_price_wei, - l1_data_gas_price_wei, - } = block_info; - let block_info = pathfinder_executor::types::BlockInfo::try_from_proposal( height, timestamp, @@ -282,45 
+268,56 @@ impl ValidatorBlockInfoStage { /// Used only for testing and dummy proposal creation. pub fn skip_validation( self, - block_info: BlockInfo, main_storage: Storage, worker_pool: ValidatorWorkerPool, decided_blocks: DecidedBlocks, ) -> Result { + let Self { + chain_id, + proposal_init, + } = self; + let ProposalInit { + height, + round: _, + valid_round: _, + proposer: _, + timestamp, + builder, + l1_da_mode, + l2_gas_price_fri, + l1_gas_price_fri, + l1_data_gas_price_fri, + l1_gas_price_wei, + l1_data_gas_price_wei, + starknet_version: _, + version_constant_commitment: _, + } = proposal_init; let _span = tracing::debug_span!( "Validator::skip_block_info_validation", - height = %block_info.height, - timestamp = %block_info.timestamp, - builder = %block_info.builder.0, + height = %height, + timestamp = %timestamp, + builder = %builder.0, ) .entered(); - let Self { - chain_id, - proposal_height, - } = self; - - tracing::debug!( - "Skipping block info validation for height {}", - proposal_height - ); + tracing::debug!("Skipping block info validation for height {}", height); - let builder = SequencerAddress(block_info.builder.0); - let l1_da_mode = match block_info.l1_da_mode { + let builder = SequencerAddress(builder.0); + let l1_da_mode = match l1_da_mode { p2p_proto::common::L1DataAvailabilityMode::Blob => L1DataAvailabilityMode::Blob, p2p_proto::common::L1DataAvailabilityMode::Calldata => L1DataAvailabilityMode::Calldata, }; let price_converter = BlockInfoPriceConverter::consensus( - block_info.l2_gas_price_fri, - block_info.l1_gas_price_fri, - block_info.l1_data_gas_price_fri, - block_info.l1_gas_price_wei, - block_info.l1_data_gas_price_wei, + l2_gas_price_fri, + l1_gas_price_fri, + l1_data_gas_price_fri, + l1_gas_price_wei, + l1_data_gas_price_wei, ); let block_info = pathfinder_executor::types::BlockInfo::try_from_proposal( - block_info.height, - block_info.timestamp, + height, + timestamp, builder, l1_da_mode, price_converter, @@ -902,7 +899,7 @@ impl 
std::fmt::Debug for ValidatorTransactionBatchStage { pub enum ValidatorStage { BlockInfo(ValidatorBlockInfoStage), - TransactionBatch(Box), + TransactionBatch(ValidatorTransactionBatchStage), } /// Error indicating that a validator stage conversion failed because the stage @@ -929,7 +926,7 @@ impl ValidatorStage { pub fn try_into_transaction_batch_stage( self, - ) -> Result, WrongValidatorStageError> { + ) -> Result { match self { ValidatorStage::TransactionBatch(stage) => Ok(stage), _ => Err(WrongValidatorStageError { @@ -1206,20 +1203,12 @@ mod tests { ExecutorWorkerPool::::new(1).get() } - fn create_test_proposal( - height: u64, - ) -> ( - p2p_proto::consensus::ProposalInit, - p2p_proto::consensus::BlockInfo, - ) { - let init = p2p_proto::consensus::ProposalInit { + fn create_test_proposal(height: u64) -> p2p_proto::consensus::ProposalInit { + p2p_proto::consensus::ProposalInit { height, round: 1, valid_round: None, proposer: p2p_proto::common::Address::default(), - }; - let block_info = p2p_proto::consensus::BlockInfo { - height, timestamp: 1000, builder: p2p_proto::common::Address::default(), l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -1228,8 +1217,9 @@ mod tests { l1_data_gas_price_wei: 0, l1_gas_price_fri: 0, l1_data_gas_price_fri: 0, - }; - (init, block_info) + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), + } } fn create_test_transaction(index: usize) -> p2p_proto::consensus::Transaction { @@ -1276,16 +1266,11 @@ mod tests { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|validator| { - validator.skip_validation( - block_info, - storage, - worker_pool, - 
DecidedBlocks::default(), - ) + validator.skip_validation(storage, worker_pool, DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -1373,16 +1358,11 @@ mod tests { let storage = StorageBuilder::in_tempdir().expect("Failed to create temp database"); let chain_id = ChainId::SEPOLIA_TESTNET; let worker_pool = create_test_worker_pool(); - let (proposal_init, block_info) = create_test_proposal(1); + let proposal_init = create_test_proposal(1); let mut validator_stage = ValidatorBlockInfoStage::new(chain_id, proposal_init) .and_then(|validator| { - validator.skip_validation( - block_info, - storage, - worker_pool, - DecidedBlocks::default(), - ) + validator.skip_validation(storage, worker_pool, DecidedBlocks::default()) }) .expect("Failed to create validator stage"); @@ -1505,11 +1485,6 @@ mod tests { round: hnr.round(), valid_round: None, proposer: p2p_proto::common::Address(Felt::from_hex_str("0x1").unwrap()), - }; - - // Create block info - let block_info = p2p_proto::consensus::BlockInfo { - height: hnr.height(), timestamp: 1000, builder: p2p_proto::common::Address(Felt::from_hex_str("0x1").unwrap()), l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -1518,6 +1493,8 @@ mod tests { l1_data_gas_price_fri: 1, l1_gas_price_wei: 1_000_000_000, l1_data_gas_price_wei: 1, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; // Create validator stages (empty proposal path) @@ -1526,7 +1503,6 @@ mod tests { let validator_transaction_batch = validator_block_info .validate_block_info( - block_info, main_storage.clone(), DecidedBlocks::default(), None, @@ -1633,13 +1609,6 @@ mod tests { round: 0, valid_round: None, proposer: p2p_proto::common::Address(Felt::from_hex_str("0x1").unwrap()), - }; - - let validator_block_info1 = ValidatorBlockInfoStage::new(chain_id, proposal_init1) - .expect("Failed to create ValidatorBlockInfoStage"); - - let block_info1 = p2p_proto::consensus::BlockInfo { - height: 1, 
timestamp: proposal_timestamp, builder: p2p_proto::common::Address(Felt::from_hex_str("0x1").unwrap()), l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -1648,9 +1617,14 @@ mod tests { l1_data_gas_price_fri: 1, l1_gas_price_wei: 1_000_000_000, l1_data_gas_price_wei: 1, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; + + let validator_block_info1 = ValidatorBlockInfoStage::new(chain_id, proposal_init1) + .expect("Failed to create ValidatorBlockInfoStage"); + let result = validator_block_info1.validate_block_info( - block_info1, storage, DecidedBlocks::default(), None, @@ -1690,13 +1664,6 @@ mod tests { round: 0, valid_round: None, proposer: p2p_proto::common::Address(Felt::from_hex_str("0x1").unwrap()), - }; - - let validator_block_info = ValidatorBlockInfoStage::new(chain_id, proposal_init) - .expect("Failed to create ValidatorBlockInfoStage"); - - let block_info = p2p_proto::consensus::BlockInfo { - height: proposal_height.get(), timestamp: 1000, builder: p2p_proto::common::Address(Felt::from_hex_str("0x1").unwrap()), l1_da_mode: p2p_proto::common::L1DataAvailabilityMode::Calldata, @@ -1705,15 +1672,19 @@ mod tests { l1_data_gas_price_fri: 1, l1_gas_price_wei: 1_000_000_000, l1_data_gas_price_wei: 1, + starknet_version: "".to_string(), + version_constant_commitment: Default::default(), }; + let validator_block_info = ValidatorBlockInfoStage::new(chain_id, proposal_init) + .expect("Failed to create ValidatorBlockInfoStage"); + if proposal_height == BlockNumber::GENESIS { // Genesis block should pass timestamp validation even though it does not have a // parent. assert!( validator_block_info .validate_block_info( - block_info, storage, DecidedBlocks::default(), None, @@ -1727,7 +1698,6 @@ mod tests { } else { let err = validator_block_info .validate_block_info( - block_info, storage, DecidedBlocks::default(), None,