From a56eda7ef0b6c09ee729b2bd952e8afb84414eb1 Mon Sep 17 00:00:00 2001 From: playX18 Date: Thu, 16 Apr 2026 09:16:56 +0700 Subject: [PATCH 1/7] feat(ethexe/compute): use proptests instead of custom mock() --- Cargo.lock | 1 + ethexe/compute/Cargo.toml | 1 + ethexe/compute/src/compute.rs | 461 +++++++++++++------------- ethexe/compute/src/prepare.rs | 383 ++++++++++++---------- ethexe/compute/src/service.rs | 180 +++++------ ethexe/compute/src/tests.rs | 588 ++++++++++++++++++---------------- 6 files changed, 843 insertions(+), 771 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f13e345523f..418fb3ee3e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5176,6 +5176,7 @@ dependencies = [ "metrics", "metrics-derive", "ntest", + "proptest", "thiserror 2.0.17", "tokio", "wasmparser 0.230.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethexe/compute/Cargo.toml b/ethexe/compute/Cargo.toml index a964902cc3d..02e0264f48c 100644 --- a/ethexe/compute/Cargo.toml +++ b/ethexe/compute/Cargo.toml @@ -35,5 +35,6 @@ wasmparser.workspace = true ethexe-common = { workspace = true, features = ["mock"] } ethexe-db = { workspace = true, features = ["mock"] } ntest.workspace = true +proptest.workspace = true # test examples demo-ping = { workspace = true, features = ["ethexe"] } diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index cb665ec9e99..7d118dcc826 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -403,7 +403,13 @@ pub(crate) mod utils { #[cfg(test)] mod tests { use super::*; - use crate::{ComputeService, tests::MockProcessor}; + use crate::{ + ComputeService, + tests::{ + MockProcessor, block_chain_strategy, next_compute_event, next_subservice_event, + run_async_test, + }, + }; use ethexe_common::{ DEFAULT_BLOCK_GAS_LIMIT, db::{GlobalsStorageRO, OnChainStorageRW}, @@ -411,7 +417,7 @@ mod tests { RouterEvent, mirror::ExecutableBalanceTopUpRequestedEvent, router::ProgramCreatedEvent, }, 
gear::StateTransition, - mock::*, + mock::BlockChain, }; use ethexe_processor::Processor; use gear_core::{ @@ -419,6 +425,8 @@ mod tests { rpc::ReplyInfo, }; use gprimitives::{ActorId, H256}; + use proptest::{collection, prelude::*}; + use std::collections::BTreeMap; mod test_utils { use crate::CodeAndIdUnchecked; @@ -515,271 +523,276 @@ mod tests { } } - #[tokio::test] - #[ntest::timeout(3000)] - async fn test_compute() { - gear_utils::init_default_logger(); - - // Create non-empty processor result with transitions - let non_empty_result = FinalizedBlockTransitions { - transitions: vec![StateTransition { - actor_id: ActorId::from([1; 32]), - new_state_hash: H256::from([2; 32]), - value_to_receive: 100, - ..Default::default() - }], - ..Default::default() - }; + fn promise_test_inputs_strategy() -> BoxedStrategy<(BlockChain, Vec)> { + (4usize..=8) + .prop_flat_map(|blockchain_len| { + let requestable_indexes = (2..blockchain_len).collect::>(); + let max_selected = requestable_indexes.len().min(3); - let db = Database::memory(); - let block_hash = BlockChain::mock(1).setup(&db).blocks[1].hash; - let config = ComputeConfig::without_quarantine(); - let mut service = ComputeSubService::new( - config, - db.clone(), - MockProcessor { - process_programs_result: Some(non_empty_result), - ..Default::default() - }, - ); + block_chain_strategy(blockchain_len as u32).prop_flat_map(move |chain| { + prop::sample::subsequence(requestable_indexes.clone(), 1..=max_selected) + .prop_map(move |request_indexes| (chain.clone(), request_indexes)) + }) + }) + .boxed() + } - let announce = Announce { - block_hash, - parent: db.config().genesis_announce_hash, - gas_allowance: Some(100), - injected_transactions: vec![], - }; - let announce_hash = announce.to_hash(); + fn predecessor_test_inputs_strategy() -> BoxedStrategy<(BlockChain, usize)> { + (2usize..=16) + .prop_flat_map(|blockchain_len| { + block_chain_strategy(blockchain_len as u32) + .prop_map(move |chain| (chain, blockchain_len)) + 
}) + .boxed() + } - service.receive_announce_to_compute(announce, PromisePolicy::Disabled); + async fn collect_compute_events( + service: &mut ComputeService

, + expected_events: usize, + ) -> Vec { + let mut observed_events = Vec::with_capacity(expected_events); - assert_eq!( - service.next().await.unwrap().unwrap_announce_computed(), - announce_hash - ); + while observed_events.len() < expected_events { + observed_events.push(next_compute_event(service).await); + } - // Verify block was marked as computed - assert!(db.announce_meta(announce_hash).computed); + observed_events + } - // Verify transitions were stored in DB - let stored_transitions = db.announce_outcome(announce_hash).unwrap(); - assert_eq!(stored_transitions.len(), 1); - assert_eq!(stored_transitions[0].actor_id, ActorId::from([1; 32])); - assert_eq!(stored_transitions[0].new_state_hash, H256::from([2; 32])); + proptest! { + #![proptest_config(ProptestConfig::with_cases(64))] + + #[test] + fn test_compute( + chain in block_chain_strategy(1), + transitions in collection::vec(any::(), 1..=4) + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = Database::memory(); + let block_hash = chain.setup(&db).blocks[1].hash; + let config = ComputeConfig::without_quarantine(); + let mut service = ComputeSubService::new( + config, + db.clone(), + MockProcessor { + process_programs_result: Some(FinalizedBlockTransitions { + transitions: transitions.clone(), + ..Default::default() + }), + ..Default::default() + }, + ); - // Verify latest announce - assert_eq!(db.globals().latest_computed_announce_hash, announce_hash); - } + let announce = Announce { + block_hash, + parent: db.config().genesis_announce_hash, + gas_allowance: Some(100), + injected_transactions: vec![], + }; + let announce_hash = announce.to_hash(); - #[tokio::test] - #[ntest::timeout(60000)] - async fn test_compute_with_promises() { - gear_utils::init_default_logger(); - const BLOCKCHAIN_LEN: usize = 10; + service.receive_announce_to_compute(announce, PromisePolicy::Disabled); - let db = Database::memory(); - let mut processor = Processor::new(db.clone()).unwrap(); - let 
ping_code_id = test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); - let ping_id = ActorId::from(0x10000); + assert_eq!( + next_subservice_event(&mut service).await, + ComputeEvent::AnnounceComputed(announce_hash) + ); + assert!(db.announce_meta(announce_hash).computed); + assert_eq!(db.announce_outcome(announce_hash).unwrap(), transitions); + assert_eq!(db.globals().latest_computed_announce_hash, announce_hash); + }); + } + } - let blockchain = BlockChain::mock(BLOCKCHAIN_LEN as u32).setup(&db); + proptest! { + #![proptest_config(ProptestConfig::with_cases(16))] + + #[test] + fn test_compute_with_promises( + (chain, request_indexes) in promise_test_inputs_strategy() + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = Database::memory(); + let mut processor = Processor::new(db.clone()).unwrap(); + let ping_code_id = + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); + let ping_id = ActorId::from(0x10000); + let blockchain = chain.setup(&db); + let blockchain_len = blockchain.blocks.len() - 1; + + let start_announce_hash = { + let mut announce = blockchain.block_top_announce(0).announce.clone(); + announce.gas_allowance = Some(DEFAULT_BLOCK_GAS_LIMIT); - // Setup first announce. 
- let start_announce_hash = { - let mut announce = blockchain.block_top_announce(0).announce.clone(); - announce.gas_allowance = Some(DEFAULT_BLOCK_GAS_LIMIT); + let announce_hash = db.set_announce(announce); + db.mutate_announce_meta(announce_hash, |meta| meta.computed = true); + db.globals_mutate(|globals| { + globals.start_announce_hash = announce_hash; + }); + db.set_announce_program_states(announce_hash, Default::default()); + db.set_announce_schedule(announce_hash, Default::default()); - let announce_hash = db.set_announce(announce); - db.mutate_announce_meta(announce_hash, |meta| meta.computed = true); - db.globals_mutate(|globals| { - globals.start_announce_hash = announce_hash; - }); - db.set_announce_program_states(announce_hash, Default::default()); - db.set_announce_schedule(announce_hash, Default::default()); + announce_hash + }; - announce_hash - }; + let mut parent_announce = start_announce_hash; + let mut announces_by_block = BTreeMap::new(); - // Setup announces and events. 
- let mut parent_announce = start_announce_hash; - let announces_chain = (1..BLOCKCHAIN_LEN) - .map(|i| { - let announce = { + for i in 1..blockchain_len { let mut announce = blockchain.block_top_announce(i).announce.clone(); announce.gas_allowance = Some(DEFAULT_BLOCK_GAS_LIMIT); announce.parent = parent_announce; - let block = announce.block_hash; - let txs = if i != 1 { - vec![test_utils::injected_tx(ping_id, b"PING".into(), block)] + if i != 1 { + announce.injected_transactions = + vec![test_utils::injected_tx(ping_id, b"PING".into(), announce.block_hash)]; + } + + let announce_hash = db.set_announce(announce.clone()); + db.mutate_announce_meta(announce_hash, |meta| meta.computed = false); + + let mut block_events = if i == 1 { + test_utils::create_program_events(ping_id, ping_code_id) } else { Default::default() }; + block_events.extend(test_utils::block_events(5, ping_id, b"PING".into())); + db.set_block_events(announce.block_hash, &block_events); - announce.injected_transactions = txs; - announce - }; - - let announce_hash = db.set_announce(announce.clone()); - db.mutate_announce_meta(announce_hash, |meta| meta.computed = false); - - let mut block_events = if i == 1 { - test_utils::create_program_events(ping_id, ping_code_id) - } else { - Default::default() - }; - block_events.extend(test_utils::block_events(5, ping_id, b"PING".into())); - db.set_block_events(announce.block_hash, &block_events); - - parent_announce = announce_hash; - announce - }) - .collect::>(); - - let mut compute_service = - ComputeService::new(ComputeConfig::without_quarantine(), db.clone(), processor); - - // Send announces for computation. 
- compute_service.compute_announce( - announces_chain.get(2).unwrap().clone(), - PromisePolicy::Enabled, - ); - compute_service.compute_announce( - announces_chain.get(5).unwrap().clone(), - PromisePolicy::Enabled, - ); - compute_service.compute_announce( - announces_chain.get(8).unwrap().clone(), - PromisePolicy::Enabled, - ); - - let mut expected_announces = vec![ - announces_chain.get(2).unwrap().to_hash(), - announces_chain.get(5).unwrap().to_hash(), - announces_chain.get(8).unwrap().to_hash(), - ]; - - let mut expected_promises = expected_announces - .iter() - .map(|hash| { - let announce = db.announce(*hash).unwrap(); - let tx = announce.injected_transactions[0].clone().into_data(); - Promise { - tx_hash: tx.to_hash(), - reply: ReplyInfo { - payload: b"PONG".into(), - value: 0, - code: ReplyCode::Success(SuccessReplyReason::Manual), - }, + parent_announce = announce_hash; + announces_by_block.insert(i, announce); } - }) - .collect::>(); - while !expected_announces.is_empty() || !expected_promises.is_empty() { - match compute_service.next().await.unwrap().unwrap() { - ComputeEvent::AnnounceComputed(hash) => { - if *expected_announces.first().unwrap() == hash { - expected_announces.remove(0); - } - } - ComputeEvent::Promise(promise, announce) => { - if *expected_announces.first().unwrap() == announce - && expected_promises.first().unwrap().clone() == promise - { - expected_promises.remove(0); - } + let mut compute_service = + ComputeService::new(ComputeConfig::without_quarantine(), db.clone(), processor); + let mut expected_events = Vec::with_capacity(request_indexes.len() * 2); + + for index in &request_indexes { + let announce = announces_by_block[index].clone(); + let announce_hash = announce.to_hash(); + let tx = announce.injected_transactions[0].clone().into_data(); + + expected_events.push(ComputeEvent::Promise( + Promise { + tx_hash: tx.to_hash(), + reply: ReplyInfo { + payload: b"PONG".into(), + value: 0, + code: 
ReplyCode::Success(SuccessReplyReason::Manual), + }, + }, + announce_hash, + )); + expected_events.push(ComputeEvent::AnnounceComputed(announce_hash)); + compute_service.compute_announce(announce, PromisePolicy::Enabled); } - _ => unreachable!("unexpected event for current test"), - } + + let observed_events = + collect_compute_events(&mut compute_service, expected_events.len()).await; + assert_eq!(observed_events, expected_events); + }); } - } - #[tokio::test] - #[ntest::timeout(60000)] - async fn test_compute_with_early_break() { - gear_utils::init_default_logger(); + #[test] + fn test_compute_with_early_break( + chain in block_chain_strategy(3), + tx_count in 100usize..=300 + ) { + gear_utils::init_default_logger(); - let db = Database::memory(); - let mut processor = Processor::new(db.clone()).unwrap(); + run_async_test(async move { + let db = Database::memory(); + let mut processor = Processor::new(db.clone()).unwrap(); - let ping_code_id = test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); - let ping_id = ActorId::from(0x10000); + let ping_code_id = + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); + let ping_id = ActorId::from(0x10000); + let blockchain = chain.setup(&db); - let blockchain = BlockChain::mock(3).setup(&db); + let first_announce_hash = { + let mut announce = blockchain.block_top_announce(1).announce.clone(); + announce.gas_allowance = Some(DEFAULT_BLOCK_GAS_LIMIT); - let first_announce_hash = { - let mut announce = blockchain.block_top_announce(1).announce.clone(); - announce.gas_allowance = Some(DEFAULT_BLOCK_GAS_LIMIT); + let mut canonical_events = + test_utils::create_program_events(ping_id, ping_code_id); + canonical_events.push(test_utils::canonical_event(ping_id, b"PING".into())); - let mut canonical_events = test_utils::create_program_events(ping_id, ping_code_id); - canonical_events.push(test_utils::canonical_event(ping_id, b"PING".into())); + db.set_block_events(announce.block_hash, 
&canonical_events); + db.set_announce(announce) + }; - db.set_block_events(announce.block_hash, &canonical_events); - db.set_announce(announce) - }; + let (announce, announce_hash) = { + let mut announce = blockchain.block_top_announce(2).announce.clone(); + announce.gas_allowance = Some(400_000); + announce.parent = first_announce_hash; + + let ref_block = announce.block_hash; + announce.injected_transactions = (0..tx_count) + .map(|_| test_utils::injected_tx(ping_id, b"PING".into(), ref_block)) + .collect::>(); + let hash = db.set_announce(announce.clone()); + (announce, hash) + }; - let (announce, announce_hash) = { - let mut announce = blockchain.block_top_announce(2).announce.clone(); - announce.gas_allowance = Some(400_000); - announce.parent = first_announce_hash; + let mut compute_service = + ComputeService::new(ComputeConfig::without_quarantine(), db.clone(), processor); + compute_service.compute_announce(announce, PromisePolicy::Enabled); - let ref_block = announce.block_hash; - let txs = (0..300) - .map(|_| test_utils::injected_tx(ping_id, b"PING".into(), ref_block)) - .collect::>(); - announce.injected_transactions = txs; - let hash = db.set_announce(announce.clone()); - (announce, hash) - }; - - let mut compute_service = - ComputeService::new(ComputeConfig::without_quarantine(), db.clone(), processor); - compute_service.compute_announce(announce, PromisePolicy::Enabled); + let mut announce_computed = false; + for _ in 0..=tx_count + 1 { + if next_compute_event(&mut compute_service).await + == ComputeEvent::AnnounceComputed(announce_hash) + { + announce_computed = true; + break; + } + } - loop { - let event = compute_service.next().await.unwrap().unwrap(); - if event == ComputeEvent::AnnounceComputed(announce_hash) { - break; - } + assert!(announce_computed); + }); } } - #[test] - fn collect_not_computed_predecessors_work_correctly() { - const BLOCKCHAIN_LEN: usize = 10; + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(128))] - let db = Database::memory(); - let blockchain = BlockChain::mock(BLOCKCHAIN_LEN as u32).setup(&db); + #[test] + fn collect_not_computed_predecessors_work_correctly( + (chain, blockchain_len) in predecessor_test_inputs_strategy() + ) { + let db = Database::memory(); + let blockchain = chain.setup(&db); - // Setup announces except the start-announce to not-computed state. - (0..BLOCKCHAIN_LEN - 1).for_each(|idx| { - let announce_hash = blockchain.block_top_announce(idx).announce.to_hash(); + (0..blockchain_len - 1).for_each(|idx| { + let announce_hash = blockchain.block_top_announce(idx).announce.to_hash(); - if idx == 0 { - db.mutate_announce_meta(announce_hash, |meta| meta.computed = true); - } else { - db.mutate_announce_meta(announce_hash, |meta| meta.computed = false); - } - }); + if idx == 0 { + db.mutate_announce_meta(announce_hash, |meta| meta.computed = true); + } else { + db.mutate_announce_meta(announce_hash, |meta| meta.computed = false); + } + }); - let expected_not_computed_announces = (1..BLOCKCHAIN_LEN - 1) - .map(|idx| blockchain.block_top_announce(idx).announce.to_hash()) - .collect::>(); + let expected_not_computed_announces = (1..blockchain_len - 1) + .map(|idx| blockchain.block_top_announce(idx).announce.to_hash()) + .collect::>(); - let head_announce = blockchain - .block_top_announce(BLOCKCHAIN_LEN - 1) - .announce - .clone(); - let not_computed_announces = utils::collect_not_computed_predecessors(&head_announce, &db) - .unwrap() - .into_iter() - .map(|v| v.0) - .collect::>(); - - assert_eq!( - expected_not_computed_announces.len(), - not_computed_announces.len() - ); - assert_eq!(expected_not_computed_announces, not_computed_announces); + let head_announce = blockchain + .block_top_announce(blockchain_len - 1) + .announce + .clone(); + let not_computed_announces = + utils::collect_not_computed_predecessors(&head_announce, &db) + .unwrap() + .into_iter() + .map(|entry| entry.0) + 
.collect::>(); + + prop_assert_eq!(not_computed_announces, expected_not_computed_announces); + } } } diff --git a/ethexe/compute/src/prepare.rs b/ethexe/compute/src/prepare.rs index 24ae9634fee..958ba6a2835 100644 --- a/ethexe/compute/src/prepare.rs +++ b/ethexe/compute/src/prepare.rs @@ -369,198 +369,237 @@ fn prepare_one_block::random(); - - let block = chain.blocks[1].to_simple().next_block(); - let block = BlockData { - hash: block.hash, - header: block.header, - events: vec![ - BlockEvent::Router(RouterEvent::BatchCommitted(BatchCommittedEvent { - digest: batch_committed, - })), - BlockEvent::Router(RouterEvent::AnnouncesCommitted(AnnouncesCommittedEvent( - block1_announce_hash, - ))), - BlockEvent::Router(RouterEvent::CodeGotValidated(CodeGotValidatedEvent { - code_id: code1_id, - valid: true, - })), - BlockEvent::Router(RouterEvent::CodeValidationRequested( - CodeValidationRequestedEvent { - code_id: code2_id, - timestamp: 1000, - tx_hash: H256::random(), - }, - )), - ], - } - .setup(&db); - - prepare_one_block(&db, block.clone()).unwrap(); - - let meta = db.block_meta(block.hash); - assert!(meta.prepared); - assert_eq!(meta.codes_queue, Some(vec![code2_id].into()),); - assert_eq!(meta.last_committed_batch, Some(batch_committed),); - assert_eq!(meta.last_committed_announce, Some(block1_announce_hash)); + fn announce_hash_strategy() -> BoxedStrategy> { + any::<[u8; 32]>() + .prop_map(H256::from) + .prop_map(|hash| unsafe { HashOf::new(hash) }) + .boxed() } - #[tokio::test] - #[ntest::timeout(3000)] - async fn test_prepare_no_codes() { - gear_utils::init_default_logger(); + fn start_with_codes_strategy() -> BoxedStrategy<(BlockChain, Vec, Vec)> { + block_chain_strategy(1) + .prop_flat_map(|chain| { + collection::vec(any::(), 1..=16).prop_flat_map(move |code| { + let loaded_code_id = CodeId::generate(&code); + let chain = chain.clone(); + distinct_code_ids(3) + .prop_filter( + "extra code ids must differ from the preloaded parent code id", + move |ids| 
!ids.contains(&loaded_code_id), + ) + .prop_map(move |ids| (chain.clone(), ids, code.clone())) + }) + }) + .boxed() + } - let db = Database::memory(); - let mut service = PrepareSubService::new(db.clone()); - let chain = BlockChain::mock(1).setup(&db); - let block = chain.blocks[1].to_simple().next_block().setup(&db); + proptest! { + #![proptest_config(ProptestConfig::with_cases(128))] + + #[test] + fn test_prepare_one_block( + chain in block_chain_strategy(1), + code_ids in distinct_code_ids(2), + batch_committed in any::<[u8; 32]>().prop_map(Digest), + block1_announce_hash in announce_hash_strategy(), + ) { + gear_utils::init_default_logger(); + + let db = Database::memory(); + let chain = chain.setup(&db); + let [code1_id, code2_id] = <[CodeId; 2]>::try_from(code_ids).unwrap(); + + let block = chain.blocks[1].to_simple().next_block(); + let block = BlockData { + hash: block.hash, + header: block.header, + events: vec![ + BlockEvent::Router(RouterEvent::BatchCommitted(BatchCommittedEvent { + digest: batch_committed, + })), + BlockEvent::Router(RouterEvent::AnnouncesCommitted(AnnouncesCommittedEvent( + block1_announce_hash, + ))), + BlockEvent::Router(RouterEvent::CodeGotValidated(CodeGotValidatedEvent { + code_id: code1_id, + valid: true, + })), + BlockEvent::Router(RouterEvent::CodeValidationRequested( + CodeValidationRequestedEvent { + code_id: code2_id, + timestamp: 1000, + tx_hash: H256::random(), + }, + )), + ], + } + .setup(&db); - service.receive_block_to_prepare(block.hash); + prepare_one_block(&db, block.clone()).unwrap(); - assert_eq!( - service.next().await.unwrap(), - Event::BlockPrepared(block.hash), - ); + let meta = db.block_meta(block.hash); + prop_assert!(meta.prepared); + prop_assert_eq!(meta.codes_queue, Some(vec![code2_id].into())); + prop_assert_eq!(meta.last_committed_batch, Some(batch_committed)); + prop_assert_eq!(meta.last_committed_announce, Some(block1_announce_hash)); + } } - #[tokio::test] - #[ntest::timeout(3000)] - async fn 
test_prepare_with_codes() { - gear_utils::init_default_logger(); + proptest! { + #![proptest_config(ProptestConfig::with_cases(64))] - let db = Database::memory(); - let mut service = PrepareSubService::new(db.clone()); - let chain = BlockChain::mock(1).setup(&db); + #[test] + fn test_prepare_no_codes(chain in block_chain_strategy(1)) { + gear_utils::init_default_logger(); - let code1_id = CodeId::from([1u8; 32]); - let code2_id = CodeId::from([2u8; 32]); + run_async_test(async move { + let db = Database::memory(); + let mut service = PrepareSubService::new(db.clone()); + let chain = chain.setup(&db); + let block = chain.blocks[1].to_simple().next_block().setup(&db); - let block = chain.blocks[1].to_simple().next_block(); - let block = BlockData { - hash: block.hash, - header: block.header, - events: vec![ - BlockEvent::Router(RouterEvent::CodeGotValidated(CodeGotValidatedEvent { - code_id: code1_id, - valid: true, - })), - BlockEvent::Router(RouterEvent::CodeValidationRequested( - CodeValidationRequestedEvent { - code_id: code2_id, - timestamp: 1000, - tx_hash: H256::random(), - }, - )), - ], - } - .setup(&db); + service.receive_block_to_prepare(block.hash); - service.receive_block_to_prepare(block.hash); - assert_eq!( - service.next().await.unwrap(), - Event::RequestCodes([code1_id, code2_id].into()) - ); + assert_eq!( + next_subservice_event(&mut service).await, + Event::BlockPrepared(block.hash), + ); + }); + } - service.receive_processed_code(code1_id); - assert_eq!( - service.next().await.unwrap(), - Event::BlockPrepared(block.hash), - ); - } + #[test] + fn test_prepare_with_codes(chain in block_chain_strategy(1), code_ids in distinct_code_ids(2)) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = Database::memory(); + let mut service = PrepareSubService::new(db.clone()); + let chain = chain.setup(&db); + let [code1_id, code2_id] = <[CodeId; 2]>::try_from(code_ids).unwrap(); + + let block = 
chain.blocks[1].to_simple().next_block(); + let block = BlockData { + hash: block.hash, + header: block.header, + events: vec![ + BlockEvent::Router(RouterEvent::CodeGotValidated(CodeGotValidatedEvent { + code_id: code1_id, + valid: true, + })), + BlockEvent::Router(RouterEvent::CodeValidationRequested( + CodeValidationRequestedEvent { + code_id: code2_id, + timestamp: 1000, + tx_hash: H256::random(), + }, + )), + ], + } + .setup(&db); - #[tokio::test] - #[ntest::timeout(3000)] - async fn test_sub_service_start_with_codes() { - gear_utils::init_default_logger(); - - let db = Database::memory(); - let mut service = PrepareSubService::new(db.clone()); - - let validated_code_id = CodeId::from([1u8; 32]); - let requested_code_id = CodeId::from([2u8; 32]); - let parent_block_code_id = CodeId::from([3u8; 32]); - - let code = b"1234"; - let parent_block_loaded_code_id = CodeId::generate(code); - - let chain = BlockChain::mock(1) - .tap_mut(|chain| { - chain.blocks[1].as_prepared_mut().codes_queue = - [parent_block_code_id, parent_block_loaded_code_id].into(); - chain.codes.insert( - parent_block_loaded_code_id, - CodeData { - original_bytes: code.to_vec(), - blob_info: Default::default(), - instrumented: None, - }, + service.receive_block_to_prepare(block.hash); + assert_eq!( + next_subservice_event(&mut service).await, + Event::RequestCodes([code1_id, code2_id].into()) ); - }) - .setup(&db); - let block2 = chain.blocks[1].to_simple().next_block(); - let block3 = block2.next_block(); - - BlockData { - hash: block2.hash, - header: block2.header, - events: vec![BlockEvent::Router(RouterEvent::CodeGotValidated( - CodeGotValidatedEvent { - code_id: validated_code_id, - valid: true, - }, - ))], - } - .setup(&db); - - BlockData { - hash: block3.hash, - header: block3.header, - events: vec![BlockEvent::Router(RouterEvent::CodeValidationRequested( - CodeValidationRequestedEvent { - code_id: requested_code_id, - timestamp: 1000, - tx_hash: H256::random(), - }, - ))], + 
service.receive_processed_code(code1_id); + assert_eq!( + next_subservice_event(&mut service).await, + Event::BlockPrepared(block.hash), + ); + }); } - .setup(&db); - - service.receive_block_to_prepare(block3.hash); - assert_eq!( - service.next().await.unwrap(), - Event::RequestCodes( - [ - parent_block_code_id, - parent_block_loaded_code_id, - validated_code_id, - requested_code_id - ] - .into() - ) - ); - service.receive_processed_code(validated_code_id); - assert_eq!( - service.next().await.unwrap(), - Event::BlockPrepared(block3.hash), - ); + #[test] + fn test_sub_service_start_with_codes( + (chain, code_ids, code) in start_with_codes_strategy() + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = Database::memory(); + let mut service = PrepareSubService::new(db.clone()); + let [validated_code_id, requested_code_id, parent_block_code_id] = + <[CodeId; 3]>::try_from(code_ids).unwrap(); + let parent_block_loaded_code_id = CodeId::generate(&code); + + let chain = chain + .tap_mut(|chain| { + chain.blocks[1].as_prepared_mut().codes_queue = + [parent_block_code_id, parent_block_loaded_code_id].into(); + chain.codes.insert( + parent_block_loaded_code_id, + CodeData { + original_bytes: code.clone(), + blob_info: Default::default(), + instrumented: None, + }, + ); + }) + .setup(&db); + + let block2 = chain.blocks[1].to_simple().next_block(); + let block3 = block2.next_block(); + + BlockData { + hash: block2.hash, + header: block2.header, + events: vec![BlockEvent::Router(RouterEvent::CodeGotValidated( + CodeGotValidatedEvent { + code_id: validated_code_id, + valid: true, + }, + ))], + } + .setup(&db); + + BlockData { + hash: block3.hash, + header: block3.header, + events: vec![BlockEvent::Router(RouterEvent::CodeValidationRequested( + CodeValidationRequestedEvent { + code_id: requested_code_id, + timestamp: 1000, + tx_hash: H256::random(), + }, + ))], + } + .setup(&db); + + service.receive_block_to_prepare(block3.hash); + assert_eq!( + 
next_subservice_event(&mut service).await, + Event::RequestCodes( + [ + parent_block_code_id, + parent_block_loaded_code_id, + validated_code_id, + requested_code_id + ] + .into() + ) + ); + + service.receive_processed_code(validated_code_id); + assert_eq!( + next_subservice_event(&mut service).await, + Event::BlockPrepared(block3.hash), + ); + }); + } } } diff --git a/ethexe/compute/src/service.rs b/ethexe/compute/src/service.rs index 5b96f0256a0..812c84d21f3 100644 --- a/ethexe/compute/src/service.rs +++ b/ethexe/compute/src/service.rs @@ -136,116 +136,100 @@ pub(crate) trait SubService: Unpin + Send + 'static { #[cfg(test)] mod tests { - use super::*; - use ethexe_common::{CodeAndIdUnchecked, db::*, mock::*}; + use crate::tests::{MockProcessor, block_chain_strategy, next_compute_event, run_async_test}; + use ethexe_common::{ + CodeAndIdUnchecked, + db::{AnnounceStorageRO, BlockMetaStorageRO, CodesStorageRO}, + mock::Tap, + }; use ethexe_db::Database as DB; - use futures::StreamExt; use gear_core::ids::prelude::CodeIdExt; use gprimitives::CodeId; + use proptest::{collection, prelude::*}; - /// Test ComputeService block preparation functionality - #[tokio::test] - async fn prepare_block() { - gear_utils::init_default_logger(); - - let db = DB::memory(); - let mut service = ComputeService::new_mock_processor(db.clone()); - - let chain = BlockChain::mock(1).setup(&db); - let block = chain.blocks[1].to_simple().next_block().setup(&db); - - // Request block preparation - service.prepare_block(block.hash); - - // Poll service to process the preparation request - let event = service.next().await.unwrap().unwrap(); - assert_eq!(event, ComputeEvent::BlockPrepared(block.hash)); - - // Verify block is marked as prepared in DB - assert!(db.block_meta(block.hash).prepared); - } + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(64))] - /// Test ComputeService block processing functionality - #[tokio::test] - async fn compute_announce() { - gear_utils::init_default_logger(); + #[test] + fn prepare_block(chain in block_chain_strategy(1)) { + gear_utils::init_default_logger(); - let db = DB::memory(); - let mut service = ComputeService::new_mock_processor(db.clone()); + run_async_test(async move { + let db = DB::memory(); + let mut service = ComputeService::new_mock_processor(db.clone()); - let chain = BlockChain::mock(1).setup(&db); + let chain = chain.setup(&db); + let block = chain.blocks[1].to_simple().next_block().setup(&db); - let block = chain.blocks[1].to_simple().next_block().setup(&db); - - service.prepare_block(block.hash); - let event = service.next().await.unwrap().unwrap(); - assert_eq!(event, ComputeEvent::BlockPrepared(block.hash)); - - // Request computation - let announce = Announce { - block_hash: block.hash, - parent: chain.block_top_announce_hash(1), - gas_allowance: Some(42), - injected_transactions: vec![], - }; - let announce_hash = announce.to_hash(); - service.compute_announce(announce, PromisePolicy::Disabled); - - // Poll service to process the block - let event = service.next().await.unwrap().unwrap(); - assert_eq!(event, ComputeEvent::AnnounceComputed(announce_hash)); - - // Verify block is marked as computed in DB - assert!(db.announce_meta(announce_hash).computed); - } + service.prepare_block(block.hash); - /// Test ComputeService code processing functionality - #[tokio::test] - async fn process_code() { - gear_utils::init_default_logger(); - - let code = vec![0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00]; // Simple WASM header - let code_id = CodeId::generate(&code); - - let db = DB::memory(); - let processor = MockProcessor::with_default_valid_code() - .tap_mut(|p| p.process_codes_result.as_mut().unwrap().code_id = code_id); - let mut service = ComputeService::new( - ComputeConfig::without_quarantine(), - 
db.clone(), - processor.clone(), - ); - - // Create test code - - let code_and_id = CodeAndIdUnchecked { code, code_id }; - - // Verify code is not yet in DB - assert!(db.code_valid(code_id).is_none()); - - // Request code processing - service.process_code(code_and_id); - - // Poll service to process the code - let event = service.next().await.unwrap().unwrap(); - - // Should receive CodeProcessed event with correct code_id - match event { - ComputeEvent::CodeProcessed(processed_code_id) => { - assert_eq!(processed_code_id, code_id); - } - _ => panic!("Expected CodeProcessed event"), + let event = next_compute_event(&mut service).await; + assert_eq!(event, ComputeEvent::BlockPrepared(block.hash)); + assert!(db.block_meta(block.hash).prepared); + }); } - // Verify that the processor was called for non-validated code - assert_eq!( - processor.process_code_call_count(), - 1, - "Processor should be called for non-validated code" - ); + #[test] + fn compute_announce(chain in block_chain_strategy(1), gas_allowance in 1u64..=1_000_000) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = DB::memory(); + let mut service = ComputeService::new_mock_processor(db.clone()); + + let chain = chain.setup(&db); + let block = chain.blocks[1].to_simple().next_block().setup(&db); + + service.prepare_block(block.hash); + assert_eq!( + next_compute_event(&mut service).await, + ComputeEvent::BlockPrepared(block.hash) + ); + + let announce = Announce { + block_hash: block.hash, + parent: chain.block_top_announce_hash(1), + gas_allowance: Some(gas_allowance), + injected_transactions: vec![], + }; + let announce_hash = announce.to_hash(); + service.compute_announce(announce, PromisePolicy::Disabled); + + assert_eq!( + next_compute_event(&mut service).await, + ComputeEvent::AnnounceComputed(announce_hash) + ); + assert!(db.announce_meta(announce_hash).computed); + }); + } - // Verify code is now marked as valid in DB - assert_eq!(db.code_valid(code_id), 
Some(true)); + #[test] + fn process_code(code in collection::vec(any::(), 1..=64)) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let code_id = CodeId::generate(&code); + let db = DB::memory(); + let processor = MockProcessor::with_default_valid_code() + .tap_mut(|p| p.process_codes_result.as_mut().unwrap().code_id = code_id); + let mut service = ComputeService::new( + ComputeConfig::without_quarantine(), + db.clone(), + processor.clone(), + ); + + assert!(db.code_valid(code_id).is_none()); + + service.process_code(CodeAndIdUnchecked { code, code_id }); + + assert_eq!( + next_compute_event(&mut service).await, + ComputeEvent::CodeProcessed(code_id) + ); + assert_eq!(processor.process_code_call_count(), 1); + assert_eq!(db.code_valid(code_id), Some(true)); + }); + } } } diff --git a/ethexe/compute/src/tests.rs b/ethexe/compute/src/tests.rs index 9f7567c6d30..ad9155ebda7 100644 --- a/ethexe/compute/src/tests.rs +++ b/ethexe/compute/src/tests.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . 
use super::*; +use crate::service::SubService; use ethexe_common::{ CodeBlobInfo, PromisePolicy, db::*, @@ -24,17 +25,63 @@ use ethexe_common::{ BlockEvent, RouterEvent, router::{CodeGotValidatedEvent, CodeValidationRequestedEvent}, }, - mock::*, + mock::{BlockChain, BlockChainParams, CodeData, DBMockExt}, }; use ethexe_db::Database; use ethexe_processor::ValidCodeInfo; -use futures::StreamExt; +use futures::{Future, StreamExt}; use gear_core::{ code::{CodeMetadata, InstantiatedSectionSizes, InstrumentedCode}, ids::prelude::CodeIdExt, }; +use gprimitives::{CodeId, H256}; +use proptest::{collection, prelude::*}; use std::time::Duration; -use tokio::{sync::mpsc, time::timeout}; +use tokio::{runtime::Builder, sync::mpsc, time::timeout}; + +pub(crate) const ASYNC_EVENT_TIMEOUT: Duration = Duration::from_millis(500); + +pub(crate) fn block_chain_strategy(len: u32) -> BoxedStrategy { + any_with::(BlockChainParams::from(len)).boxed() +} + +pub(crate) fn distinct_code_ids(count: usize) -> BoxedStrategy> { + collection::btree_set(any::<[u8; 32]>().prop_map(CodeId::from), count) + .prop_map(|ids| ids.into_iter().collect()) + .boxed() +} + +pub(crate) fn run_async_test(future: F) -> F::Output { + Builder::new_current_thread() + .enable_all() + .build() + .expect("failed to build tokio runtime") + .block_on(future) +} + +pub(crate) async fn next_compute_event( + compute: &mut ComputeService

, +) -> ComputeEvent { + timeout(ASYNC_EVENT_TIMEOUT, compute.next()) + .await + .expect("timed out waiting for compute event") + .expect("compute stream ended") + .expect("compute service returned error") +} + +pub(crate) async fn next_subservice_event(service: &mut S) -> S::Output { + timeout(ASYNC_EVENT_TIMEOUT, service.next()) + .await + .expect("timed out waiting for sub-service event") + .expect("sub-service returned error") +} + +pub(crate) async fn assert_no_compute_event(compute: &mut ComputeService

) { + assert!( + timeout(ASYNC_EVENT_TIMEOUT, compute.next()).await.is_err(), + "unexpected follow-up compute event" + ); +} // MockProcessor that implements ProcessorExt and always returns Ok with empty results #[derive(Clone, Default)] @@ -165,11 +212,9 @@ struct TestEnv { } impl TestEnv { - // Setup the chain and compute service. - fn new(chain_len: u32, events_in_block: u32) -> TestEnv { + fn new(mut chain: BlockChain, events_in_block: u32) -> TestEnv { let db = Database::memory(); - let mut chain = BlockChain::mock(chain_len); insert_code_events(&mut chain, events_in_block); mark_as_not_prepared(&mut chain); chain = chain.setup(&db); @@ -182,45 +227,36 @@ impl TestEnv { async fn prepare_and_assert_block(&mut self, block: H256) { self.compute.prepare_block(block); - let event = self - .compute - .next() - .await - .unwrap() - .expect("expect compute service request codes to load"); - let codes_to_load = event.unwrap_request_load_codes(); - - for code_id in codes_to_load { - let Some(CodeData { - original_bytes: code, - .. - }) = self.chain.codes.remove(&code_id) - else { - continue; - }; - - self.compute - .process_code(CodeAndIdUnchecked { code, code_id }); - - let event = self - .compute - .next() - .await - .unwrap() - .expect("expect code will be processing"); - let processed_code_id = event.unwrap_code_processed(); - - assert_eq!(processed_code_id, code_id); + match next_compute_event(&mut self.compute).await { + ComputeEvent::RequestLoadCodes(codes_to_load) => { + for code_id in codes_to_load { + let Some(CodeData { + original_bytes: code, + .. 
+ }) = self.chain.codes.remove(&code_id) + else { + continue; + }; + + self.compute + .process_code(CodeAndIdUnchecked { code, code_id }); + + let processed_code_id = next_compute_event(&mut self.compute) + .await + .unwrap_code_processed(); + assert_eq!(processed_code_id, code_id); + } + + let prepared_block = next_compute_event(&mut self.compute) + .await + .unwrap_block_prepared(); + assert_eq!(prepared_block, block); + } + ComputeEvent::BlockPrepared(prepared_block) => { + assert_eq!(prepared_block, block); + } + event => panic!("unexpected compute event while preparing block: {event:?}"), } - - let event = self - .compute - .next() - .await - .unwrap() - .expect("expect block prepared after processing all codes"); - let prepared_block = event.unwrap_block_prepared(); - assert_eq!(prepared_block, block); } async fn compute_and_assert_announce(&mut self, announce: Announce) { @@ -228,14 +264,9 @@ impl TestEnv { self.compute .compute_announce(announce.clone(), PromisePolicy::Disabled); - let event = self - .compute - .next() + let computed_announce = next_compute_event(&mut self.compute) .await - .unwrap() - .expect("expect block will be processing"); - - let computed_announce = event.unwrap_announce_computed(); + .unwrap_announce_computed(); assert_eq!(computed_announce, announce_hash); self.db.mutate_block_meta(announce.block_hash, |meta| { @@ -256,250 +287,253 @@ fn new_announce(db: &Database, block_hash: H256, gas_allowance: Option) -> } } -#[tokio::test] -async fn block_computation_basic() -> Result<()> { - gear_utils::init_default_logger(); - - let mut env = TestEnv::new(1, 3); - - for block in env.chain.blocks.clone().iter().skip(1) { - env.prepare_and_assert_block(block.hash).await; - - let announce = new_announce(&env.db, block.hash, Some(100)); - env.compute_and_assert_announce(announce).await; - } - - Ok(()) +fn chain_with_event_count_strategy() -> BoxedStrategy<(BlockChain, u32)> { + (1u32..=6, 0u32..=4) + .prop_flat_map(|(chain_len, events_in_block)| 
{ + block_chain_strategy(chain_len).prop_map(move |chain| (chain, events_in_block)) + }) + .boxed() } -#[tokio::test] -async fn multiple_preparation_and_one_processing() -> Result<()> { - gear_utils::init_default_logger(); - - let mut env = TestEnv::new(3, 3); +fn single_block_chain_with_event_count_strategy() -> BoxedStrategy<(BlockChain, u32)> { + (0u32..=4) + .prop_flat_map(|events_in_block| { + block_chain_strategy(1).prop_map(move |chain| (chain, events_in_block)) + }) + .boxed() +} - for block in env.chain.blocks.clone().iter().skip(1) { - env.prepare_and_assert_block(block.hash).await; +proptest! { + #![proptest_config(ProptestConfig::with_cases(64))] + + #[test] + fn block_computation_basic((chain, events_in_block) in chain_with_event_count_strategy()) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let mut env = TestEnv::new(chain, events_in_block); + let block_hashes = env + .chain + .blocks + .iter() + .skip(1) + .map(|block| block.hash) + .collect::>(); + + for block_hash in block_hashes { + env.prepare_and_assert_block(block_hash).await; + + let announce = new_announce(&env.db, block_hash, Some(100)); + env.compute_and_assert_announce(announce).await; + } + }); } - // append announces to prepared blocks, except the last one, so that it can be computed - for i in 1..3 { - let announce = new_announce(&env.db, env.chain.blocks[i].hash, Some(100)); - env.db.mutate_block_meta(announce.block_hash, |meta| { - meta.announces - .get_or_insert_default() - .insert(announce.to_hash()); + #[test] + fn multiple_preparation_and_one_processing( + (chain, events_in_block) in chain_with_event_count_strategy() + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let mut env = TestEnv::new(chain, events_in_block); + let block_hashes = env + .chain + .blocks + .iter() + .skip(1) + .map(|block| block.hash) + .collect::>(); + + for block_hash in block_hashes { + env.prepare_and_assert_block(block_hash).await; + } + + let 
last_index = env.chain.blocks.len() - 1; + for i in 1..last_index { + let announce = new_announce(&env.db, env.chain.blocks[i].hash, Some(100)); + env.db.mutate_block_meta(announce.block_hash, |meta| { + meta.announces + .get_or_insert_default() + .insert(announce.to_hash()); + }); + env.db.set_announce(announce); + } + + let announce = new_announce(&env.db, env.chain.blocks[last_index].hash, Some(100)); + env.compute_and_assert_announce(announce).await; }); - env.db.set_announce(announce); } - let announce = new_announce(&env.db, env.chain.blocks[3].hash, Some(100)); - env.compute_and_assert_announce(announce).await; - - Ok(()) -} - -#[tokio::test] -async fn one_preparation_and_multiple_processing() -> Result<()> { - gear_utils::init_default_logger(); - - let mut env = TestEnv::new(3, 3); - - env.prepare_and_assert_block(env.chain.blocks[3].hash).await; - - for block in env.chain.blocks.clone().iter().skip(1) { - let announce = new_announce(&env.db, block.hash, Some(100)); - env.compute_and_assert_announce(announce).await; + #[test] + fn one_preparation_and_multiple_processing( + (chain, events_in_block) in chain_with_event_count_strategy() + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let mut env = TestEnv::new(chain, events_in_block); + let last_block_hash = env.chain.blocks.back().unwrap().hash; + env.prepare_and_assert_block(last_block_hash).await; + + let block_hashes = env + .chain + .blocks + .iter() + .skip(1) + .map(|block| block.hash) + .collect::>(); + + for block_hash in block_hashes { + let announce = new_announce(&env.db, block_hash, Some(100)); + env.compute_and_assert_announce(announce).await; + } + }); } - Ok(()) -} - -#[tokio::test] -async fn code_validation_request_does_not_block_preparation() -> Result<()> { - gear_utils::init_default_logger(); - - let mut env = TestEnv::new(1, 3); - - let mut block_events = env.chain.blocks[1].as_synced().events.clone(); - - // add invalid event which shouldn't stop block prepare 
- block_events.push(BlockEvent::Router(RouterEvent::CodeValidationRequested( - CodeValidationRequestedEvent { - code_id: CodeId::zero(), - timestamp: 0u64, - tx_hash: H256::random(), - }, - ))); - env.db - .set_block_events(env.chain.blocks[1].hash, &block_events); - env.prepare_and_assert_block(env.chain.blocks[1].hash).await; - - let announce = new_announce(&env.db, env.chain.blocks[1].hash, Some(100)); - env.compute_and_assert_announce(announce.clone()).await; - env.compute_and_assert_announce(announce.clone()).await; - - Ok(()) -} - -#[tokio::test] -async fn code_validation_request_for_already_processed_code_does_not_request_loading() -> Result<()> -{ - gear_utils::init_default_logger(); - - let db = Database::memory(); - let processor = MockProcessor::default(); - let mut compute = ComputeService::new( - ComputeConfig::without_quarantine(), - db.clone(), - processor.clone(), - ); - - let code = create_new_code(1); - let code_id = db.set_original_code(&code); - db.set_code_valid(code_id, true); - - // Setup chain and mark blocks as not prepared - let mut chain = BlockChain::mock(1); - mark_as_not_prepared(&mut chain); - let chain = chain.setup(&db); - let block_hash = chain.blocks[1].hash; - - // Add CodeValidationRequested event for the already-validated code - let events = db.block_events(block_hash).unwrap_or_default(); - let mut new_events = events.clone(); - new_events.push(BlockEvent::Router(RouterEvent::CodeValidationRequested( - CodeValidationRequestedEvent { - code_id, - timestamp: 0u64, - tx_hash: H256::random(), - }, - ))); - db.set_block_events(block_hash, &new_events); - - compute.prepare_block(block_hash); - - // The first event should be BlockPrepared, NOT RequestCodes - // because the code is already validated - let event = compute - .next() - .await - .unwrap() - .expect("expect compute service to produce an event"); - - // Verify block was prepared without requesting code loading - let prepared_block = event.unwrap_block_prepared(); - 
assert_eq!(prepared_block, block_hash); - - // Verify that no follow-up events are produced (no RequestCodes) - let no_follow_up_event = timeout(Duration::from_millis(100), compute.next()).await; - assert!( - no_follow_up_event.is_err(), - "unexpected follow-up compute event after block preparation: {no_follow_up_event:?}" - ); - - // Verify that the processor was NOT called - assert_eq!( - processor.process_code_call_count(), - 0, - "Processor should not be called for already-validated code" - ); - - Ok(()) -} - -#[tokio::test] -async fn code_validation_request_for_non_validated_code_requests_loading() -> Result<()> { - gear_utils::init_default_logger(); - - let db = Database::memory(); - let processor = MockProcessor::default(); - let mut compute = ComputeService::new( - ComputeConfig::without_quarantine(), - db.clone(), - processor.clone(), - ); + #[test] + fn code_validation_request_does_not_block_preparation( + (chain, events_in_block) in single_block_chain_with_event_count_strategy() + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let mut env = TestEnv::new(chain, events_in_block); + let block_hash = env.chain.blocks[1].hash; + let mut block_events = env.chain.blocks[1].as_synced().events.clone(); + + block_events.push(BlockEvent::Router(RouterEvent::CodeValidationRequested( + CodeValidationRequestedEvent { + code_id: CodeId::zero(), + timestamp: 0u64, + tx_hash: H256::random(), + }, + ))); + + env.db.set_block_events(block_hash, &block_events); + env.prepare_and_assert_block(block_hash).await; + + let announce = new_announce(&env.db, block_hash, Some(100)); + env.compute_and_assert_announce(announce.clone()).await; + env.compute_and_assert_announce(announce).await; + }); + } - let code = create_new_code(1); - let code_id = db.set_original_code(&code); - // Note: code is NOT marked as valid (db.code_valid(code_id) is None) - - // Setup chain and mark blocks as not prepared - let mut chain = BlockChain::mock(1); - 
mark_as_not_prepared(&mut chain); - let chain = chain.setup(&db); - let block_hash = chain.blocks[1].hash; - - // Add CodeValidationRequested event for the non-validated code - let events = db.block_events(block_hash).unwrap_or_default(); - let mut new_events = events.clone(); - new_events.push(BlockEvent::Router(RouterEvent::CodeValidationRequested( - CodeValidationRequestedEvent { - code_id, - timestamp: 0u64, - tx_hash: H256::random(), - }, - ))); - db.set_block_events(block_hash, &new_events); - - compute.prepare_block(block_hash); - - // The first event should be RequestCodes because the code is NOT validated - let event = compute - .next() - .await - .unwrap() - .expect("expect compute service to produce an event"); + #[test] + fn code_validation_request_for_already_processed_code_does_not_request_loading( + chain in block_chain_strategy(1) + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = Database::memory(); + let processor = MockProcessor::default(); + let mut compute = ComputeService::new( + ComputeConfig::without_quarantine(), + db.clone(), + processor.clone(), + ); + + let code = create_new_code(1); + let code_id = db.set_original_code(&code); + db.set_code_valid(code_id, true); + + let mut chain = chain; + mark_as_not_prepared(&mut chain); + let chain = chain.setup(&db); + let block_hash = chain.blocks[1].hash; + + let mut new_events = db.block_events(block_hash).unwrap_or_default(); + new_events.push(BlockEvent::Router(RouterEvent::CodeValidationRequested( + CodeValidationRequestedEvent { + code_id, + timestamp: 0u64, + tx_hash: H256::random(), + }, + ))); + db.set_block_events(block_hash, &new_events); + + compute.prepare_block(block_hash); + + let prepared_block = next_compute_event(&mut compute).await.unwrap_block_prepared(); + assert_eq!(prepared_block, block_hash); + assert_no_compute_event(&mut compute).await; + assert_eq!(processor.process_code_call_count(), 0); + }); + } - // Verify that RequestCodes is 
emitted for non-validated code - let codes_to_load = event.unwrap_request_load_codes(); - assert!( - codes_to_load.contains(&code_id), - "CodeId should be requested for loading when not validated" - ); + #[test] + fn code_validation_request_for_non_validated_code_requests_loading( + chain in block_chain_strategy(1) + ) { + gear_utils::init_default_logger(); + + run_async_test(async move { + let db = Database::memory(); + let processor = MockProcessor::default(); + let mut compute = ComputeService::new( + ComputeConfig::without_quarantine(), + db.clone(), + processor.clone(), + ); + + let code = create_new_code(1); + let code_id = db.set_original_code(&code); + + let mut chain = chain; + mark_as_not_prepared(&mut chain); + let chain = chain.setup(&db); + let block_hash = chain.blocks[1].hash; + + let mut new_events = db.block_events(block_hash).unwrap_or_default(); + new_events.push(BlockEvent::Router(RouterEvent::CodeValidationRequested( + CodeValidationRequestedEvent { + code_id, + timestamp: 0u64, + tx_hash: H256::random(), + }, + ))); + db.set_block_events(block_hash, &new_events); - Ok(()) -} + compute.prepare_block(block_hash); -#[tokio::test] -async fn process_code_for_already_processed_valid_code_emits_code_processed() -> Result<()> { - gear_utils::init_default_logger(); + let codes_to_load = next_compute_event(&mut compute) + .await + .unwrap_request_load_codes(); + assert!(codes_to_load.contains(&code_id)); + }); + } - let db = Database::memory(); - let processor = MockProcessor::default(); - let mut compute = ComputeService::new( - ComputeConfig::without_quarantine(), - db.clone(), - processor.clone(), - ); + #[test] + fn process_code_for_already_processed_valid_code_emits_code_processed(nonce in any::()) { + gear_utils::init_default_logger(); - let code = create_new_code(2); - let code_id = db.set_original_code(&code); + run_async_test(async move { + let db = Database::memory(); + let processor = MockProcessor::default(); + let mut compute = 
ComputeService::new( + ComputeConfig::without_quarantine(), + db.clone(), + processor.clone(), + ); - db.set_instrumented_code( - ethexe_runtime_common::VERSION, - code_id, - InstrumentedCode::new(vec![0], InstantiatedSectionSizes::new(0, 0, 0, 0, 0, 0)), - ); - db.set_code_valid(code_id, true); + let code = create_new_code(nonce); + let code_id = db.set_original_code(&code); - compute.process_code(CodeAndIdUnchecked { code_id, code }); + db.set_instrumented_code( + ethexe_runtime_common::VERSION, + code_id, + InstrumentedCode::new(vec![0], InstantiatedSectionSizes::new(0, 0, 0, 0, 0, 0)), + ); + db.set_code_valid(code_id, true); - let event = compute - .next() - .await - .unwrap() - .expect("expect already processed code to produce CodeProcessed event"); - let processed_code_id = event.unwrap_code_processed(); - assert_eq!(processed_code_id, code_id); - - // Verify that the processor was NOT called for already-validated code - // The CodesSubService should short-circuit and emit CodeProcessed without calling the processor - assert_eq!( - processor.process_code_call_count(), - 0, - "Processor should not be called for already-validated code" - ); + compute.process_code(CodeAndIdUnchecked { code_id, code }); - Ok(()) + let processed_code_id = next_compute_event(&mut compute) + .await + .unwrap_code_processed(); + assert_eq!(processed_code_id, code_id); + assert_eq!(processor.process_code_call_count(), 0); + }); + } } From 79996338bd0941f9be83747efadbc57ae6eb159c Mon Sep 17 00:00:00 2001 From: playX18 Date: Thu, 16 Apr 2026 10:34:50 +0700 Subject: [PATCH 2/7] address comments --- ethexe/compute/src/compute.rs | 6 +++--- ethexe/compute/src/tests.rs | 19 ++++++++++++------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index 7d118dcc826..94f89b682b6 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -524,10 +524,10 @@ mod tests { } fn 
promise_test_inputs_strategy() -> BoxedStrategy<(BlockChain, Vec)> { - (4usize..=8) + (4usize..=16) .prop_flat_map(|blockchain_len| { let requestable_indexes = (2..blockchain_len).collect::>(); - let max_selected = requestable_indexes.len().min(3); + let max_selected = requestable_indexes.len(); block_chain_strategy(blockchain_len as u32).prop_flat_map(move |chain| { prop::sample::subsequence(requestable_indexes.clone(), 1..=max_selected) @@ -607,7 +607,7 @@ mod tests { } proptest! { - #![proptest_config(ProptestConfig::with_cases(16))] + #![proptest_config(ProptestConfig::with_cases(64))] #[test] fn test_compute_with_promises( diff --git a/ethexe/compute/src/tests.rs b/ethexe/compute/src/tests.rs index ad9155ebda7..46e73cd147f 100644 --- a/ethexe/compute/src/tests.rs +++ b/ethexe/compute/src/tests.rs @@ -39,7 +39,16 @@ use proptest::{collection, prelude::*}; use std::time::Duration; use tokio::{runtime::Builder, sync::mpsc, time::timeout}; -pub(crate) const ASYNC_EVENT_TIMEOUT: Duration = Duration::from_millis(500); +thread_local! { + // Reuse one current-thread runtime per test thread to avoid rebuilding it for every proptest case. 
+ static TEST_RUNTIME: tokio::runtime::Runtime = Builder::new_current_thread() + .enable_all() + .build() + .expect("failed to build tokio runtime"); +} + +pub(crate) const ASYNC_EVENT_TIMEOUT: Duration = Duration::from_secs(3); +const NO_EVENT_TIMEOUT: Duration = Duration::from_millis(500); pub(crate) fn block_chain_strategy(len: u32) -> BoxedStrategy { any_with::(BlockChainParams::from(len)).boxed() @@ -52,11 +61,7 @@ pub(crate) fn distinct_code_ids(count: usize) -> BoxedStrategy> { } pub(crate) fn run_async_test(future: F) -> F::Output { - Builder::new_current_thread() - .enable_all() - .build() - .expect("failed to build tokio runtime") - .block_on(future) + TEST_RUNTIME.with(|runtime| runtime.block_on(future)) } pub(crate) async fn next_compute_event( @@ -78,7 +83,7 @@ pub(crate) async fn next_subservice_event(service: &mut S) -> S:: pub(crate) async fn assert_no_compute_event(compute: &mut ComputeService

) { assert!( - timeout(ASYNC_EVENT_TIMEOUT, compute.next()).await.is_err(), + timeout(NO_EVENT_TIMEOUT, compute.next()).await.is_err(), "unexpected follow-up compute event" ); } From 9639c39ffc4bc4360c87b8e1c7cbac99eb3963bc Mon Sep 17 00:00:00 2001 From: playX18 Date: Fri, 17 Apr 2026 14:13:27 +0700 Subject: [PATCH 3/7] address comments x2 --- ethexe/compute/src/compute.rs | 27 ++++++++++++++++----------- ethexe/compute/src/prepare.rs | 8 ++++---- ethexe/compute/src/tests.rs | 10 +++++----- 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index 94f89b682b6..ed18b57892e 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -537,13 +537,8 @@ mod tests { .boxed() } - fn predecessor_test_inputs_strategy() -> BoxedStrategy<(BlockChain, usize)> { - (2usize..=16) - .prop_flat_map(|blockchain_len| { - block_chain_strategy(blockchain_len as u32) - .prop_map(move |chain| (chain, blockchain_len)) - }) - .boxed() + fn predecessor_test_inputs_strategy() -> BoxedStrategy { + (2u32..=16).prop_flat_map(block_chain_strategy).boxed() } async fn collect_compute_events( @@ -560,7 +555,7 @@ mod tests { } proptest! { - #![proptest_config(ProptestConfig::with_cases(64))] + #![proptest_config(ProptestConfig::with_cases(32))] #[test] fn test_compute( @@ -671,10 +666,19 @@ mod tests { ComputeService::new(ComputeConfig::without_quarantine(), db.clone(), processor); let mut expected_events = Vec::with_capacity(request_indexes.len() * 2); + // `subsequence` preserves order, predecessors are computed silently, and only the + // requested announces emit the Promise + AnnounceComputed pairs asserted below. 
for index in &request_indexes { let announce = announces_by_block[index].clone(); let announce_hash = announce.to_hash(); - let tx = announce.injected_transactions[0].clone().into_data(); + let tx = announce + .injected_transactions + .first() + .cloned() + .expect( + "request indexes start at 2, so each requested announce carries one injected transaction", + ) + .into_data(); expected_events.push(ComputeEvent::Promise( Promise { @@ -700,7 +704,7 @@ mod tests { #[test] fn test_compute_with_early_break( chain in block_chain_strategy(3), - tx_count in 100usize..=300 + tx_count in 30usize..=100 ) { gear_utils::init_default_logger(); @@ -762,10 +766,11 @@ mod tests { #[test] fn collect_not_computed_predecessors_work_correctly( - (chain, blockchain_len) in predecessor_test_inputs_strategy() + chain in predecessor_test_inputs_strategy() ) { let db = Database::memory(); let blockchain = chain.setup(&db); + let blockchain_len = blockchain.blocks.len() - 1; (0..blockchain_len - 1).for_each(|idx| { let announce_hash = blockchain.block_top_announce(idx).announce.to_hash(); diff --git a/ethexe/compute/src/prepare.rs b/ethexe/compute/src/prepare.rs index 958ba6a2835..2e3fbf3480d 100644 --- a/ethexe/compute/src/prepare.rs +++ b/ethexe/compute/src/prepare.rs @@ -370,7 +370,7 @@ fn prepare_one_block(), 1..=16).prop_flat_map(move |code| { let loaded_code_id = CodeId::generate(&code); let chain = chain.clone(); - distinct_code_ids(3) + distinct_code_ids_sorted(3) .prop_filter( "extra code ids must differ from the preloaded parent code id", move |ids| !ids.contains(&loaded_code_id), @@ -412,7 +412,7 @@ mod tests { #[test] fn test_prepare_one_block( chain in block_chain_strategy(1), - code_ids in distinct_code_ids(2), + code_ids in distinct_code_ids_sorted(2), batch_committed in any::<[u8; 32]>().prop_map(Digest), block1_announce_hash in announce_hash_strategy(), ) { @@ -481,7 +481,7 @@ mod tests { } #[test] - fn test_prepare_with_codes(chain in block_chain_strategy(1), code_ids in 
distinct_code_ids(2)) { + fn test_prepare_with_codes(chain in block_chain_strategy(1), code_ids in distinct_code_ids_sorted(2)) { gear_utils::init_default_logger(); run_async_test(async move { diff --git a/ethexe/compute/src/tests.rs b/ethexe/compute/src/tests.rs index 46e73cd147f..2767401c7ba 100644 --- a/ethexe/compute/src/tests.rs +++ b/ethexe/compute/src/tests.rs @@ -54,7 +54,7 @@ pub(crate) fn block_chain_strategy(len: u32) -> BoxedStrategy { any_with::(BlockChainParams::from(len)).boxed() } -pub(crate) fn distinct_code_ids(count: usize) -> BoxedStrategy> { +pub(crate) fn distinct_code_ids_sorted(count: usize) -> BoxedStrategy> { collection::btree_set(any::<[u8; 32]>().prop_map(CodeId::from), count) .prop_map(|ids| ids.into_iter().collect()) .boxed() @@ -197,7 +197,7 @@ fn insert_code_events(chain: &mut BlockChain, events_in_block: u32) { } } -fn mark_as_not_prepared(chain: &mut BlockChain) { +fn reset_to_unprepared(chain: &mut BlockChain) { // skip genesis for block in chain.blocks.iter_mut().skip(1) { block.prepared = None; @@ -221,7 +221,7 @@ impl TestEnv { let db = Database::memory(); insert_code_events(&mut chain, events_in_block); - mark_as_not_prepared(&mut chain); + reset_to_unprepared(&mut chain); chain = chain.setup(&db); let compute = ComputeService::new_with_defaults(db.clone()); @@ -444,7 +444,7 @@ proptest! { db.set_code_valid(code_id, true); let mut chain = chain; - mark_as_not_prepared(&mut chain); + reset_to_unprepared(&mut chain); let chain = chain.setup(&db); let block_hash = chain.blocks[1].hash; @@ -486,7 +486,7 @@ proptest! 
{ let code_id = db.set_original_code(&code); let mut chain = chain; - mark_as_not_prepared(&mut chain); + reset_to_unprepared(&mut chain); let chain = chain.setup(&db); let block_hash = chain.blocks[1].hash; From fbbb86512c9a2f329b94273a18afdb07359a5f0e Mon Sep 17 00:00:00 2001 From: playX18 Date: Fri, 17 Apr 2026 15:26:41 +0700 Subject: [PATCH 4/7] one proptest_config for ethexe/compute with timeouts --- ethexe/compute/src/compute.rs | 8 ++++---- ethexe/compute/src/prepare.rs | 7 ++++--- ethexe/compute/src/service.rs | 6 ++++-- ethexe/compute/src/tests.rs | 11 ++++++++++- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index ed18b57892e..039ce6c7955 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -407,7 +407,7 @@ mod tests { ComputeService, tests::{ MockProcessor, block_chain_strategy, next_compute_event, next_subservice_event, - run_async_test, + proptest_config, run_async_test, }, }; use ethexe_common::{ @@ -555,7 +555,7 @@ mod tests { } proptest! { - #![proptest_config(ProptestConfig::with_cases(32))] + #![proptest_config(proptest_config(32))] #[test] fn test_compute( @@ -602,7 +602,7 @@ mod tests { } proptest! { - #![proptest_config(ProptestConfig::with_cases(64))] + #![proptest_config(proptest_config(64))] #[test] fn test_compute_with_promises( @@ -762,7 +762,7 @@ mod tests { } proptest! 
{ - #![proptest_config(ProptestConfig::with_cases(128))] + #![proptest_config(proptest_config(128))] #[test] fn collect_not_computed_predecessors_work_correctly( diff --git a/ethexe/compute/src/prepare.rs b/ethexe/compute/src/prepare.rs index 2e3fbf3480d..a20a11c1249 100644 --- a/ethexe/compute/src/prepare.rs +++ b/ethexe/compute/src/prepare.rs @@ -370,7 +370,8 @@ fn prepare_one_block BoxedStrategy { any_with::(BlockChainParams::from(len)).boxed() @@ -88,6 +89,14 @@ pub(crate) async fn assert_no_compute_event(compute: &mut Compu ); } +pub(crate) fn proptest_config(cases: u32) -> ProptestConfig { + ProptestConfig { + cases, + timeout: PROPTEST_TIMEOUT_MS, + ..ProptestConfig::default() + } +} + // MockProcessor that implements ProcessorExt and always returns Ok with empty results #[derive(Clone, Default)] pub(crate) struct MockProcessor { @@ -309,7 +318,7 @@ fn single_block_chain_with_event_count_strategy() -> BoxedStrategy<(BlockChain, } proptest! { - #![proptest_config(ProptestConfig::with_cases(64))] + #![proptest_config(proptest_config(64))] #[test] fn block_computation_basic((chain, events_in_block) in chain_with_event_count_strategy()) { From 0bff4542e35b9af3632902c3b410912e0c9167e5 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 20 Apr 2026 02:24:43 +0000 Subject: [PATCH 5/7] Merge master and resolve compute test conflicts Agent-Logs-Url: https://github.com/gear-tech/gear/sessions/4b49c20d-0d17-4ae6-a87a-2c56ac2353fe Co-authored-by: playX18 <158266309+playX18@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/check.yml | 2 +- Cargo.lock | 204 ++++++++++-------- Cargo.toml | 9 +- core/src/code/metadata.rs | 13 +- ethexe/compute/src/codes.rs | 80 +++---- ethexe/compute/src/compute.rs | 7 +- ethexe/compute/src/lib.rs | 141 +++++++++++- ethexe/compute/src/tests.rs | 3 +- ethexe/consensus/src/lib.rs | 193 +++++++++++++++-- ethexe/ethereum/src/router/mod.rs | 7 +- 
ethexe/processor/src/handling/mod.rs | 1 - .../src/handling/run/chunk_execution_spawn.rs | 97 +++------ ethexe/processor/src/lib.rs | 188 +++++++++++++--- ethexe/processor/src/tests.rs | 47 ++-- .../src/{handling => }/thread_pool.rs | 124 ++++++----- ethexe/service/src/lib.rs | 42 +++- ethexe/service/src/tests/utils/env.rs | 18 +- ethexe/service/src/tests/utils/mod.rs | 10 +- utils/gear-workspace-hack/Cargo.toml | 32 +-- 20 files changed, 851 insertions(+), 369 deletions(-) rename ethexe/processor/src/{handling => }/thread_pool.rs (50%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6e4e56d6d0a..f34cba04d9f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -284,7 +284,7 @@ jobs: - name: "Install: Foundry" uses: foundry-rs/foundry-toolchain@v1 with: - version: nightly-c07d504b4ae67754584f4e05ff0c547a43c50f7b + version: nightly-f1abb2ca347187bb6dea8c3881ca44ce50aab1e7 - name: "Show: Versioning" run: | diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index db0ad147196..0f0ee08912f 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -65,7 +65,7 @@ jobs: - name: "Install: Foundry" uses: foundry-rs/foundry-toolchain@v1 with: - version: nightly-c07d504b4ae67754584f4e05ff0c547a43c50f7b + version: nightly-f1abb2ca347187bb6dea8c3881ca44ce50aab1e7 - name: "Install: Node.js" uses: actions/setup-node@v6 diff --git a/Cargo.lock b/Cargo.lock index 418fb3ee3e8..dfefb85cdf5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -110,8 +110,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85805c194576017df6c11057504e1d60b36f3913f8e365945486931f6ee81e40" dependencies = [ "alloy-consensus", 
"alloy-contract", @@ -135,9 +136,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.2.30" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f374d3c6d729268bbe2d0e0ff992bb97898b2df756691a62ee1d5f0506bc39" +checksum = "f4e9e31d834fe25fe991b8884e4b9f0e59db4a97d86e05d1464d6899c013cd62" dependencies = [ "alloy-primitives", "num_enum 0.7.5", @@ -147,8 +148,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8dbe4e5e9107bf6854e7550b666ca654ff2027eabf8153913e2e31ac4b089779" dependencies = [ "alloy-eips", "alloy-primitives", @@ -173,8 +175,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88fc7bbfb98cf5605a35aadf0ba43a7d9f1608d6f220d05e4fbd5144d3b0b625" dependencies = [ "alloy-consensus", "alloy-eips", @@ -186,8 +189,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c16fa30b623e40a5b216da00f3b61870f5cbe863b59816ac1ecc2489515a40" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -288,8 +292,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb4919fa34b268842f434bfafa9c09136ab7b1a87ce0dd40a61befa35b5408c" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -310,8 +315,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e111e22c1a2133e9ebfd9051ea0eaf63559594d2f50d43cbc6762fbb95fc3c2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -337,9 +343,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84e3cf01219c966f95a460c95f1d4c30e12f6c18150c21a30b768af2a2a29142" +checksum = "e9dbe713da0c737d9e5e387b0ba790eb98b14dd207fe53eef50e19a5a8ec3dac" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -349,8 +355,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b6af6f374c1eeef8ab8dc26232cd440db167322a4207a3debd3d1ee565ca47" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -363,8 +370,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0a3f5a7f3678b71d33fcc45b714fab8928dbc647d5aff2145e72032d5c849bb" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -388,8 +396,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.8.3" -source 
= "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb50dc1fb0e0b2c8748d5bee1aa7acdd18f9e036311bc93a71d97be624030317" dependencies = [ "alloy-consensus", "alloy-eips", @@ -400,8 +409,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85195890fcee519312718dc8418035935ad0d57f57943ca82689732432a702c9" dependencies = [ "alloy-genesis", "alloy-hardforks", @@ -448,8 +458,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2ba5468f78c8893be2d68a7f2fda61753336e5653f006af19781001b5f99e6c" dependencies = [ "alloy-chains", "alloy-consensus", @@ -490,8 +501,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffcefb5d3391a320eadb95d398e4135f8cc35c7bf29a6bdb357eadcfc5ee5638" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -533,8 +545,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"222fd4efff0fb9a25184684742c44fe9fa9a16c4ab5bf97583e71c86598ef8f0" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -557,8 +570,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "974df1e56405c27cb8242381f45d8b212ba9df5006046ccf704764a2a4634366" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -571,8 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06bc10b0dca4f5bfc3cd30ed46eab5d651b5bb2cd300d683bdcdf5d2bfe6e82c" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -582,18 +597,24 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "949c0f16a94ae33cdb1139b8dbf9e34d7f26ebfe97962e2a4d620b5f65f48fe4" dependencies = [ "alloy-consensus-any", + "alloy-network-primitives", + "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", + "serde", + "serde_json", ] [[package]] name = "alloy-rpc-types-beacon" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8f7fa8ca056bb797a368aeed329e6ace6b62ee4271432ac36ab8ae87a5e60d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -607,8 +628,9 @@ dependencies = 
[ [[package]] name = "alloy-rpc-types-engine" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e59bc947935732cae5b072753e5e034c0b70a8b031c2839f45e2659ba07df9ae" dependencies = [ "alloy-consensus", "alloy-eips", @@ -623,8 +645,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc280a41931bd419af86e9e859dd9726b73313aaa2e479b33c0e344f4b892ddb" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -643,8 +666,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4848831ff994c88b1c32b7df9c4c1c3eedea4b535bde5eb3c421ef0bdc5ac052" dependencies = [ "alloy-primitives", "serde", @@ -653,8 +677,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84b8ad9890b212e224291024b1aecfeef72127d27a2f6eebc5e347c40275c4bf" dependencies = [ "alloy-primitives", "async-trait", @@ -667,8 +692,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c67d2372aada343130d41e249b59a3cef29b1678dcd3fd80f1c2c4d6b5318f2" dependencies = [ "alloy-consensus", "alloy-network", @@ -685,9 +711,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09eb18ce0df92b4277291bbaa0ed70545d78b02948df756bbd3d6214bf39a218" +checksum = "ab81bab693da9bb79f7a95b64b394718259fdd7e41dceeced4cad57cb71c4f6a" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -699,9 +725,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95d9fa2daf21f59aa546d549943f10b5cce1ae59986774019fbedae834ffe01b" +checksum = "489f1620bb7e2483fb5819ed01ab6edc1d2f93939dce35a5695085a1afd1d699" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -711,16 +737,16 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", + "sha3", "syn 2.0.114", "syn-solidity", - "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9396007fe69c26ee118a19f4dee1f5d1d6be186ea75b3881adf16d87f8444686" +checksum = "56cef806ad22d4392c5fc83cf8f2089f988eb99c7067b4e0c6f1971fc1cca318" dependencies = [ "alloy-json-abi", "const-hex", @@ -736,9 +762,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af67a0b0dcebe14244fc92002cd8d96ecbf65db4639d479f5fcd5805755a4c27" +checksum = "a6df77fea9d6a2a75c0ef8d2acbdfd92286cc599983d3175ccdc170d3433d249" dependencies = [ "serde", "winnow", @@ -746,9 +772,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.5.2" +version = "1.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "09aeea64f09a7483bdcd4193634c7e5cf9fd7775ee767585270cd8ce2d69dc95" +checksum = "64612d29379782a5dde6f4b6570d9c756d734d760c0c94c254d361e678a6591f" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -758,8 +784,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b7b755e64ae6b5de0d762ed2c780e072167ea5e542076a559e00314352a0bf" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -780,8 +807,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a29980e69119444ed26b75e7ee5bed2043870f904a64318297e55800db686564" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -795,8 +823,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4b71dc951db66795cfb52eef835f64cf15163bc93b656e061b457ce5ebff370" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -829,8 +858,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.8.3" -source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d8228b9236479ff16b03041b64b86c2bd4e53da1caa45d59b5868cd1571131e" dependencies = [ "darling 0.23.0", 
"proc-macro2", @@ -1955,7 +1985,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.11.0", "log", "prettyplease 0.2.37", "proc-macro2", @@ -1972,7 +2002,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90dbd31c98227229239363921e60fcf5e558e43ec69094d46fc4996f08d1d5bc" dependencies = [ - "bitcoin_hashes 0.14.1", + "bitcoin_hashes 0.13.0", ] [[package]] @@ -2882,7 +2912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -5065,7 +5095,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -8672,7 +8702,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.1", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -9139,7 +9169,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -11666,7 +11696,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9224be3459a0c1d6e9b0f42ab0e76e98b29aef5aba33c0487dfcf47ea08b5150" dependencies = [ - "proc-macro-crate 3.4.0", + "proc-macro-crate 1.1.3", "proc-macro2", "quote", "syn 1.0.109", @@ -11678,7 +11708,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -13954,7 +13984,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.5.0", + "heck 0.4.1", "itertools 0.12.1", "log", "multimap 0.10.1", @@ -14189,7 +14219,7 @@ dependencies = [ "quinn-udp 0.5.14", "rustc-hash 2.1.1", "rustls 0.23.36", - "socket2 0.6.1", + "socket2 0.5.10", "thiserror 2.0.17", "tokio", "tracing", @@ -14288,9 +14318,9 @@ dependencies = [ "cfg_aliases 0.2.1", "libc", "once_cell", - "socket2 0.6.1", + "socket2 0.5.10", "tracing", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -15051,7 +15081,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -15064,7 +15094,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -15167,7 +15197,7 @@ dependencies = [ "security-framework 3.5.1", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -15188,7 +15218,7 @@ dependencies = [ "security-framework 3.5.1", "security-framework-sys", "webpki-root-certs 1.0.5", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -18652,9 +18682,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.5.2" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f92d01b5de07eaf324f7fca61cc6bd3d82bbc1de5b6c963e6fe79e86f36580d" +checksum = "53f425ae0b12e2f5ae65542e00898d500d4d318b4baf09f40fd0d410454e9947" dependencies = [ "paste", "proc-macro2", @@ -18784,7 +18814,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -20928,7 +20958,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.48.0", ] [[package]] diff 
--git a/Cargo.toml b/Cargo.toml index 82c1e17b959..30a207b166a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,10 +116,10 @@ members = [ [workspace.dependencies] gear-workspace-hack = "0.1.0" -alloy = "1.8" # TODO: #5160 use release version of alloy when BlobGasFiller is fixed -alloy-chains = "0.2" -alloy-primitives = { version = "1.5", default-features = false } -alloy-sol-types = { version = "1.5", default-features = false } +alloy = "2.0" +alloy-chains = "0.2.33" +alloy-primitives = { version = "1.5.7", default-features = false } +alloy-sol-types = { version = "1.5.7", default-features = false } anyhow = { version = "1.0.86", default-features = false } arbitrary = "1.3.2" async-recursion = "1.1.1" @@ -634,7 +634,6 @@ inherits = "release" debug = true [patch.crates-io] -alloy = { version = "1.8", git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation" } gear-workspace-hack = { path = "utils/gear-workspace-hack" } # core2 0.4.0 was yanked on crates.io; pin upstream git so cargo metadata doesn't fail on yanked lookup. # Needed by cid -> sc-network transitive; triggers during wasm-builder sub-project resolve. diff --git a/core/src/code/metadata.rs b/core/src/code/metadata.rs index 69dd4ff1a24..9b69329094b 100644 --- a/core/src/code/metadata.rs +++ b/core/src/code/metadata.rs @@ -32,7 +32,18 @@ use scale_info::{ /// Status of the instrumentation. #[derive( - Clone, Copy, Debug, Decode, DecodeAsType, Encode, EncodeAsType, TypeInfo, PartialEq, Eq, Hash, + Clone, + Copy, + Debug, + Decode, + DecodeAsType, + Encode, + EncodeAsType, + TypeInfo, + PartialEq, + Eq, + Hash, + derive_more::IsVariant, )] pub enum InstrumentationStatus { /// Code is not instrumented yet. 
diff --git a/ethexe/compute/src/codes.rs b/ethexe/compute/src/codes.rs index 5083e07a913..cd8203d8090 100644 --- a/ethexe/compute/src/codes.rs +++ b/ethexe/compute/src/codes.rs @@ -16,17 +16,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{ComputeError, ProcessorExt, Result, service::SubService}; +use crate::{ProcessorExt, Result, service::SubService}; use ethexe_common::{ CodeAndIdUnchecked, db::{CodesStorageRO, CodesStorageRW}, }; use ethexe_db::Database; use ethexe_processor::{ProcessedCodeInfo, ValidCodeInfo}; +use futures::{FutureExt, StreamExt, future::BoxFuture, stream::FuturesUnordered}; use gprimitives::CodeId; use metrics::Gauge; -use std::task::{Context, Poll}; -use tokio::task::JoinSet; +use std::{ + future, + task::{Context, Poll}, +}; /// Metrics for the [`CodesSubService`]. #[derive(Clone, metrics_derive::Metrics)] @@ -41,7 +44,7 @@ pub struct CodesSubService { processor: P, metrics: Metrics, - processions: JoinSet>, + processions: FuturesUnordered>>, } impl CodesSubService

{ @@ -50,7 +53,7 @@ impl CodesSubService

{ db, processor, metrics: Metrics::default(), - processions: JoinSet::new(), + processions: FuturesUnordered::new(), } } @@ -70,36 +73,37 @@ impl CodesSubService

{ "Instrumented code {code_id:?} must exist in database" ); } - self.processions.spawn(async move { Ok(code_id) }); + self.processions.push(future::ready(Ok(code_id)).boxed()); } else { let db = self.db.clone(); let mut processor = self.processor.clone(); - self.processions.spawn_blocking(move || { - processor - .process_code(code_and_id) - .map(|ProcessedCodeInfo { code_id, valid }| { - if let Some(ValidCodeInfo { - code, + self.processions.push( + async move { + let ProcessedCodeInfo { code_id, valid } = + processor.process_code(code_and_id).await?; + if let Some(ValidCodeInfo { + code, + instrumented_code, + code_metadata, + }) = valid + { + db.set_original_code(&code); + db.set_instrumented_code( + ethexe_runtime_common::VERSION, + code_id, instrumented_code, - code_metadata, - }) = valid - { - db.set_original_code(&code); - db.set_instrumented_code( - ethexe_runtime_common::VERSION, - code_id, - instrumented_code, - ); - db.set_code_metadata(code_id, code_metadata); - db.set_code_valid(code_id, true); - } else { - db.set_code_valid(code_id, false); - } - - code_id - }) - }); + ); + db.set_code_metadata(code_id, code_metadata); + db.set_code_valid(code_id, true); + } else { + db.set_code_valid(code_id, false); + } + + Ok(code_id) + } + .boxed(), + ); } self.metrics @@ -112,14 +116,14 @@ impl SubService for CodesSubService

{ type Output = CodeId; fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll> { - futures::ready!(self.processions.poll_join_next(cx)) - .map(|res| { - self.metrics - .processing_codes - .set(self.processions.len() as f64); - res.map_err(ComputeError::CodeProcessJoin)? - }) - .map_or(Poll::Pending, Poll::Ready) + if let Poll::Ready(Some(res)) = self.processions.poll_next_unpin(cx) { + self.metrics + .processing_codes + .set(self.processions.len() as f64); + return Poll::Ready(res); + } + + Poll::Pending } } diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index 039ce6c7955..48a2f1257a2 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -444,7 +444,7 @@ mod tests { const USER_ID: ActorId = ActorId::new([1u8; 32]); - pub fn upload_code(processor: &mut Processor, code: &[u8], db: &Database) -> CodeId { + pub async fn upload_code(processor: &mut Processor, code: &[u8], db: &Database) -> CodeId { let code_id = CodeId::generate(code); let ValidCodeInfo { @@ -456,6 +456,7 @@ mod tests { code: code.to_vec(), code_id, }) + .await .expect("failed to process code") .valid .expect("code is invalid"); @@ -614,7 +615,7 @@ mod tests { let db = Database::memory(); let mut processor = Processor::new(db.clone()).unwrap(); let ping_code_id = - test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db).await; let ping_id = ActorId::from(0x10000); let blockchain = chain.setup(&db); let blockchain_len = blockchain.blocks.len() - 1; @@ -713,7 +714,7 @@ mod tests { let mut processor = Processor::new(db.clone()).unwrap(); let ping_code_id = - test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db).await; let ping_id = ActorId::from(0x10000); let blockchain = chain.setup(&db); diff --git a/ethexe/compute/src/lib.rs b/ethexe/compute/src/lib.rs index 
a5c3b8618db..b8f11a00ad9 100644 --- a/ethexe/compute/src/lib.rs +++ b/ethexe/compute/src/lib.rs @@ -16,6 +16,136 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +//! # Ethexe Compute +//! +//! Orchestrates the three pipelines that turn on-chain data into executed +//! state for the ethexe node: code validation, block preparation, and +//! announce computation. The crate wraps `ethexe-processor` and exposes its +//! progress as a `futures::Stream` of [`ComputeEvent`]s: the outer service +//! submits work through a few input methods, then polls the stream and +//! handles each event that comes out. +//! +//! [`ComputeService`] composes three independent sub-services. Each does +//! one thing and emits one family of events: +//! +//! - `codes` — validates and instruments a WASM code blob and marks its +//! validity in the database. Emits [`ComputeEvent::CodeProcessed`]. +//! - `prepare` — brings a synced block (and any not-yet-prepared ancestors) +//! into a state where it can be executed, requesting missing code blobs +//! from the caller along the way. Emits [`ComputeEvent::RequestLoadCodes`] +//! and [`ComputeEvent::BlockPrepared`]. +//! - `compute` — executes an announce (computing any missing ancestor +//! announces first), optionally streaming promises for it. Emits +//! [`ComputeEvent::Promise`] and [`ComputeEvent::AnnounceComputed`]. +//! +//! ## Role in the stack and relation to other crates +//! +//! - `ethexe-processor` is the backend. Compute is generic over the +//! [`ProcessorExt`] trait defined here and has a direct impl for +//! [`Processor`]; the only other impl in the tree is a test mock +//! (`tests::MockProcessor`) that lets the sub-service tests run without +//! any real WASM execution. +//! - `ethexe-blob-loader` is **not** a direct dependency. When `prepare` +//! discovers codes with unknown validation status, it yields +//! 
[`ComputeEvent::RequestLoadCodes`] upstream; the service layer is +//! responsible for calling the blob loader, and then feeds the loaded +//! bytes back into compute via [`ComputeService::process_code`]. That +//! way compute itself never has to make network calls. +//! - `ethexe-db` is the only place compute reads from and writes to. +//! - `ethexe-service` is the sole consumer: it polls the `futures::Stream` +//! produced by [`ComputeService`] inside the main `tokio::select!` loop +//! and routes each [`ComputeEvent`] variant to the rest of the node +//! (consensus, network, blob-loader). +//! +//! ## Entry points +//! +//! | Method | Effect | +//! |----------------------------------------------|-----------------------------------------------------------------------------------------| +//! | [`ComputeService::process_code`] | Queue a code blob for validation + instrumentation + DB persistence. | +//! | [`ComputeService::prepare_block`] | Queue a synced block for preparation (walks ancestors, emits code requests). | +//! | [`ComputeService::compute_announce`] | Queue an announce for execution with a [`PromisePolicy`](ethexe_common::PromisePolicy). | +//! | `::poll_next` | Drive all three sub-services and yield the next [`ComputeEvent`]. | +//! +//! ## Code processing pipeline (`codes` sub-service) +//! +//! For every code submitted through [`ComputeService::process_code`] the +//! stream eventually yields exactly one [`ComputeEvent::CodeProcessed`] +//! (carrying the same `CodeId`) or a [`ComputeError`]. This holds both +//! for fresh codes and for codes that had already been validated in a +//! previous run, so the caller does not have to de-duplicate. +//! +//! Multiple codes submitted at once can be processed concurrently. +//! +//! ## Block preparation pipeline (`prepare` sub-service) +//! +//! For every block hash submitted through [`ComputeService::prepare_block`] +//! the stream eventually yields exactly one [`ComputeEvent::BlockPrepared`] +//! 
for that hash or a [`ComputeError`]. Before the block-prepared event, +//! the stream may emit one or more [`ComputeEvent::RequestLoadCodes`] if +//! the block — or any of its still-unprepared ancestors — references codes +//! whose validity has not yet been established. The caller must fetch +//! those codes (out of scope for this crate) and feed them back in through +//! [`ComputeService::process_code`]; preparation resumes automatically as +//! the missing codes arrive. +//! +//! ## Announce computation pipeline (`compute` sub-service) +//! +//! For every announce submitted through [`ComputeService::compute_announce`] +//! with a [`PromisePolicy`](ethexe_common::PromisePolicy), the stream +//! eventually yields exactly one [`ComputeEvent::AnnounceComputed`] for +//! that announce or a [`ComputeError`]. If the caller passed +//! [`PromisePolicy::Enabled`](ethexe_common::PromisePolicy), zero or more +//! [`ComputeEvent::Promise`] events for the same announce are yielded +//! first. Every `Promise` for a given announce is yielded strictly before +//! the `AnnounceComputed` of that announce — `AnnounceComputed` is the +//! "all promises for this announce have been delivered" marker. +//! +//! Computation is sequential: at most one announce is executed at a time. +//! If the announce's parent (or any further ancestor) has not been +//! computed yet, missing ancestors are computed first, in order. +//! Ancestors are always computed without promise collection regardless of +//! the requested policy — promises describe the user-visible result of +//! the target announce only. +//! +//! The target block must already be prepared; otherwise the computation +//! fails with [`ComputeError::BlockNotPrepared`]. +//! +//! Actual WASM execution is delegated to [`ProcessorExt::process_programs`]. +//! +//! ## Canonical event quarantine +//! +//! Ethereum events do not become visible to the runtime on the block they +//! arrive in. 
When building the execution input for a block, compute +//! instead takes the events from an ancestor that is +//! [`ComputeConfig::canonical_quarantine`](ComputeConfig) blocks older. +//! If the walk back would cross genesis, the returned event list is +//! empty — i.e. the first `canonical_quarantine` blocks after genesis +//! see no Ethereum events at all. +//! +//! ## Event flow summary +//! +//! | [`ComputeEvent`] | Fired by | Expected consumer | +//! |---------------------------|----------|-------------------------------------------------------| +//! | `CodeProcessed(code_id)` | `codes` | Informational. | +//! | `RequestLoadCodes(set)` | `prepare`| Handed to `ethexe-blob-loader` to fetch code blobs. | +//! | `BlockPrepared(hash)` | `prepare`| Handed to `ethexe-consensus`. | +//! | `AnnounceComputed(hash)` | `compute`| Handed to `ethexe-consensus`. | +//! | `Promise(p, ah)` | `compute`| Handed to `ethexe-consensus` for signing. | +//! +//! ## When modifying this crate +//! +//! - A code result must reach the `prepare` sub-service before the +//! corresponding `CodeProcessed` is emitted upstream, otherwise a block +//! waiting on that code will stall for an extra poll. +//! - An announce must only be computed after its block has been prepared. +//! - For announce execution, canonical events must always be read via +//! [`find_canonical_events_post_quarantine`], never directly via +//! `db.block_events(...)` from the announce's own block. Taking the raw +//! events would skip the quarantine and produce non-deterministic state +//! across nodes that disagree on a recent reorg. +//! - For any single announce, `AnnounceComputed` must be the last event +//! emitted; every `Promise` that belongs to it comes strictly before. 
+ pub use compute::{ ComputeConfig, ComputeSubService, utils::{find_canonical_events_post_quarantine, prepare_executable_for_announce}, @@ -62,8 +192,6 @@ pub enum ComputeError { BlockHeaderNotFound(H256), #[error("block validators committed for era not found for block({0})")] CommittedEraNotFound(H256), - #[error("process code join error")] - CodeProcessJoin(#[from] tokio::task::JoinError), #[error("codes queue not found for computed block({0})")] CodesQueueNotFound(H256), #[error("last committed batch not found for computed block({0})")] @@ -101,7 +229,10 @@ pub trait ProcessorExt: Sized + Unpin + Send + Clone + 'static { executable: ExecutableData, promise_out_tx: Option>, ) -> impl Future> + Send; - fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result; + fn process_code( + &mut self, + code_and_id: CodeAndIdUnchecked, + ) -> impl Future> + Send; } impl ProcessorExt for Processor { @@ -115,7 +246,7 @@ impl ProcessorExt for Processor { .map_err(Into::into) } - fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { - self.process_code(code_and_id).map_err(Into::into) + async fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { + self.process_code(code_and_id).await.map_err(Into::into) } } diff --git a/ethexe/compute/src/tests.rs b/ethexe/compute/src/tests.rs index 739be1f0dbb..584e5999253 100644 --- a/ethexe/compute/src/tests.rs +++ b/ethexe/compute/src/tests.rs @@ -147,12 +147,11 @@ impl ProcessorExt for MockProcessor { Ok(self.process_programs_result.take().unwrap_or_default()) } - fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { + async fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { self.process_code_calls .lock() .unwrap() .push(code_and_id.clone()); - Ok(self .process_codes_result .take() diff --git a/ethexe/consensus/src/lib.rs b/ethexe/consensus/src/lib.rs index c1b8ae43850..e6365e0887c 100644 --- a/ethexe/consensus/src/lib.rs +++ 
b/ethexe/consensus/src/lib.rs @@ -18,19 +18,186 @@ //! # Ethexe Consensus //! -//! This crate provides controlling a behaviour of ethexe node depending on incoming blocks. -//! -//! The main components are: -//! - [`ConsensusService`]: A trait defining the core interface for consensus services -//! - [`ConsensusEvent`]: An enum representing various consensus events which have to be processed by outer services -//! - [`ConnectService`]: An implementation of consensus to run "connect-node" -//! - [`ValidatorService`]: An implementation of consensus to run "validator-node" -//! -//! The crate is organized into several modules: -//! - `connect`: Connection management functionality -//! - `validator`: Block validation services and implementations -//! - `utils`: Utility functions and shared data structures -//! - `announces`: Logic for handling announce branching and related operations +//! Decides what an ethexe node should do as Ethereum blocks arrive: validate +//! announces produced by other nodes, produce announces of its own if it is +//! the producer for a block, coordinate threshold-signed batch commitments, +//! and submit those batches to the on-chain Router contract. +//! +//! Ethereum is the authoritative ledger — this crate does not invent its own +//! BFT protocol. It decides which announces to compute, collects enough +//! validator signatures on the resulting state, and posts the aggregated +//! commitment on-chain. Finality follows from the host chain. +//! +//! Two implementations of [`ConsensusService`] are provided: +//! +//! - [`ConnectService`] — a passive "connect-node" that tracks announces +//! from producers, asks `ethexe-compute` to execute them, and requests +//! missing announces from peers when needed. It knows the validator +//! set (so it can tell whose announce to accept for each block), but +//! it holds no signing key and does not submit anything on-chain. +//! - [`ValidatorService`] — an active validator. In addition to what +//! 
`ConnectService` does, it produces announces when it is the +//! producer for a block, collects validator signatures on batch +//! commitments, and submits the multi-signed batch to the Router +//! contract. +//! +//! Both share the same [`ConsensusService`] trait and the same +//! [`ConsensusEvent`] output stream, so `ethexe-service` can drive them +//! uniformly. +//! +//! ## Role in the stack and relation to other crates +//! +//! - `ethexe-observer` feeds Ethereum block data through +//! [`ConsensusService::receive_new_chain_head`] and the follow-up +//! [`ConsensusService::receive_synced_block`] notifications. +//! - `ethexe-compute` signals execution progress through +//! [`ConsensusService::receive_prepared_block`], +//! [`ConsensusService::receive_computed_announce`], and hands raw +//! promises back through +//! [`ConsensusService::receive_promise_for_signing`]. +//! - `ethexe-network` delivers producer announces, validation requests +//! and replies, fetched announces and network-forwarded injected +//! transactions. Outgoing network messages leave as +//! [`ConsensusEvent::PublishMessage`], [`ConsensusEvent::PublishPromise`] +//! and [`ConsensusEvent::RequestAnnounces`]. +//! - `ethexe-ethereum` is reached only from [`ValidatorService`], through +//! the [`BatchCommitter`] trait, to submit aggregated batch +//! commitments to the Router contract. [`ConnectService`] neither +//! signs nor posts anything on-chain. +//! - `ethexe-service` is the sole consumer: it routes every trait call +//! into the consensus service and routes every [`ConsensusEvent`] to +//! the right subsystem (compute, network, logs). +//! +//! ## Entry points +//! +//! All inputs arrive through the [`ConsensusService`] trait. Outputs leave +//! through the `futures::Stream` impl that the same trait requires. +//! +//! | Trait method | Meaning of the input | +//! 
|-----------------------------------------------------------|------------------------------------------------------------------------| +//! | [`receive_new_chain_head`](ConsensusService::receive_new_chain_head) | A new Ethereum chain head. | +//! | [`receive_synced_block`](ConsensusService::receive_synced_block) | The block's data is now available in the DB. | +//! | [`receive_prepared_block`](ConsensusService::receive_prepared_block) | The block is now prepared. | +//! | [`receive_computed_announce`](ConsensusService::receive_computed_announce) | An announce has finished executing and its result is persisted. | +//! | [`receive_announce`](ConsensusService::receive_announce) | A signed producer announce. | +//! | [`receive_promise_for_signing`](ConsensusService::receive_promise_for_signing) | A raw promise that this validator should sign. | +//! | [`receive_validation_request`](ConsensusService::receive_validation_request) | A request to validate a batch commitment. | +//! | [`receive_validation_reply`](ConsensusService::receive_validation_reply) | A signed reply on a batch this validator is coordinating. | +//! | [`receive_announces_response`](ConsensusService::receive_announces_response) | A response to a previous [`ConsensusEvent::RequestAnnounces`]. | +//! | [`receive_injected_transaction`](ConsensusService::receive_injected_transaction) | An injected transaction offered to this validator's pool. | +//! +//! ## Output events +//! +//! | [`ConsensusEvent`] | What it tells the service layer | +//! |--------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------| +//! | [`AnnounceAccepted`](ConsensusEvent::AnnounceAccepted) / [`AnnounceRejected`](ConsensusEvent::AnnounceRejected) | Informational result of validating a received producer announce. | +//! 
| [`ComputeAnnounce`](ConsensusEvent::ComputeAnnounce) | The outer service must hand this announce to `ethexe-compute`, with the given `PromisePolicy`. | +//! | [`PublishMessage`](ConsensusEvent::PublishMessage) | Signed validator-to-validator message to gossip over the network. | +//! | [`PublishPromise`](ConsensusEvent::PublishPromise) | Signed promise to gossip over the network and deliver to RPC subscribers. | +//! | [`RequestAnnounces`](ConsensusEvent::RequestAnnounces) | Ask the network to fetch announces we are missing. | +//! | [`CommitmentSubmitted`](ConsensusEvent::CommitmentSubmitted) | Informational: a batch was successfully submitted to the Router contract. | +//! | [`Warning`](ConsensusEvent::Warning) | Informational: a non-fatal anomaly (unexpected input, bad reply, etc.) was detected. | +//! +//! ## ConnectService behaviour +//! +//! `ConnectService` observes the chain. For each new Ethereum block it +//! waits until the block is synced and prepared, resolves which +//! validator is the producer for that block, and either validates the +//! producer's announce if one has already been received or keeps +//! waiting for it. +//! +//! Accepted announces turn into [`ConsensusEvent::ComputeAnnounce`] +//! with [`PromisePolicy::Disabled`](ethexe_common::PromisePolicy) — +//! observer nodes never collect promises. If any announce in the +//! ancestor chain is missing locally, the service emits +//! [`ConsensusEvent::RequestAnnounces`] and waits for the network's +//! response before proceeding. +//! +//! ## ValidatorService behaviour +//! +//! A validator runs one attempt per Ethereum block. For every new chain +//! head the service computes which validator is the producer for that +//! block and enters one of two roles. A new chain head always aborts +//! the previous attempt. +//! +//! State flow: +//! +//! ```text +//! Initial +//! │ +//! ├── self is producer ──► Producer ───► Coordinator ───► Initial +//! │ (collects replies, +//! │ submits batch) +//! 
│ +//! └── other producer ──► Subordinate ─► Participant ────► Initial +//! (validates the +//! producer's batch, +//! signs & replies) +//! ``` +//! +//! These state names appear in emitted [`ConsensusEvent::Warning`] +//! messages, so they are the right handle when reading logs or tracing +//! an issue. +//! +//! Contract visible at the crate boundary: +//! +//! - The service emits exactly one [`ConsensusEvent::ComputeAnnounce`] per +//! block it wants executed (an announce it produced itself or one it +//! accepted from the producer). [`PromisePolicy::Enabled`](ethexe_common::PromisePolicy) +//! is set only when this validator is the producer — only producers +//! collect promises. +//! - When coordinating a batch, the service gossips a +//! [`ConsensusEvent::PublishMessage`] with the validation request, +//! collects enough [`ConsensusService::receive_validation_reply`] calls +//! to satisfy the configured [`ValidatorConfig::signatures_threshold`], +//! and then submits the multi-signed batch through the injected +//! [`BatchCommitter`]. On success a [`ConsensusEvent::CommitmentSubmitted`] +//! is emitted. +//! - When acting as participant, the service validates the incoming +//! batch against its local state. On acceptance it publishes a signed +//! reply over [`ConsensusEvent::PublishMessage`]; on rejection it emits +//! a [`ConsensusEvent::Warning`] and sends nothing to the coordinator. +//! - Unexpected or malformed inputs produce [`ConsensusEvent::Warning`] +//! rather than aborting the service. +//! +//! ## Slot and era model +//! +//! The producer for a block is a deterministic function of the validator +//! set for the block's era and the block's timestamp. Era boundaries are +//! computed from the Ethereum block timestamp relative to the genesis +//! timestamp stored in the database config (see `ProtocolTimelines`). +//! +//! ## Injected transactions +//! +//! On a validator node, injected transactions are checked for standard +//! 
validity (not duplicated, not outdated, destination exists and is +//! initialized, etc.) and accepted ones are stored in a local pool. When +//! this validator is next the producer for a block, it drains pending +//! transactions from the pool into the announce it creates. +//! `ConnectService` ignores injected transactions entirely. +//! +//! ## When modifying this crate +//! +//! - Ethereum is the authoritative ledger. The crate +//! only decides which announces to execute and which batches to co-sign. +//! - A new Ethereum chain head always resets the validator to `Initial` +//! for that block. Do not introduce state carried across chain heads +//! beyond what is already kept in the database. +//! - `ConnectService` must never sign anything or submit anything +//! on-chain. It has no signer and no `BatchCommitter`; keep it that +//! way. +//! - Unexpected inputs (replies from non-validators, announces from +//! non-producers, transitions that do not match the current state) must +//! be surfaced as [`ConsensusEvent::Warning`], not as hard errors that +//! tear down the stream. +//! - The producer for a block must remain a pure function of on-chain +//! data and the block timestamp. Wall-clock time must not leak into +//! this decision (the only existing wall-clock knob is +//! [`ValidatorConfig::producer_delay`] and it only paces when the +//! producer acts, never who the producer is). +//! - A batch is submitted on-chain only after the number of collected +//! signatures reaches [`ValidatorConfig::signatures_threshold`]; this +//! is the sole trigger. 
use anyhow::Result; use ethexe_common::{ diff --git a/ethexe/ethereum/src/router/mod.rs b/ethexe/ethereum/src/router/mod.rs index 5bd09a2b2a0..7cb31fd108c 100644 --- a/ethexe/ethereum/src/router/mod.rs +++ b/ethexe/ethereum/src/router/mod.rs @@ -369,9 +369,12 @@ impl Router { } else { format!("{err}") }; - return Err(anyhow!( + log::error!( "Failed to estimate gas for batch commitment: (error: {error}, block info: {latest_block}, calldata: 0x{}, batch commitment: {commitment:?})", - hex::encode(calldata), + hex::encode(calldata) + ); + return Err(anyhow!( + "Failed to estimate gas for batch commitment: {error}" )); } }; diff --git a/ethexe/processor/src/handling/mod.rs b/ethexe/processor/src/handling/mod.rs index bea415794c7..1c97c40cd91 100644 --- a/ethexe/processor/src/handling/mod.rs +++ b/ethexe/processor/src/handling/mod.rs @@ -23,7 +23,6 @@ use gprimitives::ActorId; pub(crate) mod events; pub(crate) mod overlaid; pub(crate) mod run; -mod thread_pool; /// A high-level interface for executing ops, /// which mutate states based on the current block request events. diff --git a/ethexe/processor/src/handling/run/chunk_execution_spawn.rs b/ethexe/processor/src/handling/run/chunk_execution_spawn.rs index 72b674bc023..eaa3d671141 100644 --- a/ethexe/processor/src/handling/run/chunk_execution_spawn.rs +++ b/ethexe/processor/src/handling/run/chunk_execution_spawn.rs @@ -21,9 +21,9 @@ //! This module handles spawning program execution tasks in a thread pool. use super::*; -use crate::{handling::thread_pool::ThreadPool, host::InstanceWrapper}; +use crate::thread_pool; use ethexe_runtime_common::ProcessQueueContext; -use std::sync::LazyLock; +use futures::stream::FuturesOrdered; /// An alias introduced for better readability of the chunks execution steps. 
pub type ChunkItemOutput = (ActorId, H256, ProgramJournals, u64); @@ -39,55 +39,6 @@ pub async fn spawn_chunk_execution( chunk: Vec<(ActorId, H256)>, queue_type: MessageType, ) -> Result> { - struct Executable { - queue_type: MessageType, - block_info: BlockInfo, - promise_policy: PromisePolicy, - program_id: ActorId, - state_hash: H256, - instrumented_code: InstrumentedCode, - code_metadata: CodeMetadata, - executor: InstanceWrapper, - db: Box, - gas_allowance_for_chunk: u64, - promise_out_tx: Option>, - } - - fn execute_chunk_item(executable: Executable) -> Result { - let Executable { - queue_type, - block_info, - promise_policy, - program_id, - state_hash, - instrumented_code, - code_metadata, - mut executor, - db, - gas_allowance_for_chunk, - promise_out_tx, - } = executable; - - let (jn, new_state_hash, gas_spent) = executor.run( - db, - ProcessQueueContext { - program_id, - state_root: state_hash, - queue_type, - instrumented_code, - code_metadata, - gas_allowance: GasAllowanceCounter::new(gas_allowance_for_chunk), - block_info, - promise_policy, - }, - promise_out_tx, - )?; - Ok((program_id, new_state_hash, jn, gas_spent)) - } - - static THREAD_POOL: LazyLock>> = - LazyLock::new(|| ThreadPool::new(execute_chunk_item)); - let gas_allowance_for_chunk = ctx .inner() .gas_allowance_counter @@ -102,28 +53,32 @@ pub async fn spawn_chunk_execution( timestamp: block_header.timestamp, }; - let executables = chunk + chunk .into_iter() .map(|(program_id, state_hash)| { let (instrumented_code, code_metadata) = ctx.program_code(program_id)?; - - let executor = ctx.inner().instance_creator.instantiate()?; - - Ok(Executable { - queue_type, - block_info, - promise_policy, - program_id, - state_hash, - instrumented_code, - code_metadata, - executor, - db: ctx.inner().db.cas().clone_boxed(), - gas_allowance_for_chunk, - promise_out_tx: ctx.inner().promise_out_tx.clone(), - }) + let mut executor = ctx.inner().instance_creator.instantiate()?; + let db = 
ctx.inner().db.cas().clone_boxed(); + let promise_out_tx = ctx.inner().promise_out_tx.clone(); + Ok(thread_pool::spawn(move || { + let (jn, new_state_hash, gas_spent) = executor.run( + db, + ProcessQueueContext { + program_id, + state_root: state_hash, + queue_type, + instrumented_code, + code_metadata, + gas_allowance: GasAllowanceCounter::new(gas_allowance_for_chunk), + block_info, + promise_policy, + }, + promise_out_tx, + )?; + Ok((program_id, new_state_hash, jn, gas_spent)) + })) }) - .collect::>>()?; - - THREAD_POOL.spawn_many(executables).try_collect().await + .collect::>>()? + .try_collect() + .await } diff --git a/ethexe/processor/src/lib.rs b/ethexe/processor/src/lib.rs index 073552e3f46..b0d5b2a2206 100644 --- a/ethexe/processor/src/lib.rs +++ b/ethexe/processor/src/lib.rs @@ -16,7 +16,143 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Program's execution service for eGPU. +//! # Ethexe Processor +//! +//! Low-level execution engine that runs Gear programs inside the ethexe +//! node. The crate embeds a pre-compiled [`ethexe_runtime`] WASM artifact +//! and runs it in [`wasmtime`] with host functions that give the runtime +//! access to the database, lazy pages, sandboxed nested WASM, promise +//! publishing, allocation and logging. On top of that it exposes a small +//! API to: +//! +//! - validate and instrument Gear WASM code blobs, +//! - execute an ethexe block (announce) — routing [`BlockRequestEvent`]s +//! into program state mutations, appending [`InjectedTransaction`]s to +//! program queues, running scheduled tasks, and draining program +//! message queues until gas or other limits are exhausted, +//! - simulate a single message against a copy-on-write view of the +//! database without committing anything, for RPC reply queries. +//! +//! ## Role in the stack and relation to other crates +//! +//! `ethexe-processor` is the bottom of the execution stack. It is +//! 
consumed by:
+//!
+//! - `ethexe-compute` — calls [`Processor::process_programs`] and
+//!   [`Processor::process_code`] through its `ProcessorExt` trait (the
+//!   trait is defined in `ethexe-compute`, together with a direct impl
+//!   for [`Processor`]). Compute is what the service layer talks to —
+//!   the processor itself is never polled as a stream and emits no
+//!   events.
+//! - `ethexe-rpc` — uses [`OverlaidProcessor`] (obtained via
+//!   [`Processor::overlaid`]) to simulate message execution against an
+//!   overlaid database for read-only reply queries.
+//! - `ethexe-service` — constructs the `Processor` instance at startup
+//!   and hands it to `ComputeService`.
+//!
+//! ## Entry points
+//!
+//! | Method | Purpose |
+//! |-------------------------------------------|-------------------------------------------------------------------------|
+//! | [`Processor::process_code`] | Validate + instrument a WASM blob. Async (instrumentation runs on the thread pool); does not touch the DB. |
+//! | [`Processor::process_programs`] | Execute an ethexe block: events → tasks → queues. Main async workflow. |
+//! | [`Processor::overlaid`] | Wrap `self` into an [`OverlaidProcessor`] backed by an overlaid DB. |
+//! | [`OverlaidProcessor::execute_for_reply`] | Simulate a single incoming message and return the reply. |
+//!
+//! ## `process_programs` contract
+//!
+//! Given an [`ExecutableData`] (block header, program states, schedule,
+//! injected transactions, block request events, and optional gas
+//! allowance), [`Processor::process_programs`] runs three sequential
+//! stages and returns a [`FinalizedBlockTransitions`]:
+//!
+//! 1. Handle injected transactions and block events: injected transactions
+//!    are appended to program injected queues; router and mirror events
+//!    drive the corresponding state mutations (program creation, balance
+//!    top-up, message queueing, value claims, etc.).
+//! 2. Run scheduled tasks that are due at the current block height
(mailbox expiry cleanup, reservation removal, etc.). +//! 3. Drain program message queues: the injected queue first, then the +//! canonical queue — unless a soft limit kicks in before that. +//! This stage is skipped entirely when `gas_allowance` is `None`. +//! Promises are collected only during the injected pass; the +//! canonical pass runs with the promise sender dropped, so any code +//! that introduces new promise emission points must make sure they +//! are reached from the injected queue. +//! +//! The third stage uses a chunked parallel executor: non-empty program +//! queues are partitioned by queue size into chunks of up to +//! `ProcessorConfig::chunk_size` programs, and the programs inside a +//! chunk run in parallel, each with its own wasmtime `Store`. +//! Determinism-relevant property: because programs in a chunk run +//! simultaneously, the block gas allowance counter is charged by the +//! **maximum** gas spent in the chunk, not the sum. Execution stops when +//! all queues are empty, the gas allowance is exhausted, or one of the +//! configured soft limits (outgoing messages, payload bytes, call +//! replies, program modifications) kicks in. +//! +//! ## Overlay execution +//! +//! [`OverlaidProcessor`] wraps a [`Processor`] whose database is swapped +//! for an overlaid, copy-on-write view. Mutations are kept in memory and +//! discarded when the overlay is dropped, so the underlying state is +//! never touched. [`OverlaidProcessor::execute_for_reply`] synthesizes a +//! single [`MessageQueueingRequestedEvent`] into the target program's +//! canonical queue and runs against this overlay with the following +//! simulation semantics: +//! +//! - the target program's canonical queue is trimmed to only the +//! synthetic dispatch, so the simulation starts from a clean slate +//! for the target; +//! - every other program whose queue is about to be executed has that +//! queue cleared and its scheduled run skipped — non-target programs +//! 
only ever execute messages produced during the simulation; +//! - when a journal emits a message to another program, the receiver's +//! queue is cleared first so only the cascading message is processed +//! there; +//! - as soon as a reply to the synthetic message is seen, the +//! simulation short-circuits without performing further +//! queue-clearing work. +//! +//! ## Lazy pages +//! +//! Program memory is not materialized up front. Pages are protected +//! after instance setup and loaded from the database on the first +//! access fault, through the [`gear_lazy_pages`] integration. +//! +//! ## Determinism and error handling +//! +//! - The chunk partitioning is a deterministic function of the program +//! → queue-size map and `chunk_size`, so every node executing the +//! same block arrives at the same partitioning. +//! - The host-side gas counter increments by the maximum gas spent in +//! the chunk; WASM-side state hashing runs inside the WASM runtime +//! and does not depend on chunk layout. +//! - WASM traps (out-of-bounds memory, `unreachable`, wasmtime errors) +//! and host-function panics routed through the `sp_wasm_interface` +//! panic hook are surfaced as [`InstanceError::Wasmtime`] and +//! propagated out of [`Processor::process_programs`]. Raw Rust panics +//! inside a chunk worker are caught and re-raised on the caller via +//! `std::panic::resume_unwind` — they unwind the async task, they do +//! not become an `Err` variant. +//! +//! ## Configuration +//! +//! [`ProcessorConfig`] currently exposes a single knob, `chunk_size`, +//! which controls the number of programs executed in parallel per pass. +//! The default is [`DEFAULT_CHUNK_SIZE`] (16). +//! +//! ## When modifying this crate +//! +//! - Processor must be deterministic. +//! - Changing Processor logic may cause consensus mismatches in already +//! deployed ethexe networks, so be careful when modifying the +//! processing pipeline, and always check backwards compatibility with +//! 
deployed networks. +//! - Processor is designed to write only in CAS, it must NEVER modify +//! key-value storage from Database. + +pub use host::InstanceError; use core::num::NonZero; use ethexe_common::{ @@ -31,7 +167,7 @@ use ethexe_runtime_common::{ state::Storage, }; use gear_core::{ - code::{CodeMetadata, InstrumentationStatus, InstrumentedCode}, + code::{CodeMetadata, InstrumentedCode}, ids::prelude::CodeIdExt, rpc::ReplyInfo, }; @@ -40,13 +176,11 @@ use handling::{ProcessingHandler, overlaid::OverlaidRunContext, run::CommonRunCo use host::InstanceCreator; use tokio::sync::mpsc; -pub use host::InstanceError; - mod handling; mod host; - #[cfg(test)] mod tests; +mod thread_pool; // Default amount of programs in one chunk to be processed in parallel. pub const DEFAULT_CHUNK_SIZE: NonZero = NonZero::new(16).unwrap(); @@ -137,7 +271,10 @@ impl Processor { OverlaidProcessor(self) } - pub fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { + pub async fn process_code( + &mut self, + code_and_id: CodeAndIdUnchecked, + ) -> Result { log::debug!("Processing upload code {code_and_id:?}"); let CodeAndIdUnchecked { code, code_id } = code_and_id; @@ -149,28 +286,27 @@ impl Processor { }); } - let Some((instrumented_code, code_metadata)) = - self.creator.instantiate()?.instrument(&code)? - else { - return Ok(ProcessedCodeInfo { - code_id, - valid: None, - }); - }; - - let InstrumentationStatus::Instrumented { .. 
} = code_metadata.instrumentation_status() - else { - panic!("Instrumented code returned, but instrumentation status is not Instrumented"); - }; - - Ok(ProcessedCodeInfo { - code_id, - valid: Some(ValidCodeInfo { + let mut instance = self.creator.instantiate()?; + let valid = thread_pool::spawn(move || -> Result<_> { + let instrumented_code = instance.instrument(&code)?; + let info = instrumented_code.map(|(instrumented_code, code_metadata)| ValidCodeInfo { code, instrumented_code, code_metadata, - }), + }); + Ok(info) }) + .await?; + + if let Some(valid) = &valid { + let status = valid.code_metadata.instrumentation_status(); + assert!( + status.is_instrumented(), + "Instrumented code returned, but instrumentation status is not Instrumented: {status:?}" + ); + } + + Ok(ProcessedCodeInfo { code_id, valid }) } pub async fn process_programs( @@ -278,13 +414,13 @@ impl Processor { } } -#[derive(Clone, Default)] +#[derive(Debug, Clone, Default)] pub struct ProcessedCodeInfo { pub code_id: CodeId, pub valid: Option, } -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ValidCodeInfo { pub code: Vec, pub instrumented_code: InstrumentedCode, diff --git a/ethexe/processor/src/tests.rs b/ethexe/processor/src/tests.rs index 100d19ae1e9..8f664618ab5 100644 --- a/ethexe/processor/src/tests.rs +++ b/ethexe/processor/src/tests.rs @@ -73,7 +73,7 @@ mod utils { (code_id, code) } - pub fn upload_code(processor: &mut Processor, code: &[u8]) -> CodeId { + pub async fn upload_code(processor: &mut Processor, code: &[u8]) -> CodeId { let code_id = CodeId::generate(code); let ValidCodeInfo { @@ -85,6 +85,7 @@ mod utils { code: code.to_vec(), code_id, }) + .await .expect("failed to process code") .valid .expect("code is invalid"); @@ -98,7 +99,7 @@ mod utils { code_id } - pub fn setup_test_env_and_load_codes( + pub async fn setup_test_env_and_load_codes( codes: [&[u8]; N], ) -> (Processor, BlockChain, [CodeId; N]) { let db = Database::memory(); @@ -107,7 +108,7 @@ mod utils { let mut 
code_ids = Vec::new(); for code in codes { - code_ids.push(upload_code(&mut processor, code)); + code_ids.push(upload_code(&mut processor, code).await); } (processor, chain, code_ids.try_into().unwrap()) @@ -135,7 +136,8 @@ mod utils { } pub async fn simple_init_test(code: impl AsRef<[u8]>) -> InBlockTransitions { - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_ref()]); + let (mut processor, chain, [code_id]) = + setup_test_env_and_load_codes([code.as_ref()]).await; let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -180,7 +182,8 @@ mod utils { async fn ping_init() { init_logger(); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); + let (mut processor, chain, [code_id]) = + setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; // Empty processing for block1 let executable = ExecutableData { @@ -270,8 +273,8 @@ async fn ping_init() { .expect("failed to process send message"); } -#[test] -fn handle_new_code_valid() { +#[tokio::test] +async fn handle_new_code_valid() { init_logger(); let mut processor = Processor::new(Database::memory()).expect("failed to create processor"); @@ -283,6 +286,7 @@ fn handle_new_code_valid() { code: code.clone(), code_id, }) + .await .map(|res| (res.code_id, res.valid.expect("code must be valid"))) .unwrap(); @@ -296,8 +300,8 @@ fn handle_new_code_valid() { ); } -#[test] -fn handle_new_code_invalid() { +#[tokio::test] +async fn handle_new_code_invalid() { init_logger(); let mut processor = Processor::new(Database::memory()).expect("failed to create processor"); @@ -307,6 +311,7 @@ fn handle_new_code_invalid() { assert!( processor .process_code(CodeAndIdUnchecked { code, code_id }) + .await .expect("failed to call runtime api") .valid .is_none() @@ -318,7 +323,7 @@ async fn ping_pong() { init_logger(); let (mut processor, chain, [code_id, ..]) = - 
setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]); + setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let user_id = ActorId::from(10); @@ -398,7 +403,7 @@ async fn async_and_ping() { }; let (mut processor, chain, [ping_code_id, upload_code_id, ..]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]); + setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -537,7 +542,7 @@ async fn many_waits() { let (_, code) = wat_to_wasm(wat.as_str()); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]); + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]).await; let block1 = chain.blocks[1].to_simple(); let wake_block = chain.blocks[1 + blocks_to_wait].to_simple(); @@ -689,7 +694,7 @@ async fn overlay_execution() { }; let (mut processor, chain, [ping_code_id, async_code_id]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]); + setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); // ----------------------------------------------------------------------------- @@ -919,7 +924,8 @@ async fn injected_ping_pong() { init_logger(); let (promise_out_tx, mut promise_receiver) = mpsc::unbounded_channel(); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); + let (mut processor, chain, [code_id]) = + setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let user_1 = ActorId::from(10); @@ -1029,7 +1035,8 @@ async fn injected_prioritized_over_canonical() { init_logger(); let (promise_out_tx, mut promise_receiver) = 
mpsc::unbounded_channel(); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); + let (mut processor, chain, [code_id]) = + setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let canonical_user = ActorId::from(10); @@ -1141,7 +1148,8 @@ async fn injected_prioritized_over_canonical() { async fn executable_balance_charged() { init_logger(); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); + let (mut processor, chain, [code_id]) = + setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -1228,7 +1236,7 @@ async fn executable_balance_injected_panic_not_charged() { let (promise_out_tx, mut promise_receiver) = mpsc::unbounded_channel(); let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([demo_panic_payload::WASM_BINARY]); + setup_test_env_and_load_codes([demo_panic_payload::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let user_id = ActorId::from(10); @@ -1361,7 +1369,8 @@ async fn insufficient_executable_balance_still_charged() { init_logger(); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); + let (mut processor, chain, [code_id]) = + setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -1532,7 +1541,7 @@ async fn injected_and_events_then_tasks_then_queues() { "#; let (_, code) = wat_to_wasm(wat); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]); + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]).await; let task_user = ActorId::from(10); let injected_user_pk = PrivateKey::random(); diff --git 
a/ethexe/processor/src/handling/thread_pool.rs b/ethexe/processor/src/thread_pool.rs similarity index 50% rename from ethexe/processor/src/handling/thread_pool.rs rename to ethexe/processor/src/thread_pool.rs index a8b9fe2043e..d18c947d8b9 100644 --- a/ethexe/processor/src/handling/thread_pool.rs +++ b/ethexe/processor/src/thread_pool.rs @@ -18,50 +18,63 @@ //! Small custom thread pool interface, because `rayon` is too smart //! and `threadpool` is not smart enough. +//! +//! The global pool in this module is shared across the processor crate for all +//! CPU-bound work, including code instrumentation and chunk execution. +//! `ETHEXE_PROCESSOR_NUM_THREADS` can be used to override the worker count, and +//! because the pool is initialized lazily, the value is captured on first use. + +use std::{env, num::NonZero, panic::AssertUnwindSafe, sync::LazyLock, thread}; + +// Shared across the processor crate for all CPU-bound work. +static DEFAULT_THREAD_POOL: LazyLock = LazyLock::new(ThreadPool::new); + +/// Spawns a given task. +/// +/// Returns the task result once a worker finishes executing it. +/// +/// # Panics +/// +/// Propagates panics from the worker thread to the main thread. +/// +/// Panics if worker thread dies despite using +/// `std::panic::catch_unwind` around the handler. +pub async fn spawn(f: F) -> R +where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, +{ + DEFAULT_THREAD_POOL.spawn(f).await +} -use futures::prelude::*; -use std::{num::NonZero, panic::AssertUnwindSafe, thread}; - -type Task = (I, tokio::sync::oneshot::Sender>); +type Task = Box; -/// Thread pool that handler tasks of type `I` -/// and produces outputs of type `O`. #[derive(Debug, Clone)] -pub struct ThreadPool { - task_tx: crossbeam::channel::Sender>, +struct ThreadPool { + task_tx: crossbeam::channel::Sender, } -impl ThreadPool -where - I: Send + 'static, - O: Send + 'static, -{ - /// Creates a new thread pool. 
- pub fn new(handler: F) -> Self - where - F: FnMut(I) -> O + Send + Clone + 'static, - { - let n_cpus = thread::available_parallelism().map_or(1, NonZero::get); +impl ThreadPool { + fn new() -> Self { + let n_cpus = env::var("ETHEXE_PROCESSOR_NUM_THREADS") + .ok() + .and_then(|num| num.parse().ok()) + .or_else(|| thread::available_parallelism().ok()) + .map_or(1, NonZero::get); - let (task_tx, task_rx) = crossbeam::channel::unbounded::>(); + let (task_tx, task_rx) = crossbeam::channel::unbounded::(); for _ in 0..n_cpus { let task_rx = task_rx.clone(); - let handler = handler.clone(); thread::spawn(move || { loop { - let Ok((task, sender)) = task_rx.recv() else { + let Ok(task) = task_rx.recv() else { // All connected `ThreadPool` instances were dropped break; }; - let mut handler = handler.clone(); - - // Output receiver could be cancelled - let _ = sender.send(std::panic::catch_unwind(AssertUnwindSafe(move || { - handler(task) - }))); + task(); } }); } @@ -69,53 +82,46 @@ where Self { task_tx } } - /// Spawns a given task. - /// - /// Returns `Ok(result)` if a worker successfully - /// processed the task and `Err(panic_info)` if the worker panicked. - /// - /// # Panics - /// - /// Propagates panics from the worker thread to the main thread. - /// - /// Panics if worker thread dies despite using - /// `std::panic::catch_unwind` around the handler. 
- pub async fn spawn(&self, input: I) -> O { + async fn spawn(&self, f: F) -> R + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { let (tx, rx) = tokio::sync::oneshot::channel(); - self.task_tx - .try_send((input, tx)) - .expect("The channel is unbounded"); + let f = Box::new(move || { + let res = std::panic::catch_unwind(AssertUnwindSafe(f)); + + // Output receiver could be cancelled + let _ = tx.send(res); + }); + + self.task_tx.try_send(f).expect("The channel is unbounded"); rx.await .expect("Worker thread has died") .unwrap_or_else(|err| std::panic::resume_unwind(err)) } - - /// Spawns tasks from an iterator of inputs, - /// producing a stream of outputs. - /// - /// The outputs are ordered the same as inputs. - pub fn spawn_many>(&self, input: II) -> impl Stream { - input - .into_iter() - .map(|input| self.spawn(input)) - .collect::>() - } } #[cfg(test)] mod tests { use super::*; + use futures::{FutureExt, StreamExt, stream::FuturesOrdered}; + + fn task(n: usize) -> String { + "amogus".repeat(n) + } #[tokio::test] async fn test_thread_pool() { - let thread_pool = ThreadPool::new(|n| "amogus".repeat(n)); + assert_eq!(spawn(|| task(2)).await, "amogusamogus"); - assert_eq!(thread_pool.spawn(2).await, "amogusamogus"); assert_eq!( - thread_pool - .spawn_many([0, 1, 2, 3]) + [0, 1, 2, 3] + .into_iter() + .map(|n| spawn(move || task(n))) + .collect::>() .collect::>() .await, vec![ @@ -131,7 +137,7 @@ mod tests { // Ensure that panics don't break things for _ in 0..n_cpus * 2 { assert!( - AssertUnwindSafe(thread_pool.spawn(usize::MAX)) + AssertUnwindSafe(spawn(|| task(usize::MAX))) .catch_unwind() .await .is_err() diff --git a/ethexe/service/src/lib.rs b/ethexe/service/src/lib.rs index 64f8330a8ef..5949f6757ee 100644 --- a/ethexe/service/src/lib.rs +++ b/ethexe/service/src/lib.rs @@ -50,8 +50,9 @@ use crate::config::{Config, ConfigPublicKey}; use alloy::{ node_bindings::{Anvil, AnvilInstance}, providers::{ProviderBuilder, RootProvider, 
ext::AnvilApi}, + rpc::types::anvil::Metadata, }; -use anyhow::{Context, Result}; +use anyhow::{Context, Result, bail}; use async_trait::async_trait; use ethexe_blob_loader::{BlobLoader, BlobLoaderEvent, BlobLoaderService, ConsensusLayerConfig}; use ethexe_common::{ @@ -77,7 +78,7 @@ use ethexe_processor::{ProcessedCodeInfo, Processor, ProcessorConfig, ValidCodeI use ethexe_prometheus::{PrometheusEvent, PrometheusService}; use ethexe_rpc::{RpcEvent, RpcServer}; use ethexe_service_utils::{OptionFuture as _, OptionStreamNext as _}; -use futures::{StreamExt, stream::FuturesUnordered}; +use futures::{FutureExt, StreamExt, stream::FuturesUnordered}; use gprimitives::{ActorId, CodeId, H256}; use gsigner::secp256k1::{Address, PrivateKey, PublicKey, Signer}; use std::{ @@ -157,6 +158,21 @@ pub struct Service { impl Service { /// Number of reserved dev accounts (deployer, validator). const RESERVED_DEV_ACCOUNTS: u32 = 2; + /// Expected Foundry toolchain commit sha. + const FOUNDRY_TOOLCHAIN_COMMIT_SHA: &str = "f1abb2ca347187bb6dea8c3881ca44ce50aab1e7"; + + fn check_foundry_toolchain_version(client_commit_sha: Option) -> Result<()> { + if let Some(client_commit_sha) = client_commit_sha + && client_commit_sha != Self::FOUNDRY_TOOLCHAIN_COMMIT_SHA + { + bail!( + "Commit hash mismatch in Foundry toolchain! Please use: `foundryup --install nightly-{commit_sha} --force`.", + commit_sha = Self::FOUNDRY_TOOLCHAIN_COMMIT_SHA, + ); + } + + Ok(()) + } pub async fn configure_dev_environment( key_path: PathBuf, @@ -213,6 +229,12 @@ impl Service { .connect(anvil.ws_endpoint().as_str()) .await?; + let Metadata { + client_commit_sha, .. + } = provider.anvil_metadata().await?; + + Self::check_foundry_toolchain_version(client_commit_sha)?; + const ETHER: u128 = 1_000_000_000_000_000_000; let balance = 10_000 * ETHER; let balance = balance.try_into().expect("infallible"); @@ -321,7 +343,7 @@ impl Service { "👶 Genesis block hash wasn't found. 
Call router.lookupGenesisHash() first" ); - anyhow::bail!("Failed to query valid genesis hash"); + bail!("Failed to query valid genesis hash"); } else { log::info!("👶 Genesis block hash: {genesis_block_hash:?}"); } @@ -555,7 +577,7 @@ impl Service { fetching_result = network_fetcher.maybe_next_some() => Event::Fetching(fetching_result), event = prometheus.maybe_next_some() => event.into(), _ = rpc_handle.as_mut().maybe() => { - anyhow::bail!("`RPCWorker` has terminated, shutting down...") + bail!("`RPCWorker` has terminated, shutting down...") } }; @@ -751,7 +773,7 @@ impl Service { } } PrometheusEvent::ServerClosed(result) => { - anyhow::bail!("Prometheus server closed with result: {result:?}"); + bail!("Prometheus server closed with result: {result:?}"); } }, Event::Fetching(result) => { @@ -806,11 +828,13 @@ impl GenesisInitializer for GenesisInitializerFromFile { fn process_code(&mut self, code_id: CodeId, code: Vec) -> ethexe_db::CodeProcessingFuture { let mut cloned_processor = self.processor.clone(); - let func = move || { + async move { let ProcessedCodeInfo { code_id: _, valid: info, - } = cloned_processor.process_code(CodeAndIdUnchecked { code_id, code })?; + } = cloned_processor + .process_code(CodeAndIdUnchecked { code_id, code }) + .await?; let Some(ValidCodeInfo { code: _, @@ -822,7 +846,7 @@ impl GenesisInitializer for GenesisInitializerFromFile { }; Ok(Some((instrumented_code, code_metadata))) - }; - Box::pin(async move { func() }) + } + .boxed() } } diff --git a/ethexe/service/src/tests/utils/env.rs b/ethexe/service/src/tests/utils/env.rs index aa70a06af27..020d8a50b36 100644 --- a/ethexe/service/src/tests/utils/env.rs +++ b/ethexe/service/src/tests/utils/env.rs @@ -26,7 +26,7 @@ use crate::{ use alloy::{ node_bindings::{Anvil, AnvilInstance}, providers::{ProviderBuilder, RootProvider, ext::AnvilApi}, - rpc::types::anvil::MineOptions, + rpc::types::anvil::{Metadata, MineOptions}, }; use anyhow::Context; use ethexe_blob_loader::{BlobLoader, 
BlobLoaderService, ConsensusLayerConfig}; @@ -162,13 +162,19 @@ impl TestEnv { let anvil = anvil.spawn(); + let provider: RootProvider = ProviderBuilder::default() + .connect(anvil.ws_endpoint().as_str()) + .await + .expect("failed to connect to anvil"); + + let Metadata { + client_commit_sha, .. + } = provider.anvil_metadata().await?; + + Service::check_foundry_toolchain_version(client_commit_sha)?; + // By default, anvil set system time as block time. For testing purposes we need to have constant increment. if !continuous_block_generation { - let provider: RootProvider = ProviderBuilder::default() - .connect(anvil.ws_endpoint().as_str()) - .await - .expect("failed to connect to anvil"); - provider .anvil_set_block_timestamp_interval(block_time.as_secs()) .await diff --git a/ethexe/service/src/tests/utils/mod.rs b/ethexe/service/src/tests/utils/mod.rs index 6df0f8a0e46..b5d8a0f74f6 100644 --- a/ethexe/service/src/tests/utils/mod.rs +++ b/ethexe/service/src/tests/utils/mod.rs @@ -20,6 +20,7 @@ pub use env::*; use ethexe_db::{GenesisInitializer, dump::StateDump}; use ethexe_processor::Processor; pub use events::*; +use futures::FutureExt; mod env; mod events; @@ -51,15 +52,16 @@ impl GenesisInitializer for GenesisInitializerFromDump { code: Vec, ) -> ethexe_db::CodeProcessingFuture { let mut cloned_processor = self.processor.clone(); - let func = move || { + async move { let info = cloned_processor - .process_code(ethexe_common::CodeAndIdUnchecked { code_id, code })?; + .process_code(ethexe_common::CodeAndIdUnchecked { code_id, code }) + .await?; let Some(valid) = info.valid else { return Ok(None); }; Ok(Some((valid.instrumented_code, valid.code_metadata))) - }; - Box::pin(async move { func() }) + } + .boxed() } } diff --git a/utils/gear-workspace-hack/Cargo.toml b/utils/gear-workspace-hack/Cargo.toml index 46154589a6e..9b18b02b08c 100644 --- a/utils/gear-workspace-hack/Cargo.toml +++ b/utils/gear-workspace-hack/Cargo.toml @@ -217,17 +217,17 @@ features = 
["test-helpers"] [dependencies] aes = { version = "0.8", default-features = false, features = ["zeroize"] } ahash = { version = "0.8" } -alloy = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } +alloy = { version = "2", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } alloy-chains = { version = "0.2" } -alloy-consensus = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde"] } -alloy-contract = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["pubsub"] } -alloy-eips = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde", "std"] } +alloy-consensus = { version = "2", default-features = false, features = ["kzg", "serde"] } +alloy-contract = { version = "2", default-features = false, features = ["pubsub"] } +alloy-eips = { version = "2", default-features = false, features = ["kzg", "serde", "std"] } alloy-json-abi = { version = "1", features = ["serde_json"] } alloy-primitives = { version = "1", features = ["k256", "rlp", "serde"] } -alloy-provider = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil-node", "ws"] } -alloy-rpc-client = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["reqwest", "ws"] } -alloy-rpc-types = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } -alloy-signer-local = { git = 
"https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["mnemonic"] } +alloy-provider = { version = "2", default-features = false, features = ["anvil-node", "ws"] } +alloy-rpc-client = { version = "2", default-features = false, features = ["reqwest", "ws"] } +alloy-rpc-types = { version = "2", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } +alloy-signer-local = { version = "2", default-features = false, features = ["mnemonic"] } alloy-sol-type-parser = { version = "1", default-features = false, features = ["serde", "std"] } alloy-sol-types = { version = "1", features = ["json"] } anyhow = { version = "1" } @@ -489,17 +489,17 @@ zeroize = { version = "1", features = ["derive", "std"] } [build-dependencies] aes = { version = "0.8", default-features = false, features = ["zeroize"] } ahash = { version = "0.8" } -alloy = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } +alloy = { version = "2", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } alloy-chains = { version = "0.2" } -alloy-consensus = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde"] } -alloy-contract = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["pubsub"] } -alloy-eips = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde", "std"] } +alloy-consensus = { version = "2", default-features = false, features = ["kzg", "serde"] } +alloy-contract = { version = "2", default-features = false, features = ["pubsub"] } +alloy-eips = { version = "2", 
default-features = false, features = ["kzg", "serde", "std"] } alloy-json-abi = { version = "1", features = ["serde_json"] } alloy-primitives = { version = "1", features = ["k256", "rlp", "serde"] } -alloy-provider = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil-node", "ws"] } -alloy-rpc-client = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["reqwest", "ws"] } -alloy-rpc-types = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } -alloy-signer-local = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["mnemonic"] } +alloy-provider = { version = "2", default-features = false, features = ["anvil-node", "ws"] } +alloy-rpc-client = { version = "2", default-features = false, features = ["reqwest", "ws"] } +alloy-rpc-types = { version = "2", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } +alloy-signer-local = { version = "2", default-features = false, features = ["mnemonic"] } alloy-sol-macro = { version = "1", default-features = false, features = ["json"] } alloy-sol-macro-expander = { version = "1", default-features = false, features = ["json"] } alloy-sol-macro-input = { version = "1", default-features = false, features = ["json"] } From 03d332420b6afa7aa2c8b191264f7814beee2133 Mon Sep 17 00:00:00 2001 From: playX18 Date: Mon, 20 Apr 2026 09:42:56 +0700 Subject: [PATCH 6/7] Revert "Merge master and resolve compute test conflicts" This reverts commit 0bff4542e35b9af3632902c3b410912e0c9167e5. 
--- .github/workflows/build.yml | 2 +- .github/workflows/check.yml | 2 +- Cargo.lock | 204 ++++++++---------- Cargo.toml | 9 +- core/src/code/metadata.rs | 13 +- ethexe/compute/src/codes.rs | 80 ++++--- ethexe/compute/src/compute.rs | 7 +- ethexe/compute/src/lib.rs | 141 +----------- ethexe/compute/src/tests.rs | 3 +- ethexe/consensus/src/lib.rs | 193 ++--------------- ethexe/ethereum/src/router/mod.rs | 7 +- ethexe/processor/src/handling/mod.rs | 1 + .../src/handling/run/chunk_execution_spawn.rs | 97 ++++++--- .../src/{ => handling}/thread_pool.rs | 124 +++++------ ethexe/processor/src/lib.rs | 188 +++------------- ethexe/processor/src/tests.rs | 47 ++-- ethexe/service/src/lib.rs | 42 +--- ethexe/service/src/tests/utils/env.rs | 18 +- ethexe/service/src/tests/utils/mod.rs | 10 +- utils/gear-workspace-hack/Cargo.toml | 32 +-- 20 files changed, 369 insertions(+), 851 deletions(-) rename ethexe/processor/src/{ => handling}/thread_pool.rs (50%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f34cba04d9f..6e4e56d6d0a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -284,7 +284,7 @@ jobs: - name: "Install: Foundry" uses: foundry-rs/foundry-toolchain@v1 with: - version: nightly-f1abb2ca347187bb6dea8c3881ca44ce50aab1e7 + version: nightly-c07d504b4ae67754584f4e05ff0c547a43c50f7b - name: "Show: Versioning" run: | diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 0f0ee08912f..db0ad147196 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -65,7 +65,7 @@ jobs: - name: "Install: Foundry" uses: foundry-rs/foundry-toolchain@v1 with: - version: nightly-f1abb2ca347187bb6dea8c3881ca44ce50aab1e7 + version: nightly-c07d504b4ae67754584f4e05ff0c547a43c50f7b - name: "Install: Node.js" uses: actions/setup-node@v6 diff --git a/Cargo.lock b/Cargo.lock index dfefb85cdf5..418fb3ee3e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -110,9 +110,8 @@ checksum = 
"683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85805c194576017df6c11057504e1d60b36f3913f8e365945486931f6ee81e40" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-contract", @@ -136,9 +135,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.2.33" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4e9e31d834fe25fe991b8884e4b9f0e59db4a97d86e05d1464d6899c013cd62" +checksum = "90f374d3c6d729268bbe2d0e0ff992bb97898b2df756691a62ee1d5f0506bc39" dependencies = [ "alloy-primitives", "num_enum 0.7.5", @@ -148,9 +147,8 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dbe4e5e9107bf6854e7550b666ca654ff2027eabf8153913e2e31ac4b089779" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-eips", "alloy-primitives", @@ -175,9 +173,8 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88fc7bbfb98cf5605a35aadf0ba43a7d9f1608d6f220d05e4fbd5144d3b0b625" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-eips", @@ -189,9 +186,8 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c16fa30b623e40a5b216da00f3b61870f5cbe863b59816ac1ecc2489515a40" +version = "1.8.3" +source = 
"git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -292,9 +288,8 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb4919fa34b268842f434bfafa9c09136ab7b1a87ce0dd40a61befa35b5408c" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -315,9 +310,8 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e111e22c1a2133e9ebfd9051ea0eaf63559594d2f50d43cbc6762fbb95fc3c2" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-eips", "alloy-primitives", @@ -343,9 +337,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dbe713da0c737d9e5e387b0ba790eb98b14dd207fe53eef50e19a5a8ec3dac" +checksum = "84e3cf01219c966f95a460c95f1d4c30e12f6c18150c21a30b768af2a2a29142" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -355,9 +349,8 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b6af6f374c1eeef8ab8dc26232cd440db167322a4207a3debd3d1ee565ca47" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -370,9 +363,8 @@ dependencies = [ [[package]] name = "alloy-network" -version = "2.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a3f5a7f3678b71d33fcc45b714fab8928dbc647d5aff2145e72032d5c849bb" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -396,9 +388,8 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb50dc1fb0e0b2c8748d5bee1aa7acdd18f9e036311bc93a71d97be624030317" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-eips", @@ -409,9 +400,8 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85195890fcee519312718dc8418035935ad0d57f57943ca82689732432a702c9" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-genesis", "alloy-hardforks", @@ -458,9 +448,8 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2ba5468f78c8893be2d68a7f2fda61753336e5653f006af19781001b5f99e6c" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-chains", "alloy-consensus", @@ -501,9 +490,8 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffcefb5d3391a320eadb95d398e4135f8cc35c7bf29a6bdb357eadcfc5ee5638" +version = "1.8.3" +source = 
"git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -545,9 +533,8 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222fd4efff0fb9a25184684742c44fe9fa9a16c4ab5bf97583e71c86598ef8f0" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -570,9 +557,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "974df1e56405c27cb8242381f45d8b212ba9df5006046ccf704764a2a4634366" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -585,9 +571,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06bc10b0dca4f5bfc3cd30ed46eab5d651b5bb2cd300d683bdcdf5d2bfe6e82c" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -597,24 +582,18 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "949c0f16a94ae33cdb1139b8dbf9e34d7f26ebfe97962e2a4d620b5f65f48fe4" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus-any", - "alloy-network-primitives", - "alloy-primitives", 
"alloy-rpc-types-eth", "alloy-serde", - "serde", - "serde_json", ] [[package]] name = "alloy-rpc-types-beacon" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8f7fa8ca056bb797a368aeed329e6ace6b62ee4271432ac36ab8ae87a5e60d" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-eips", "alloy-primitives", @@ -628,9 +607,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e59bc947935732cae5b072753e5e034c0b70a8b031c2839f45e2659ba07df9ae" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-eips", @@ -645,9 +623,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc280a41931bd419af86e9e859dd9726b73313aaa2e479b33c0e344f4b892ddb" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -666,9 +643,8 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4848831ff994c88b1c32b7df9c4c1c3eedea4b535bde5eb3c421ef0bdc5ac052" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-primitives", "serde", @@ -677,9 +653,8 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"84b8ad9890b212e224291024b1aecfeef72127d27a2f6eebc5e347c40275c4bf" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-primitives", "async-trait", @@ -692,9 +667,8 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c67d2372aada343130d41e249b59a3cef29b1678dcd3fd80f1c2c4d6b5318f2" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-consensus", "alloy-network", @@ -711,9 +685,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab81bab693da9bb79f7a95b64b394718259fdd7e41dceeced4cad57cb71c4f6a" +checksum = "09eb18ce0df92b4277291bbaa0ed70545d78b02948df756bbd3d6214bf39a218" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -725,9 +699,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489f1620bb7e2483fb5819ed01ab6edc1d2f93939dce35a5695085a1afd1d699" +checksum = "95d9fa2daf21f59aa546d549943f10b5cce1ae59986774019fbedae834ffe01b" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -737,16 +711,16 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "sha3", "syn 2.0.114", "syn-solidity", + "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56cef806ad22d4392c5fc83cf8f2089f988eb99c7067b4e0c6f1971fc1cca318" +checksum = "9396007fe69c26ee118a19f4dee1f5d1d6be186ea75b3881adf16d87f8444686" dependencies = [ "alloy-json-abi", 
"const-hex", @@ -762,9 +736,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6df77fea9d6a2a75c0ef8d2acbdfd92286cc599983d3175ccdc170d3433d249" +checksum = "af67a0b0dcebe14244fc92002cd8d96ecbf65db4639d479f5fcd5805755a4c27" dependencies = [ "serde", "winnow", @@ -772,9 +746,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64612d29379782a5dde6f4b6570d9c756d734d760c0c94c254d361e678a6591f" +checksum = "09aeea64f09a7483bdcd4193634c7e5cf9fd7775ee767585270cd8ce2d69dc95" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -784,9 +758,8 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b7b755e64ae6b5de0d762ed2c780e072167ea5e542076a559e00314352a0bf" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -807,9 +780,8 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a29980e69119444ed26b75e7ee5bed2043870f904a64318297e55800db686564" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -823,9 +795,8 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4b71dc951db66795cfb52eef835f64cf15163bc93b656e061b457ce5ebff370" +version = "1.8.3" +source = 
"git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -858,9 +829,8 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d8228b9236479ff16b03041b64b86c2bd4e53da1caa45d59b5868cd1571131e" +version = "1.8.3" +source = "git+https://github.com/gear-tech/alloy.git?branch=better-gas-estimation#42d2d5cfb91ed188e227108ed2b7819035dea693" dependencies = [ "darling 0.23.0", "proc-macro2", @@ -1985,7 +1955,7 @@ dependencies = [ "bitflags 2.10.0", "cexpr", "clang-sys", - "itertools 0.11.0", + "itertools 0.13.0", "log", "prettyplease 0.2.37", "proc-macro2", @@ -2002,7 +1972,7 @@ version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90dbd31c98227229239363921e60fcf5e558e43ec69094d46fc4996f08d1d5bc" dependencies = [ - "bitcoin_hashes 0.13.0", + "bitcoin_hashes 0.14.1", ] [[package]] @@ -2912,7 +2882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -5095,7 +5065,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -8702,7 +8672,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -9169,7 +9139,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -11696,7 +11666,7 @@ version = "0.9.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9224be3459a0c1d6e9b0f42ab0e76e98b29aef5aba33c0487dfcf47ea08b5150" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 3.4.0", "proc-macro2", "quote", "syn 1.0.109", @@ -11708,7 +11678,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -13984,7 +13954,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.4.1", + "heck 0.5.0", "itertools 0.12.1", "log", "multimap 0.10.1", @@ -14219,7 +14189,7 @@ dependencies = [ "quinn-udp 0.5.14", "rustc-hash 2.1.1", "rustls 0.23.36", - "socket2 0.5.10", + "socket2 0.6.1", "thiserror 2.0.17", "tokio", "tracing", @@ -14318,9 +14288,9 @@ dependencies = [ "cfg_aliases 0.2.1", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.1", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -15081,7 +15051,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -15094,7 +15064,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -15197,7 +15167,7 @@ dependencies = [ "security-framework 3.5.1", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -15218,7 +15188,7 @@ dependencies = [ "security-framework 3.5.1", "security-framework-sys", "webpki-root-certs 1.0.5", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -18682,9 +18652,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.5.7" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "53f425ae0b12e2f5ae65542e00898d500d4d318b4baf09f40fd0d410454e9947" +checksum = "5f92d01b5de07eaf324f7fca61cc6bd3d82bbc1de5b6c963e6fe79e86f36580d" dependencies = [ "paste", "proc-macro2", @@ -18814,7 +18784,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -20958,7 +20928,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 30a207b166a..82c1e17b959 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,10 +116,10 @@ members = [ [workspace.dependencies] gear-workspace-hack = "0.1.0" -alloy = "2.0" -alloy-chains = "0.2.33" -alloy-primitives = { version = "1.5.7", default-features = false } -alloy-sol-types = { version = "1.5.7", default-features = false } +alloy = "1.8" # TODO: #5160 use release version of alloy when BlobGasFiller is fixed +alloy-chains = "0.2" +alloy-primitives = { version = "1.5", default-features = false } +alloy-sol-types = { version = "1.5", default-features = false } anyhow = { version = "1.0.86", default-features = false } arbitrary = "1.3.2" async-recursion = "1.1.1" @@ -634,6 +634,7 @@ inherits = "release" debug = true [patch.crates-io] +alloy = { version = "1.8", git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation" } gear-workspace-hack = { path = "utils/gear-workspace-hack" } # core2 0.4.0 was yanked on crates.io; pin upstream git so cargo metadata doesn't fail on yanked lookup. # Needed by cid -> sc-network transitive; triggers during wasm-builder sub-project resolve. 
diff --git a/core/src/code/metadata.rs b/core/src/code/metadata.rs index 9b69329094b..69dd4ff1a24 100644 --- a/core/src/code/metadata.rs +++ b/core/src/code/metadata.rs @@ -32,18 +32,7 @@ use scale_info::{ /// Status of the instrumentation. #[derive( - Clone, - Copy, - Debug, - Decode, - DecodeAsType, - Encode, - EncodeAsType, - TypeInfo, - PartialEq, - Eq, - Hash, - derive_more::IsVariant, + Clone, Copy, Debug, Decode, DecodeAsType, Encode, EncodeAsType, TypeInfo, PartialEq, Eq, Hash, )] pub enum InstrumentationStatus { /// Code is not instrumented yet. diff --git a/ethexe/compute/src/codes.rs b/ethexe/compute/src/codes.rs index cd8203d8090..5083e07a913 100644 --- a/ethexe/compute/src/codes.rs +++ b/ethexe/compute/src/codes.rs @@ -16,20 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{ProcessorExt, Result, service::SubService}; +use crate::{ComputeError, ProcessorExt, Result, service::SubService}; use ethexe_common::{ CodeAndIdUnchecked, db::{CodesStorageRO, CodesStorageRW}, }; use ethexe_db::Database; use ethexe_processor::{ProcessedCodeInfo, ValidCodeInfo}; -use futures::{FutureExt, StreamExt, future::BoxFuture, stream::FuturesUnordered}; use gprimitives::CodeId; use metrics::Gauge; -use std::{ - future, - task::{Context, Poll}, -}; +use std::task::{Context, Poll}; +use tokio::task::JoinSet; /// Metrics for the [`CodesSubService`]. #[derive(Clone, metrics_derive::Metrics)] @@ -44,7 +41,7 @@ pub struct CodesSubService { processor: P, metrics: Metrics, - processions: FuturesUnordered>>, + processions: JoinSet>, } impl CodesSubService

{ @@ -53,7 +50,7 @@ impl CodesSubService

{ db, processor, metrics: Metrics::default(), - processions: FuturesUnordered::new(), + processions: JoinSet::new(), } } @@ -73,37 +70,36 @@ impl CodesSubService

{ "Instrumented code {code_id:?} must exist in database" ); } - self.processions.push(future::ready(Ok(code_id)).boxed()); + self.processions.spawn(async move { Ok(code_id) }); } else { let db = self.db.clone(); let mut processor = self.processor.clone(); - self.processions.push( - async move { - let ProcessedCodeInfo { code_id, valid } = - processor.process_code(code_and_id).await?; - if let Some(ValidCodeInfo { - code, - instrumented_code, - code_metadata, - }) = valid - { - db.set_original_code(&code); - db.set_instrumented_code( - ethexe_runtime_common::VERSION, - code_id, + self.processions.spawn_blocking(move || { + processor + .process_code(code_and_id) + .map(|ProcessedCodeInfo { code_id, valid }| { + if let Some(ValidCodeInfo { + code, instrumented_code, - ); - db.set_code_metadata(code_id, code_metadata); - db.set_code_valid(code_id, true); - } else { - db.set_code_valid(code_id, false); - } - - Ok(code_id) - } - .boxed(), - ); + code_metadata, + }) = valid + { + db.set_original_code(&code); + db.set_instrumented_code( + ethexe_runtime_common::VERSION, + code_id, + instrumented_code, + ); + db.set_code_metadata(code_id, code_metadata); + db.set_code_valid(code_id, true); + } else { + db.set_code_valid(code_id, false); + } + + code_id + }) + }); } self.metrics @@ -116,14 +112,14 @@ impl SubService for CodesSubService

{ type Output = CodeId; fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll> { - if let Poll::Ready(Some(res)) = self.processions.poll_next_unpin(cx) { - self.metrics - .processing_codes - .set(self.processions.len() as f64); - return Poll::Ready(res); - } - - Poll::Pending + futures::ready!(self.processions.poll_join_next(cx)) + .map(|res| { + self.metrics + .processing_codes + .set(self.processions.len() as f64); + res.map_err(ComputeError::CodeProcessJoin)? + }) + .map_or(Poll::Pending, Poll::Ready) } } diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index 48a2f1257a2..039ce6c7955 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -444,7 +444,7 @@ mod tests { const USER_ID: ActorId = ActorId::new([1u8; 32]); - pub async fn upload_code(processor: &mut Processor, code: &[u8], db: &Database) -> CodeId { + pub fn upload_code(processor: &mut Processor, code: &[u8], db: &Database) -> CodeId { let code_id = CodeId::generate(code); let ValidCodeInfo { @@ -456,7 +456,6 @@ mod tests { code: code.to_vec(), code_id, }) - .await .expect("failed to process code") .valid .expect("code is invalid"); @@ -615,7 +614,7 @@ mod tests { let db = Database::memory(); let mut processor = Processor::new(db.clone()).unwrap(); let ping_code_id = - test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db).await; + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); let ping_id = ActorId::from(0x10000); let blockchain = chain.setup(&db); let blockchain_len = blockchain.blocks.len() - 1; @@ -714,7 +713,7 @@ mod tests { let mut processor = Processor::new(db.clone()).unwrap(); let ping_code_id = - test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db).await; + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); let ping_id = ActorId::from(0x10000); let blockchain = chain.setup(&db); diff --git a/ethexe/compute/src/lib.rs b/ethexe/compute/src/lib.rs index 
b8f11a00ad9..a5c3b8618db 100644 --- a/ethexe/compute/src/lib.rs +++ b/ethexe/compute/src/lib.rs @@ -16,136 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! # Ethexe Compute -//! -//! Orchestrates the three pipelines that turn on-chain data into executed -//! state for the ethexe node: code validation, block preparation, and -//! announce computation. The crate wraps `ethexe-processor` and exposes its -//! progress as a `futures::Stream` of [`ComputeEvent`]s: the outer service -//! submits work through a few input methods, then polls the stream and -//! handles each event that comes out. -//! -//! [`ComputeService`] composes three independent sub-services. Each does -//! one thing and emits one family of events: -//! -//! - `codes` — validates and instruments a WASM code blob and marks its -//! validity in the database. Emits [`ComputeEvent::CodeProcessed`]. -//! - `prepare` — brings a synced block (and any not-yet-prepared ancestors) -//! into a state where it can be executed, requesting missing code blobs -//! from the caller along the way. Emits [`ComputeEvent::RequestLoadCodes`] -//! and [`ComputeEvent::BlockPrepared`]. -//! - `compute` — executes an announce (computing any missing ancestor -//! announces first), optionally streaming promises for it. Emits -//! [`ComputeEvent::Promise`] and [`ComputeEvent::AnnounceComputed`]. -//! -//! ## Role in the stack and relation to other crates -//! -//! - `ethexe-processor` is the backend. Compute is generic over the -//! [`ProcessorExt`] trait defined here and has a direct impl for -//! [`Processor`]; the only other impl in the tree is a test mock -//! (`tests::MockProcessor`) that lets the sub-service tests run without -//! any real WASM execution. -//! - `ethexe-blob-loader` is **not** a direct dependency. When `prepare` -//! discovers codes with unknown validation status, it yields -//! 
[`ComputeEvent::RequestLoadCodes`] upstream; the service layer is -//! responsible for calling the blob loader, and then feeds the loaded -//! bytes back into compute via [`ComputeService::process_code`]. That -//! way compute itself never has to make network calls. -//! - `ethexe-db` is the only place compute reads from and writes to. -//! - `ethexe-service` is the sole consumer: it polls the `futures::Stream` -//! produced by [`ComputeService`] inside the main `tokio::select!` loop -//! and routes each [`ComputeEvent`] variant to the rest of the node -//! (consensus, network, blob-loader). -//! -//! ## Entry points -//! -//! | Method | Effect | -//! |----------------------------------------------|-----------------------------------------------------------------------------------------| -//! | [`ComputeService::process_code`] | Queue a code blob for validation + instrumentation + DB persistence. | -//! | [`ComputeService::prepare_block`] | Queue a synced block for preparation (walks ancestors, emits code requests). | -//! | [`ComputeService::compute_announce`] | Queue an announce for execution with a [`PromisePolicy`](ethexe_common::PromisePolicy). | -//! | `::poll_next` | Drive all three sub-services and yield the next [`ComputeEvent`]. | -//! -//! ## Code processing pipeline (`codes` sub-service) -//! -//! For every code submitted through [`ComputeService::process_code`] the -//! stream eventually yields exactly one [`ComputeEvent::CodeProcessed`] -//! (carrying the same `CodeId`) or a [`ComputeError`]. This holds both -//! for fresh codes and for codes that had already been validated in a -//! previous run, so the caller does not have to de-duplicate. -//! -//! Multiple codes submitted at once can be processed concurrently. -//! -//! ## Block preparation pipeline (`prepare` sub-service) -//! -//! For every block hash submitted through [`ComputeService::prepare_block`] -//! the stream eventually yields exactly one [`ComputeEvent::BlockPrepared`] -//! 
for that hash or a [`ComputeError`]. Before the block-prepared event, -//! the stream may emit one or more [`ComputeEvent::RequestLoadCodes`] if -//! the block — or any of its still-unprepared ancestors — references codes -//! whose validity has not yet been established. The caller must fetch -//! those codes (out of scope for this crate) and feed them back in through -//! [`ComputeService::process_code`]; preparation resumes automatically as -//! the missing codes arrive. -//! -//! ## Announce computation pipeline (`compute` sub-service) -//! -//! For every announce submitted through [`ComputeService::compute_announce`] -//! with a [`PromisePolicy`](ethexe_common::PromisePolicy), the stream -//! eventually yields exactly one [`ComputeEvent::AnnounceComputed`] for -//! that announce or a [`ComputeError`]. If the caller passed -//! [`PromisePolicy::Enabled`](ethexe_common::PromisePolicy), zero or more -//! [`ComputeEvent::Promise`] events for the same announce are yielded -//! first. Every `Promise` for a given announce is yielded strictly before -//! the `AnnounceComputed` of that announce — `AnnounceComputed` is the -//! "all promises for this announce have been delivered" marker. -//! -//! Computation is sequential: at most one announce is executed at a time. -//! If the announce's parent (or any further ancestor) has not been -//! computed yet, missing ancestors are computed first, in order. -//! Ancestors are always computed without promise collection regardless of -//! the requested policy — promises describe the user-visible result of -//! the target announce only. -//! -//! The target block must already be prepared; otherwise the computation -//! fails with [`ComputeError::BlockNotPrepared`]. -//! -//! Actual WASM execution is delegated to [`ProcessorExt::process_programs`]. -//! -//! ## Canonical event quarantine -//! -//! Ethereum events do not become visible to the runtime on the block they -//! arrive in. 
When building the execution input for a block, compute -//! instead takes the events from an ancestor that is -//! [`ComputeConfig::canonical_quarantine`](ComputeConfig) blocks older. -//! If the walk back would cross genesis, the returned event list is -//! empty — i.e. the first `canonical_quarantine` blocks after genesis -//! see no Ethereum events at all. -//! -//! ## Event flow summary -//! -//! | [`ComputeEvent`] | Fired by | Expected consumer | -//! |---------------------------|----------|-------------------------------------------------------| -//! | `CodeProcessed(code_id)` | `codes` | Informational. | -//! | `RequestLoadCodes(set)` | `prepare`| Handed to `ethexe-blob-loader` to fetch code blobs. | -//! | `BlockPrepared(hash)` | `prepare`| Handed to `ethexe-consensus`. | -//! | `AnnounceComputed(hash)` | `compute`| Handed to `ethexe-consensus`. | -//! | `Promise(p, ah)` | `compute`| Handed to `ethexe-consensus` for signing. | -//! -//! ## When modifying this crate -//! -//! - A code result must reach the `prepare` sub-service before the -//! corresponding `CodeProcessed` is emitted upstream, otherwise a block -//! waiting on that code will stall for an extra poll. -//! - An announce must only be computed after its block has been prepared. -//! - For announce execution, canonical events must always be read via -//! [`find_canonical_events_post_quarantine`], never directly via -//! `db.block_events(...)` from the announce's own block. Taking the raw -//! events would skip the quarantine and produce non-deterministic state -//! across nodes that disagree on a recent reorg. -//! - For any single announce, `AnnounceComputed` must be the last event -//! emitted; every `Promise` that belongs to it comes strictly before. 
- pub use compute::{ ComputeConfig, ComputeSubService, utils::{find_canonical_events_post_quarantine, prepare_executable_for_announce}, @@ -192,6 +62,8 @@ pub enum ComputeError { BlockHeaderNotFound(H256), #[error("block validators committed for era not found for block({0})")] CommittedEraNotFound(H256), + #[error("process code join error")] + CodeProcessJoin(#[from] tokio::task::JoinError), #[error("codes queue not found for computed block({0})")] CodesQueueNotFound(H256), #[error("last committed batch not found for computed block({0})")] @@ -229,10 +101,7 @@ pub trait ProcessorExt: Sized + Unpin + Send + Clone + 'static { executable: ExecutableData, promise_out_tx: Option>, ) -> impl Future> + Send; - fn process_code( - &mut self, - code_and_id: CodeAndIdUnchecked, - ) -> impl Future> + Send; + fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result; } impl ProcessorExt for Processor { @@ -246,7 +115,7 @@ impl ProcessorExt for Processor { .map_err(Into::into) } - async fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { - self.process_code(code_and_id).await.map_err(Into::into) + fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { + self.process_code(code_and_id).map_err(Into::into) } } diff --git a/ethexe/compute/src/tests.rs b/ethexe/compute/src/tests.rs index 584e5999253..739be1f0dbb 100644 --- a/ethexe/compute/src/tests.rs +++ b/ethexe/compute/src/tests.rs @@ -147,11 +147,12 @@ impl ProcessorExt for MockProcessor { Ok(self.process_programs_result.take().unwrap_or_default()) } - async fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { + fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { self.process_code_calls .lock() .unwrap() .push(code_and_id.clone()); + Ok(self .process_codes_result .take() diff --git a/ethexe/consensus/src/lib.rs b/ethexe/consensus/src/lib.rs index e6365e0887c..c1b8ae43850 100644 --- a/ethexe/consensus/src/lib.rs +++ 
b/ethexe/consensus/src/lib.rs @@ -18,186 +18,19 @@ //! # Ethexe Consensus //! -//! Decides what an ethexe node should do as Ethereum blocks arrive: validate -//! announces produced by other nodes, produce announces of its own if it is -//! the producer for a block, coordinate threshold-signed batch commitments, -//! and submit those batches to the on-chain Router contract. -//! -//! Ethereum is the authoritative ledger — this crate does not invent its own -//! BFT protocol. It decides which announces to compute, collects enough -//! validator signatures on the resulting state, and posts the aggregated -//! commitment on-chain. Finality follows from the host chain. -//! -//! Two implementations of [`ConsensusService`] are provided: -//! -//! - [`ConnectService`] — a passive "connect-node" that tracks announces -//! from producers, asks `ethexe-compute` to execute them, and requests -//! missing announces from peers when needed. It knows the validator -//! set (so it can tell whose announce to accept for each block), but -//! it holds no signing key and does not submit anything on-chain. -//! - [`ValidatorService`] — an active validator. In addition to what -//! `ConnectService` does, it produces announces when it is the -//! producer for a block, collects validator signatures on batch -//! commitments, and submits the multi-signed batch to the Router -//! contract. -//! -//! Both share the same [`ConsensusService`] trait and the same -//! [`ConsensusEvent`] output stream, so `ethexe-service` can drive them -//! uniformly. -//! -//! ## Role in the stack and relation to other crates -//! -//! - `ethexe-observer` feeds Ethereum block data through -//! [`ConsensusService::receive_new_chain_head`] and the follow-up -//! [`ConsensusService::receive_synced_block`] notifications. -//! - `ethexe-compute` signals execution progress through -//! [`ConsensusService::receive_prepared_block`], -//! [`ConsensusService::receive_computed_announce`], and hands raw -//! 
promises back through -//! [`ConsensusService::receive_promise_for_signing`]. -//! - `ethexe-network` delivers producer announces, validation requests -//! and replies, fetched announces and network-forwarded injected -//! transactions. Outgoing network messages leave as -//! [`ConsensusEvent::PublishMessage`], [`ConsensusEvent::PublishPromise`] -//! and [`ConsensusEvent::RequestAnnounces`]. -//! - `ethexe-ethereum` is reached only from [`ValidatorService`], through -//! the [`BatchCommitter`] trait, to submit aggregated batch -//! commitments to the Router contract. [`ConnectService`] neither -//! signs nor posts anything on-chain. -//! - `ethexe-service` is the sole consumer: it routes every trait call -//! into the consensus service and routes every [`ConsensusEvent`] to -//! the right subsystem (compute, network, logs). -//! -//! ## Entry points -//! -//! All inputs arrive through the [`ConsensusService`] trait. Outputs leave -//! through the `futures::Stream` impl that the same trait requires. -//! -//! | Trait method | Meaning of the input | -//! |-----------------------------------------------------------|------------------------------------------------------------------------| -//! | [`receive_new_chain_head`](ConsensusService::receive_new_chain_head) | A new Ethereum chain head. | -//! | [`receive_synced_block`](ConsensusService::receive_synced_block) | The block's data is now available in the DB. | -//! | [`receive_prepared_block`](ConsensusService::receive_prepared_block) | The block is now prepared. | -//! | [`receive_computed_announce`](ConsensusService::receive_computed_announce) | An announce has finished executing and its result is persisted. | -//! | [`receive_announce`](ConsensusService::receive_announce) | A signed producer announce. | -//! | [`receive_promise_for_signing`](ConsensusService::receive_promise_for_signing) | A raw promise that this validator should sign. | -//! 
| [`receive_validation_request`](ConsensusService::receive_validation_request) | A request to validate a batch commitment. | -//! | [`receive_validation_reply`](ConsensusService::receive_validation_reply) | A signed reply on a batch this validator is coordinating. | -//! | [`receive_announces_response`](ConsensusService::receive_announces_response) | A response to a previous [`ConsensusEvent::RequestAnnounces`]. | -//! | [`receive_injected_transaction`](ConsensusService::receive_injected_transaction) | An injected transaction offered to this validator's pool. | -//! -//! ## Output events -//! -//! | [`ConsensusEvent`] | What it tells the service layer | -//! |--------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------| -//! | [`AnnounceAccepted`](ConsensusEvent::AnnounceAccepted) / [`AnnounceRejected`](ConsensusEvent::AnnounceRejected) | Informational result of validating a received producer announce. | -//! | [`ComputeAnnounce`](ConsensusEvent::ComputeAnnounce) | The outer service must hand this announce to `ethexe-compute`, with the given `PromisePolicy`. | -//! | [`PublishMessage`](ConsensusEvent::PublishMessage) | Signed validator-to-validator message to gossip over the network. | -//! | [`PublishPromise`](ConsensusEvent::PublishPromise) | Signed promise to gossip over the network and deliver to RPC subscribers. | -//! | [`RequestAnnounces`](ConsensusEvent::RequestAnnounces) | Ask the network to fetch announces we are missing. | -//! | [`CommitmentSubmitted`](ConsensusEvent::CommitmentSubmitted) | Informational: a batch was successfully submitted to the Router contract. | -//! | [`Warning`](ConsensusEvent::Warning) | Informational: a non-fatal anomaly (unexpected input, bad reply, etc.) was detected. | -//! -//! ## ConnectService behaviour -//! -//! `ConnectService` observes the chain. For each new Ethereum block it -//! 
waits until the block is synced and prepared, resolves which -//! validator is the producer for that block, and either validates the -//! producer's announce if one has already been received or keeps -//! waiting for it. -//! -//! Accepted announces turn into [`ConsensusEvent::ComputeAnnounce`] -//! with [`PromisePolicy::Disabled`](ethexe_common::PromisePolicy) — -//! observer nodes never collect promises. If any announce in the -//! ancestor chain is missing locally, the service emits -//! [`ConsensusEvent::RequestAnnounces`] and waits for the network's -//! response before proceeding. -//! -//! ## ValidatorService behaviour -//! -//! A validator runs one attempt per Ethereum block. For every new chain -//! head the service computes which validator is the producer for that -//! block and enters one of two roles. A new chain head always aborts -//! the previous attempt. -//! -//! State flow: -//! -//! ```text -//! Initial -//! │ -//! ├── self is producer ──► Producer ───► Coordinator ───► Initial -//! │ (collects replies, -//! │ submits batch) -//! │ -//! └── other producer ──► Subordinate ─► Participant ────► Initial -//! (validates the -//! producer's batch, -//! signs & replies) -//! ``` -//! -//! These state names appear in emitted [`ConsensusEvent::Warning`] -//! messages, so they are the right handle when reading logs or tracing -//! an issue. -//! -//! Contract visible at the crate boundary: -//! -//! - The service emits exactly one [`ConsensusEvent::ComputeAnnounce`] per -//! block it wants executed (an announce it produced itself or one it -//! accepted from the producer). [`PromisePolicy::Enabled`](ethexe_common::PromisePolicy) -//! is set only when this validator is the producer — only producers -//! collect promises. -//! - When coordinating a batch, the service gossips a -//! [`ConsensusEvent::PublishMessage`] with the validation request, -//! collects enough [`ConsensusService::receive_validation_reply`] calls -//! 
to satisfy the configured [`ValidatorConfig::signatures_threshold`], -//! and then submits the multi-signed batch through the injected -//! [`BatchCommitter`]. On success a [`ConsensusEvent::CommitmentSubmitted`] -//! is emitted. -//! - When acting as participant, the service validates the incoming -//! batch against its local state. On acceptance it publishes a signed -//! reply over [`ConsensusEvent::PublishMessage`]; on rejection it emits -//! a [`ConsensusEvent::Warning`] and sends nothing to the coordinator. -//! - Unexpected or malformed inputs produce [`ConsensusEvent::Warning`] -//! rather than aborting the service. -//! -//! ## Slot and era model -//! -//! The producer for a block is a deterministic function of the validator -//! set for the block's era and the block's timestamp. Era boundaries are -//! computed from the Ethereum block timestamp relative to the genesis -//! timestamp stored in the database config (see `ProtocolTimelines`). -//! -//! ## Injected transactions -//! -//! On a validator node, injected transactions are checked for standard -//! validity (not duplicated, not outdated, destination exists and is -//! initialized, etc.) and accepted ones are stored in a local pool. When -//! this validator is next the producer for a block, it drains pending -//! transactions from the pool into the announce it creates. -//! `ConnectService` ignores injected transactions entirely. -//! -//! ## When modifying this crate -//! -//! - Ethereum is the authoritative ledger. The crate -//! only decides which announces to execute and which batches to co-sign. -//! - A new Ethereum chain head always resets the validator to `Initial` -//! for that block. Do not introduce state carried across chain heads -//! beyond what is already kept in the database. -//! - `ConnectService` must never sign anything or submit anything -//! on-chain. It has no signer and no `BatchCommitter`; keep it that -//! way. -//! 
- Unexpected inputs (replies from non-validators, announces from -//! non-producers, transitions that do not match the current state) must -//! be surfaced as [`ConsensusEvent::Warning`], not as hard errors that -//! tear down the stream. -//! - The producer for a block must remain a pure function of on-chain -//! data and the block timestamp. Wall-clock time must not leak into -//! this decision (the only existing wall-clock knob is -//! [`ValidatorConfig::producer_delay`] and it only paces when the -//! producer acts, never who the producer is). -//! - A batch is submitted on-chain only after the number of collected -//! signatures reaches [`ValidatorConfig::signatures_threshold`]; this -//! is the sole trigger. +//! This crate provides controlling a behaviour of ethexe node depending on incoming blocks. +//! +//! The main components are: +//! - [`ConsensusService`]: A trait defining the core interface for consensus services +//! - [`ConsensusEvent`]: An enum representing various consensus events which have to be processed by outer services +//! - [`ConnectService`]: An implementation of consensus to run "connect-node" +//! - [`ValidatorService`]: An implementation of consensus to run "validator-node" +//! +//! The crate is organized into several modules: +//! - `connect`: Connection management functionality +//! - `validator`: Block validation services and implementations +//! - `utils`: Utility functions and shared data structures +//! 
- `announces`: Logic for handling announce branching and related operations use anyhow::Result; use ethexe_common::{ diff --git a/ethexe/ethereum/src/router/mod.rs b/ethexe/ethereum/src/router/mod.rs index 7cb31fd108c..5bd09a2b2a0 100644 --- a/ethexe/ethereum/src/router/mod.rs +++ b/ethexe/ethereum/src/router/mod.rs @@ -369,12 +369,9 @@ impl Router { } else { format!("{err}") }; - log::error!( - "Failed to estimate gas for batch commitment: (error: {error}, block info: {latest_block}, calldata: 0x{}, batch commitment: {commitment:?})", - hex::encode(calldata) - ); return Err(anyhow!( - "Failed to estimate gas for batch commitment: {error}" + "Failed to estimate gas for batch commitment: (error: {error}, block info: {latest_block}, calldata: 0x{}, batch commitment: {commitment:?})", + hex::encode(calldata), )); } }; diff --git a/ethexe/processor/src/handling/mod.rs b/ethexe/processor/src/handling/mod.rs index 1c97c40cd91..bea415794c7 100644 --- a/ethexe/processor/src/handling/mod.rs +++ b/ethexe/processor/src/handling/mod.rs @@ -23,6 +23,7 @@ use gprimitives::ActorId; pub(crate) mod events; pub(crate) mod overlaid; pub(crate) mod run; +mod thread_pool; /// A high-level interface for executing ops, /// which mutate states based on the current block request events. diff --git a/ethexe/processor/src/handling/run/chunk_execution_spawn.rs b/ethexe/processor/src/handling/run/chunk_execution_spawn.rs index eaa3d671141..72b674bc023 100644 --- a/ethexe/processor/src/handling/run/chunk_execution_spawn.rs +++ b/ethexe/processor/src/handling/run/chunk_execution_spawn.rs @@ -21,9 +21,9 @@ //! This module handles spawning program execution tasks in a thread pool. use super::*; -use crate::thread_pool; +use crate::{handling::thread_pool::ThreadPool, host::InstanceWrapper}; use ethexe_runtime_common::ProcessQueueContext; -use futures::stream::FuturesOrdered; +use std::sync::LazyLock; /// An alias introduced for better readability of the chunks execution steps. 
pub type ChunkItemOutput = (ActorId, H256, ProgramJournals, u64); @@ -39,6 +39,55 @@ pub async fn spawn_chunk_execution( chunk: Vec<(ActorId, H256)>, queue_type: MessageType, ) -> Result> { + struct Executable { + queue_type: MessageType, + block_info: BlockInfo, + promise_policy: PromisePolicy, + program_id: ActorId, + state_hash: H256, + instrumented_code: InstrumentedCode, + code_metadata: CodeMetadata, + executor: InstanceWrapper, + db: Box, + gas_allowance_for_chunk: u64, + promise_out_tx: Option>, + } + + fn execute_chunk_item(executable: Executable) -> Result { + let Executable { + queue_type, + block_info, + promise_policy, + program_id, + state_hash, + instrumented_code, + code_metadata, + mut executor, + db, + gas_allowance_for_chunk, + promise_out_tx, + } = executable; + + let (jn, new_state_hash, gas_spent) = executor.run( + db, + ProcessQueueContext { + program_id, + state_root: state_hash, + queue_type, + instrumented_code, + code_metadata, + gas_allowance: GasAllowanceCounter::new(gas_allowance_for_chunk), + block_info, + promise_policy, + }, + promise_out_tx, + )?; + Ok((program_id, new_state_hash, jn, gas_spent)) + } + + static THREAD_POOL: LazyLock>> = + LazyLock::new(|| ThreadPool::new(execute_chunk_item)); + let gas_allowance_for_chunk = ctx .inner() .gas_allowance_counter @@ -53,32 +102,28 @@ pub async fn spawn_chunk_execution( timestamp: block_header.timestamp, }; - chunk + let executables = chunk .into_iter() .map(|(program_id, state_hash)| { let (instrumented_code, code_metadata) = ctx.program_code(program_id)?; - let mut executor = ctx.inner().instance_creator.instantiate()?; - let db = ctx.inner().db.cas().clone_boxed(); - let promise_out_tx = ctx.inner().promise_out_tx.clone(); - Ok(thread_pool::spawn(move || { - let (jn, new_state_hash, gas_spent) = executor.run( - db, - ProcessQueueContext { - program_id, - state_root: state_hash, - queue_type, - instrumented_code, - code_metadata, - gas_allowance: 
GasAllowanceCounter::new(gas_allowance_for_chunk), - block_info, - promise_policy, - }, - promise_out_tx, - )?; - Ok((program_id, new_state_hash, jn, gas_spent)) - })) + + let executor = ctx.inner().instance_creator.instantiate()?; + + Ok(Executable { + queue_type, + block_info, + promise_policy, + program_id, + state_hash, + instrumented_code, + code_metadata, + executor, + db: ctx.inner().db.cas().clone_boxed(), + gas_allowance_for_chunk, + promise_out_tx: ctx.inner().promise_out_tx.clone(), + }) }) - .collect::>>()? - .try_collect() - .await + .collect::>>()?; + + THREAD_POOL.spawn_many(executables).try_collect().await } diff --git a/ethexe/processor/src/thread_pool.rs b/ethexe/processor/src/handling/thread_pool.rs similarity index 50% rename from ethexe/processor/src/thread_pool.rs rename to ethexe/processor/src/handling/thread_pool.rs index d18c947d8b9..a8b9fe2043e 100644 --- a/ethexe/processor/src/thread_pool.rs +++ b/ethexe/processor/src/handling/thread_pool.rs @@ -18,63 +18,50 @@ //! Small custom thread pool interface, because `rayon` is too smart //! and `threadpool` is not smart enough. -//! -//! The global pool in this module is shared across the processor crate for all -//! CPU-bound work, including code instrumentation and chunk execution. -//! `ETHEXE_PROCESSOR_NUM_THREADS` can be used to override the worker count, and -//! because the pool is initialized lazily, the value is captured on first use. - -use std::{env, num::NonZero, panic::AssertUnwindSafe, sync::LazyLock, thread}; - -// Shared across the processor crate for all CPU-bound work. -static DEFAULT_THREAD_POOL: LazyLock = LazyLock::new(ThreadPool::new); - -/// Spawns a given task. -/// -/// Returns the task result once a worker finishes executing it. -/// -/// # Panics -/// -/// Propagates panics from the worker thread to the main thread. -/// -/// Panics if worker thread dies despite using -/// `std::panic::catch_unwind` around the handler. 
-pub async fn spawn(f: F) -> R -where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - DEFAULT_THREAD_POOL.spawn(f).await -} -type Task = Box; +use futures::prelude::*; +use std::{num::NonZero, panic::AssertUnwindSafe, thread}; +type Task = (I, tokio::sync::oneshot::Sender>); + +/// Thread pool that handler tasks of type `I` +/// and produces outputs of type `O`. #[derive(Debug, Clone)] -struct ThreadPool { - task_tx: crossbeam::channel::Sender, +pub struct ThreadPool { + task_tx: crossbeam::channel::Sender>, } -impl ThreadPool { - fn new() -> Self { - let n_cpus = env::var("ETHEXE_PROCESSOR_NUM_THREADS") - .ok() - .and_then(|num| num.parse().ok()) - .or_else(|| thread::available_parallelism().ok()) - .map_or(1, NonZero::get); +impl ThreadPool +where + I: Send + 'static, + O: Send + 'static, +{ + /// Creates a new thread pool. + pub fn new(handler: F) -> Self + where + F: FnMut(I) -> O + Send + Clone + 'static, + { + let n_cpus = thread::available_parallelism().map_or(1, NonZero::get); - let (task_tx, task_rx) = crossbeam::channel::unbounded::(); + let (task_tx, task_rx) = crossbeam::channel::unbounded::>(); for _ in 0..n_cpus { let task_rx = task_rx.clone(); + let handler = handler.clone(); thread::spawn(move || { loop { - let Ok(task) = task_rx.recv() else { + let Ok((task, sender)) = task_rx.recv() else { // All connected `ThreadPool` instances were dropped break; }; - task(); + let mut handler = handler.clone(); + + // Output receiver could be cancelled + let _ = sender.send(std::panic::catch_unwind(AssertUnwindSafe(move || { + handler(task) + }))); } }); } @@ -82,46 +69,53 @@ impl ThreadPool { Self { task_tx } } - async fn spawn(&self, f: F) -> R - where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, - { + /// Spawns a given task. + /// + /// Returns `Ok(result)` if a worker successfully + /// processed the task and `Err(panic_info)` if the worker panicked. 
+ /// + /// # Panics + /// + /// Propagates panics from the worker thread to the main thread. + /// + /// Panics if worker thread dies despite using + /// `std::panic::catch_unwind` around the handler. + pub async fn spawn(&self, input: I) -> O { let (tx, rx) = tokio::sync::oneshot::channel(); - let f = Box::new(move || { - let res = std::panic::catch_unwind(AssertUnwindSafe(f)); - - // Output receiver could be cancelled - let _ = tx.send(res); - }); - - self.task_tx.try_send(f).expect("The channel is unbounded"); + self.task_tx + .try_send((input, tx)) + .expect("The channel is unbounded"); rx.await .expect("Worker thread has died") .unwrap_or_else(|err| std::panic::resume_unwind(err)) } + + /// Spawns tasks from an iterator of inputs, + /// producing a stream of outputs. + /// + /// The outputs are ordered the same as inputs. + pub fn spawn_many>(&self, input: II) -> impl Stream { + input + .into_iter() + .map(|input| self.spawn(input)) + .collect::>() + } } #[cfg(test)] mod tests { use super::*; - use futures::{FutureExt, StreamExt, stream::FuturesOrdered}; - - fn task(n: usize) -> String { - "amogus".repeat(n) - } #[tokio::test] async fn test_thread_pool() { - assert_eq!(spawn(|| task(2)).await, "amogusamogus"); + let thread_pool = ThreadPool::new(|n| "amogus".repeat(n)); + assert_eq!(thread_pool.spawn(2).await, "amogusamogus"); assert_eq!( - [0, 1, 2, 3] - .into_iter() - .map(|n| spawn(move || task(n))) - .collect::>() + thread_pool + .spawn_many([0, 1, 2, 3]) .collect::>() .await, vec![ @@ -137,7 +131,7 @@ mod tests { // Ensure that panics don't break things for _ in 0..n_cpus * 2 { assert!( - AssertUnwindSafe(spawn(|| task(usize::MAX))) + AssertUnwindSafe(thread_pool.spawn(usize::MAX)) .catch_unwind() .await .is_err() diff --git a/ethexe/processor/src/lib.rs b/ethexe/processor/src/lib.rs index b0d5b2a2206..073552e3f46 100644 --- a/ethexe/processor/src/lib.rs +++ b/ethexe/processor/src/lib.rs @@ -16,143 +16,7 @@ // You should have received a copy of the GNU 
General Public License // along with this program. If not, see . -//! # Ethexe Processor -//! -//! Low-level execution engine that runs Gear programs inside the ethexe -//! node. The crate embeds a pre-compiled [`ethexe_runtime`] WASM artifact -//! and runs it in [`wasmtime`] with host functions that give the runtime -//! access to the database, lazy pages, sandboxed nested WASM, promise -//! publishing, allocation and logging. On top of that it exposes a small -//! API to: -//! -//! - validate and instrument Gear WASM code blobs, -//! - execute an ethexe block (announce) — routing [`BlockRequestEvent`]s -//! into program state mutations, appending [`InjectedTransaction`]s to -//! program queues, running scheduled tasks, and draining program -//! message queues until gas or other limits are exhausted, -//! - simulate a single message against a copy-on-write view of the -//! database without committing anything, for RPC reply queries. -//! -//! ## Role in the stack and relation to other crates -//! -//! `ethexe-processor` is the bottom of the execution stack. It is -//! consumed by: -//! -//! - `ethexe-compute` — calls [`Processor::process_programs`] and -//! [`Processor::process_code`] through its `ProcessorExt` trait (the -//! trait is defined in `ethexe-compute`, together with a direct impl -//! for [`Processor`]). Compute is what the service layer talks to — -//! the processor itself is never polled as a stream and emits no -//! events. -//! - `ethexe-rpc` — uses [`OverlaidProcessor`] (obtained via -//! [`Processor::overlaid`]) to simulate message execution against an -//! overlaid database for read-only reply queries. -//! - `ethexe-service` — constructs the `Processor` instance at startup -//! and hands it to `ComputeService`. -//! -//! ## Entry points -//! -//! | Method | Purpose | -//! |-------------------------------------------|-------------------------------------------------------------------------| -//! 
| [`Processor::process_code`] | Validate + instrument a WASM blob. Synchronous, does not touch the DB. | -//! | [`Processor::process_programs`] | Execute an ethexe block: events → tasks → queues. Main async workflow. | -//! | [`Processor::overlaid`] | Wrap `self` into an [`OverlaidProcessor`] backed by an overlaid DB. | -//! | [`OverlaidProcessor::execute_for_reply`] | Simulate a single incoming message and return the reply. | -//! -//! ## `process_programs` contract -//! -//! Given an [`ExecutableData`] (block header, program states, schedule, -//! injected transactions, block request events, and optional gas -//! allowance), [`Processor::process_programs`] runs three sequential -//! stages and returns a [`FinalizedBlockTransitions`]: -//! -//! 1. Handle injected transactions and block events: injected transactions -//! are appended to program injected queues; router and mirror events -//! drive the corresponding state mutations (program creation, balance -//! top-up, message queueing, value claims, etc.). -//! 2. Run scheduled tasks that are due at the current block height -//! (mailbox expiry cleanup, reservation removal, etc.). -//! 3. Drain program message queues: the injected queue first, then the -//! canonical queue — unless a soft limit kicks in before that. -//! This stage is skipped entirely when `gas_allowance` is `None`. -//! Promises are collected only during the injected pass; the -//! canonical pass runs with the promise sender dropped, so any code -//! that introduces new promise emission points must make sure they -//! are reached from the injected queue. -//! -//! The third stage uses a chunked parallel executor: non-empty program -//! queues are partitioned by queue size into chunks of up to -//! `ProcessorConfig::chunk_size` programs, and the programs inside a -//! chunk run in parallel, each with its own wasmtime `Store`. -//! Determinism-relevant property: because programs in a chunk run -//! 
simultaneously, the block gas allowance counter is charged by the -//! **maximum** gas spent in the chunk, not the sum. Execution stops when -//! all queues are empty, the gas allowance is exhausted, or one of the -//! configured soft limits (outgoing messages, payload bytes, call -//! replies, program modifications) kicks in. -//! -//! ## Overlay execution -//! -//! [`OverlaidProcessor`] wraps a [`Processor`] whose database is swapped -//! for an overlaid, copy-on-write view. Mutations are kept in memory and -//! discarded when the overlay is dropped, so the underlying state is -//! never touched. [`OverlaidProcessor::execute_for_reply`] synthesizes a -//! single [`MessageQueueingRequestedEvent`] into the target program's -//! canonical queue and runs against this overlay with the following -//! simulation semantics: -//! -//! - the target program's canonical queue is trimmed to only the -//! synthetic dispatch, so the simulation starts from a clean slate -//! for the target; -//! - every other program whose queue is about to be executed has that -//! queue cleared and its scheduled run skipped — non-target programs -//! only ever execute messages produced during the simulation; -//! - when a journal emits a message to another program, the receiver's -//! queue is cleared first so only the cascading message is processed -//! there; -//! - as soon as a reply to the synthetic message is seen, the -//! simulation short-circuits without performing further -//! queue-clearing work. -//! -//! ## Lazy pages -//! -//! Program memory is not materialized up front. Pages are protected -//! after instance setup and loaded from the database on the first -//! access fault, through the [`gear_lazy_pages`] integration. -//! -//! ## Determinism and error handling -//! -//! - The chunk partitioning is a deterministic function of the program -//! → queue-size map and `chunk_size`, so every node executing the -//! same block arrives at the same partitioning. -//! 
- The host-side gas counter increments by the maximum gas spent in -//! the chunk; WASM-side state hashing runs inside the WASM runtime -//! and does not depend on chunk layout. -//! - WASM traps (out-of-bounds memory, `unreachable`, wasmtime errors) -//! and host-function panics routed through the `sp_wasm_interface` -//! panic hook are surfaced as [`InstanceError::Wasmtime`] and -//! propagated out of [`Processor::process_programs`]. Raw Rust panics -//! inside a chunk worker are caught and re-raised on the caller via -//! `std::panic::resume_unwind` — they unwind the async task, they do -//! not become an `Err` variant. -//! -//! ## Configuration -//! -//! [`ProcessorConfig`] currently exposes a single knob, `chunk_size`, -//! which controls the number of programs executed in parallel per pass. -//! The default is [`DEFAULT_CHUNK_SIZE`] (16). -//! -//! ## When modifying this crate -//! -//! - Processor must be deterministic. -//! - Changing Processor logic may cause consensus mismatches in already -//! deployed ethexe networks, so be careful when modifying the -//! processing pipeline, and always check backwards compatibility with -//! deployed networks. -//! - Processor is designed to write only in CAS, it must NEVER modify -//! key-value storage from Database. - -pub use host::InstanceError; +//! Program's execution service for eGPU. use core::num::NonZero; use ethexe_common::{ @@ -167,7 +31,7 @@ use ethexe_runtime_common::{ state::Storage, }; use gear_core::{ - code::{CodeMetadata, InstrumentedCode}, + code::{CodeMetadata, InstrumentationStatus, InstrumentedCode}, ids::prelude::CodeIdExt, rpc::ReplyInfo, }; @@ -176,11 +40,13 @@ use handling::{ProcessingHandler, overlaid::OverlaidRunContext, run::CommonRunCo use host::InstanceCreator; use tokio::sync::mpsc; +pub use host::InstanceError; + mod handling; mod host; + #[cfg(test)] mod tests; -mod thread_pool; // Default amount of programs in one chunk to be processed in parallel. 
pub const DEFAULT_CHUNK_SIZE: NonZero = NonZero::new(16).unwrap(); @@ -271,10 +137,7 @@ impl Processor { OverlaidProcessor(self) } - pub async fn process_code( - &mut self, - code_and_id: CodeAndIdUnchecked, - ) -> Result { + pub fn process_code(&mut self, code_and_id: CodeAndIdUnchecked) -> Result { log::debug!("Processing upload code {code_and_id:?}"); let CodeAndIdUnchecked { code, code_id } = code_and_id; @@ -286,27 +149,28 @@ impl Processor { }); } - let mut instance = self.creator.instantiate()?; - let valid = thread_pool::spawn(move || -> Result<_> { - let instrumented_code = instance.instrument(&code)?; - let info = instrumented_code.map(|(instrumented_code, code_metadata)| ValidCodeInfo { + let Some((instrumented_code, code_metadata)) = + self.creator.instantiate()?.instrument(&code)? + else { + return Ok(ProcessedCodeInfo { + code_id, + valid: None, + }); + }; + + let InstrumentationStatus::Instrumented { .. } = code_metadata.instrumentation_status() + else { + panic!("Instrumented code returned, but instrumentation status is not Instrumented"); + }; + + Ok(ProcessedCodeInfo { + code_id, + valid: Some(ValidCodeInfo { code, instrumented_code, code_metadata, - }); - Ok(info) + }), }) - .await?; - - if let Some(valid) = &valid { - let status = valid.code_metadata.instrumentation_status(); - assert!( - status.is_instrumented(), - "Instrumented code returned, but instrumentation status is not Instrumented: {status:?}" - ); - } - - Ok(ProcessedCodeInfo { code_id, valid }) } pub async fn process_programs( @@ -414,13 +278,13 @@ impl Processor { } } -#[derive(Debug, Clone, Default)] +#[derive(Clone, Default)] pub struct ProcessedCodeInfo { pub code_id: CodeId, pub valid: Option, } -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct ValidCodeInfo { pub code: Vec, pub instrumented_code: InstrumentedCode, diff --git a/ethexe/processor/src/tests.rs b/ethexe/processor/src/tests.rs index 8f664618ab5..100d19ae1e9 100644 --- a/ethexe/processor/src/tests.rs +++ 
b/ethexe/processor/src/tests.rs @@ -73,7 +73,7 @@ mod utils { (code_id, code) } - pub async fn upload_code(processor: &mut Processor, code: &[u8]) -> CodeId { + pub fn upload_code(processor: &mut Processor, code: &[u8]) -> CodeId { let code_id = CodeId::generate(code); let ValidCodeInfo { @@ -85,7 +85,6 @@ mod utils { code: code.to_vec(), code_id, }) - .await .expect("failed to process code") .valid .expect("code is invalid"); @@ -99,7 +98,7 @@ mod utils { code_id } - pub async fn setup_test_env_and_load_codes( + pub fn setup_test_env_and_load_codes( codes: [&[u8]; N], ) -> (Processor, BlockChain, [CodeId; N]) { let db = Database::memory(); @@ -108,7 +107,7 @@ mod utils { let mut code_ids = Vec::new(); for code in codes { - code_ids.push(upload_code(&mut processor, code).await); + code_ids.push(upload_code(&mut processor, code)); } (processor, chain, code_ids.try_into().unwrap()) @@ -136,8 +135,7 @@ mod utils { } pub async fn simple_init_test(code: impl AsRef<[u8]>) -> InBlockTransitions { - let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([code.as_ref()]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_ref()]); let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -182,8 +180,7 @@ mod utils { async fn ping_init() { init_logger(); - let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); // Empty processing for block1 let executable = ExecutableData { @@ -273,8 +270,8 @@ async fn ping_init() { .expect("failed to process send message"); } -#[tokio::test] -async fn handle_new_code_valid() { +#[test] +fn handle_new_code_valid() { init_logger(); let mut processor = Processor::new(Database::memory()).expect("failed to create processor"); @@ -286,7 +283,6 @@ async fn handle_new_code_valid() { code: 
code.clone(), code_id, }) - .await .map(|res| (res.code_id, res.valid.expect("code must be valid"))) .unwrap(); @@ -300,8 +296,8 @@ async fn handle_new_code_valid() { ); } -#[tokio::test] -async fn handle_new_code_invalid() { +#[test] +fn handle_new_code_invalid() { init_logger(); let mut processor = Processor::new(Database::memory()).expect("failed to create processor"); @@ -311,7 +307,6 @@ async fn handle_new_code_invalid() { assert!( processor .process_code(CodeAndIdUnchecked { code, code_id }) - .await .expect("failed to call runtime api") .valid .is_none() @@ -323,7 +318,7 @@ async fn ping_pong() { init_logger(); let (mut processor, chain, [code_id, ..]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]).await; + setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let user_id = ActorId::from(10); @@ -403,7 +398,7 @@ async fn async_and_ping() { }; let (mut processor, chain, [ping_code_id, upload_code_id, ..]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]).await; + setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -542,7 +537,7 @@ async fn many_waits() { let (_, code) = wat_to_wasm(wat.as_str()); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]); let block1 = chain.blocks[1].to_simple(); let wake_block = chain.blocks[1 + blocks_to_wait].to_simple(); @@ -694,7 +689,7 @@ async fn overlay_execution() { }; let (mut processor, chain, [ping_code_id, async_code_id]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]).await; + setup_test_env_and_load_codes([demo_ping::WASM_BINARY, demo_async::WASM_BINARY]); let block1 = 
chain.blocks[1].to_simple(); // ----------------------------------------------------------------------------- @@ -924,8 +919,7 @@ async fn injected_ping_pong() { init_logger(); let (promise_out_tx, mut promise_receiver) = mpsc::unbounded_channel(); - let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let user_1 = ActorId::from(10); @@ -1035,8 +1029,7 @@ async fn injected_prioritized_over_canonical() { init_logger(); let (promise_out_tx, mut promise_receiver) = mpsc::unbounded_channel(); - let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let canonical_user = ActorId::from(10); @@ -1148,8 +1141,7 @@ async fn injected_prioritized_over_canonical() { async fn executable_balance_charged() { init_logger(); - let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -1236,7 +1228,7 @@ async fn executable_balance_injected_panic_not_charged() { let (promise_out_tx, mut promise_receiver) = mpsc::unbounded_channel(); let (mut processor, chain, [code_id]) = - setup_test_env_and_load_codes([demo_panic_payload::WASM_BINARY]).await; + setup_test_env_and_load_codes([demo_panic_payload::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let user_id = ActorId::from(10); @@ -1369,8 +1361,7 @@ async fn insufficient_executable_balance_still_charged() { init_logger(); - let (mut processor, chain, [code_id]) = - 
setup_test_env_and_load_codes([demo_ping::WASM_BINARY]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([demo_ping::WASM_BINARY]); let block1 = chain.blocks[1].to_simple(); let mut handler = setup_handler(processor.db.clone(), block1); @@ -1541,7 +1532,7 @@ async fn injected_and_events_then_tasks_then_queues() { "#; let (_, code) = wat_to_wasm(wat); - let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]).await; + let (mut processor, chain, [code_id]) = setup_test_env_and_load_codes([code.as_slice()]); let task_user = ActorId::from(10); let injected_user_pk = PrivateKey::random(); diff --git a/ethexe/service/src/lib.rs b/ethexe/service/src/lib.rs index 5949f6757ee..64f8330a8ef 100644 --- a/ethexe/service/src/lib.rs +++ b/ethexe/service/src/lib.rs @@ -50,9 +50,8 @@ use crate::config::{Config, ConfigPublicKey}; use alloy::{ node_bindings::{Anvil, AnvilInstance}, providers::{ProviderBuilder, RootProvider, ext::AnvilApi}, - rpc::types::anvil::Metadata, }; -use anyhow::{Context, Result, bail}; +use anyhow::{Context, Result}; use async_trait::async_trait; use ethexe_blob_loader::{BlobLoader, BlobLoaderEvent, BlobLoaderService, ConsensusLayerConfig}; use ethexe_common::{ @@ -78,7 +77,7 @@ use ethexe_processor::{ProcessedCodeInfo, Processor, ProcessorConfig, ValidCodeI use ethexe_prometheus::{PrometheusEvent, PrometheusService}; use ethexe_rpc::{RpcEvent, RpcServer}; use ethexe_service_utils::{OptionFuture as _, OptionStreamNext as _}; -use futures::{FutureExt, StreamExt, stream::FuturesUnordered}; +use futures::{StreamExt, stream::FuturesUnordered}; use gprimitives::{ActorId, CodeId, H256}; use gsigner::secp256k1::{Address, PrivateKey, PublicKey, Signer}; use std::{ @@ -158,21 +157,6 @@ pub struct Service { impl Service { /// Number of reserved dev accounts (deployer, validator). const RESERVED_DEV_ACCOUNTS: u32 = 2; - /// Expected Foundry toolchain commit sha. 
- const FOUNDRY_TOOLCHAIN_COMMIT_SHA: &str = "f1abb2ca347187bb6dea8c3881ca44ce50aab1e7"; - - fn check_foundry_toolchain_version(client_commit_sha: Option) -> Result<()> { - if let Some(client_commit_sha) = client_commit_sha - && client_commit_sha != Self::FOUNDRY_TOOLCHAIN_COMMIT_SHA - { - bail!( - "Commit hash mismatch in Foundry toolchain! Please use: `foundryup --install nightly-{commit_sha} --force`.", - commit_sha = Self::FOUNDRY_TOOLCHAIN_COMMIT_SHA, - ); - } - - Ok(()) - } pub async fn configure_dev_environment( key_path: PathBuf, @@ -229,12 +213,6 @@ impl Service { .connect(anvil.ws_endpoint().as_str()) .await?; - let Metadata { - client_commit_sha, .. - } = provider.anvil_metadata().await?; - - Self::check_foundry_toolchain_version(client_commit_sha)?; - const ETHER: u128 = 1_000_000_000_000_000_000; let balance = 10_000 * ETHER; let balance = balance.try_into().expect("infallible"); @@ -343,7 +321,7 @@ impl Service { "👶 Genesis block hash wasn't found. Call router.lookupGenesisHash() first" ); - bail!("Failed to query valid genesis hash"); + anyhow::bail!("Failed to query valid genesis hash"); } else { log::info!("👶 Genesis block hash: {genesis_block_hash:?}"); } @@ -577,7 +555,7 @@ impl Service { fetching_result = network_fetcher.maybe_next_some() => Event::Fetching(fetching_result), event = prometheus.maybe_next_some() => event.into(), _ = rpc_handle.as_mut().maybe() => { - bail!("`RPCWorker` has terminated, shutting down...") + anyhow::bail!("`RPCWorker` has terminated, shutting down...") } }; @@ -773,7 +751,7 @@ impl Service { } } PrometheusEvent::ServerClosed(result) => { - bail!("Prometheus server closed with result: {result:?}"); + anyhow::bail!("Prometheus server closed with result: {result:?}"); } }, Event::Fetching(result) => { @@ -828,13 +806,11 @@ impl GenesisInitializer for GenesisInitializerFromFile { fn process_code(&mut self, code_id: CodeId, code: Vec) -> ethexe_db::CodeProcessingFuture { let mut cloned_processor = self.processor.clone(); 
- async move { + let func = move || { let ProcessedCodeInfo { code_id: _, valid: info, - } = cloned_processor - .process_code(CodeAndIdUnchecked { code_id, code }) - .await?; + } = cloned_processor.process_code(CodeAndIdUnchecked { code_id, code })?; let Some(ValidCodeInfo { code: _, @@ -846,7 +822,7 @@ impl GenesisInitializer for GenesisInitializerFromFile { }; Ok(Some((instrumented_code, code_metadata))) - } - .boxed() + }; + Box::pin(async move { func() }) } } diff --git a/ethexe/service/src/tests/utils/env.rs b/ethexe/service/src/tests/utils/env.rs index 020d8a50b36..aa70a06af27 100644 --- a/ethexe/service/src/tests/utils/env.rs +++ b/ethexe/service/src/tests/utils/env.rs @@ -26,7 +26,7 @@ use crate::{ use alloy::{ node_bindings::{Anvil, AnvilInstance}, providers::{ProviderBuilder, RootProvider, ext::AnvilApi}, - rpc::types::anvil::{Metadata, MineOptions}, + rpc::types::anvil::MineOptions, }; use anyhow::Context; use ethexe_blob_loader::{BlobLoader, BlobLoaderService, ConsensusLayerConfig}; @@ -162,19 +162,13 @@ impl TestEnv { let anvil = anvil.spawn(); - let provider: RootProvider = ProviderBuilder::default() - .connect(anvil.ws_endpoint().as_str()) - .await - .expect("failed to connect to anvil"); - - let Metadata { - client_commit_sha, .. - } = provider.anvil_metadata().await?; - - Service::check_foundry_toolchain_version(client_commit_sha)?; - // By default, anvil set system time as block time. For testing purposes we need to have constant increment. 
if !continuous_block_generation { + let provider: RootProvider = ProviderBuilder::default() + .connect(anvil.ws_endpoint().as_str()) + .await + .expect("failed to connect to anvil"); + provider .anvil_set_block_timestamp_interval(block_time.as_secs()) .await diff --git a/ethexe/service/src/tests/utils/mod.rs b/ethexe/service/src/tests/utils/mod.rs index b5d8a0f74f6..6df0f8a0e46 100644 --- a/ethexe/service/src/tests/utils/mod.rs +++ b/ethexe/service/src/tests/utils/mod.rs @@ -20,7 +20,6 @@ pub use env::*; use ethexe_db::{GenesisInitializer, dump::StateDump}; use ethexe_processor::Processor; pub use events::*; -use futures::FutureExt; mod env; mod events; @@ -52,16 +51,15 @@ impl GenesisInitializer for GenesisInitializerFromDump { code: Vec, ) -> ethexe_db::CodeProcessingFuture { let mut cloned_processor = self.processor.clone(); - async move { + let func = move || { let info = cloned_processor - .process_code(ethexe_common::CodeAndIdUnchecked { code_id, code }) - .await?; + .process_code(ethexe_common::CodeAndIdUnchecked { code_id, code })?; let Some(valid) = info.valid else { return Ok(None); }; Ok(Some((valid.instrumented_code, valid.code_metadata))) - } - .boxed() + }; + Box::pin(async move { func() }) } } diff --git a/utils/gear-workspace-hack/Cargo.toml b/utils/gear-workspace-hack/Cargo.toml index 9b18b02b08c..46154589a6e 100644 --- a/utils/gear-workspace-hack/Cargo.toml +++ b/utils/gear-workspace-hack/Cargo.toml @@ -217,17 +217,17 @@ features = ["test-helpers"] [dependencies] aes = { version = "0.8", default-features = false, features = ["zeroize"] } ahash = { version = "0.8" } -alloy = { version = "2", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } +alloy = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } 
alloy-chains = { version = "0.2" } -alloy-consensus = { version = "2", default-features = false, features = ["kzg", "serde"] } -alloy-contract = { version = "2", default-features = false, features = ["pubsub"] } -alloy-eips = { version = "2", default-features = false, features = ["kzg", "serde", "std"] } +alloy-consensus = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde"] } +alloy-contract = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["pubsub"] } +alloy-eips = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde", "std"] } alloy-json-abi = { version = "1", features = ["serde_json"] } alloy-primitives = { version = "1", features = ["k256", "rlp", "serde"] } -alloy-provider = { version = "2", default-features = false, features = ["anvil-node", "ws"] } -alloy-rpc-client = { version = "2", default-features = false, features = ["reqwest", "ws"] } -alloy-rpc-types = { version = "2", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } -alloy-signer-local = { version = "2", default-features = false, features = ["mnemonic"] } +alloy-provider = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil-node", "ws"] } +alloy-rpc-client = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["reqwest", "ws"] } +alloy-rpc-types = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } +alloy-signer-local = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["mnemonic"] } alloy-sol-type-parser = { version = 
"1", default-features = false, features = ["serde", "std"] } alloy-sol-types = { version = "1", features = ["json"] } anyhow = { version = "1" } @@ -489,17 +489,17 @@ zeroize = { version = "1", features = ["derive", "std"] } [build-dependencies] aes = { version = "0.8", default-features = false, features = ["zeroize"] } ahash = { version = "0.8" } -alloy = { version = "2", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } +alloy = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", features = ["kzg", "node-bindings", "provider-anvil-api", "provider-ws", "rpc-types-beacon", "rpc-types-eth", "signer-mnemonic"] } alloy-chains = { version = "0.2" } -alloy-consensus = { version = "2", default-features = false, features = ["kzg", "serde"] } -alloy-contract = { version = "2", default-features = false, features = ["pubsub"] } -alloy-eips = { version = "2", default-features = false, features = ["kzg", "serde", "std"] } +alloy-consensus = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde"] } +alloy-contract = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["pubsub"] } +alloy-eips = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["kzg", "serde", "std"] } alloy-json-abi = { version = "1", features = ["serde_json"] } alloy-primitives = { version = "1", features = ["k256", "rlp", "serde"] } -alloy-provider = { version = "2", default-features = false, features = ["anvil-node", "ws"] } -alloy-rpc-client = { version = "2", default-features = false, features = ["reqwest", "ws"] } -alloy-rpc-types = { version = "2", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } -alloy-signer-local = { version = "2", default-features 
= false, features = ["mnemonic"] } +alloy-provider = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil-node", "ws"] } +alloy-rpc-client = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["reqwest", "ws"] } +alloy-rpc-types = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["anvil", "beacon", "eth", "kzg"] } +alloy-signer-local = { git = "https://github.com/gear-tech/alloy.git", branch = "better-gas-estimation", default-features = false, features = ["mnemonic"] } alloy-sol-macro = { version = "1", default-features = false, features = ["json"] } alloy-sol-macro-expander = { version = "1", default-features = false, features = ["json"] } alloy-sol-macro-input = { version = "1", default-features = false, features = ["json"] } From 7f51525d8564cc0b8bebb2d771cd257e49c18354 Mon Sep 17 00:00:00 2001 From: playX18 Date: Mon, 20 Apr 2026 09:58:20 +0700 Subject: [PATCH 7/7] fix merge conflict --- ethexe/compute/src/compute.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethexe/compute/src/compute.rs b/ethexe/compute/src/compute.rs index 2561c1d5113..48a2f1257a2 100644 --- a/ethexe/compute/src/compute.rs +++ b/ethexe/compute/src/compute.rs @@ -615,7 +615,7 @@ mod tests { let db = Database::memory(); let mut processor = Processor::new(db.clone()).unwrap(); let ping_code_id = - test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); + test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db).await; let ping_id = ActorId::from(0x10000); let blockchain = chain.setup(&db); let blockchain_len = blockchain.blocks.len() - 1; @@ -714,7 +714,7 @@ mod tests { let mut processor = Processor::new(db.clone()).unwrap(); let ping_code_id = - test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db); + 
test_utils::upload_code(&mut processor, demo_ping::WASM_BINARY, &db).await; let ping_id = ActorId::from(0x10000); let blockchain = chain.setup(&db);