Skip to content
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions cmd/ethrex/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -280,6 +280,14 @@ pub struct Options {
help_heading = "Block building options"
)]
pub gas_limit: u64,
#[arg(
long = "generate-witness",
action = ArgAction::SetTrue,
default_value = "false",
help = "Generate execution witnesses for received blocks.",
help_heading = "Node options"
)]
pub generate_witness: bool,
}

impl Options {
Expand Down Expand Up @@ -355,6 +363,7 @@ impl Default for Options {
lookup_interval: Default::default(),
extra_data: get_minimal_client_version(),
gas_limit: DEFAULT_BUILDER_GAS_CEIL,
generate_witness: false,
}
}
}
Expand Down
1 change: 1 addition & 0 deletions cmd/ethrex/initializers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,7 @@ pub async fn init_rpc_api(
log_filter_handler,
opts.gas_limit,
opts.extra_data.clone(),
opts.generate_witness,
);

tracker.spawn(rpc_api);
Expand Down
1 change: 1 addition & 0 deletions cmd/ethrex/l2/initializers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ fn init_rpc_api(
rollup_store,
log_filter_handler,
gas_ceil.unwrap_or(DEFAULT_BUILDER_GAS_CEIL),
opts.generate_witness,
);

tracker.spawn(rpc_api);
Expand Down
2 changes: 2 additions & 0 deletions crates/l2/networking/rpc/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ pub async fn start_api(
rollup_store: StoreRollup,
log_filter_handler: Option<reload::Handle<EnvFilter, Registry>>,
gas_ceil: u64,
generate_witness: bool,
) -> Result<(), RpcErr> {
// TODO: Refactor how filters are handled,
// filters are used by the filters endpoints (eth_newFilter, eth_getFilterChanges, ...etc)
Expand All @@ -110,6 +111,7 @@ pub async fn start_api(
log_filter_handler,
gas_ceil,
block_worker_channel,
generate_witness,
},
valid_delegation_addresses,
sponsor_pk,
Expand Down
16 changes: 16 additions & 0 deletions crates/networking/rpc/debug/execution_witness.rs
Original file line number Diff line number Diff line change
Expand Up @@ -234,6 +234,22 @@ impl RpcHandler for ExecutionWitnessRequest {
blocks.push(block);
}

if blocks.len() == 1 {
// Check if we have a cached witness for this block
let block = &blocks[0];
if let Some(witness) = context
.storage
.get_witness_by_number_and_hash(block.header.number, block.hash())?
{
let rpc_execution_witness =
RpcExecutionWitness::try_from(witness).map_err(|e| {
RpcErr::Internal(format!("Failed to create rpc execution witness {e}"))
})?;
return serde_json::to_value(rpc_execution_witness)
.map_err(|error| RpcErr::Internal(error.to_string()));
}
}

let execution_witness = context
.blockchain
.generate_witness_for_blocks(&blocks)
Expand Down
20 changes: 19 additions & 1 deletion crates/networking/rpc/engine/payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -617,7 +617,25 @@ async fn handle_new_payload_v1_v2(
}

// All checks passed, execute payload
let payload_status = try_execute_payload(block, &context, latest_valid_hash).await?;
let payload_status = try_execute_payload(block.clone(), &context, latest_valid_hash).await?;

let block_hash = block.hash();
let block_number = block.header.number;

// Generate and store witness if required
if context.generate_witness {
let witness = context
.blockchain
.generate_witness_for_blocks(&[block])
.await
.map_err(|e| {
RpcErr::Internal(format!("Failed to generate witness for new payload: {e}"))
})?;
context
.storage
.store_witness(block_hash, block_number, witness)
.await?;
}
Ok(payload_status)
}

Expand Down
3 changes: 3 additions & 0 deletions crates/networking/rpc/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,7 @@ pub struct RpcApiContext {
pub log_filter_handler: Option<reload::Handle<EnvFilter, Registry>>,
pub gas_ceil: u64,
pub block_worker_channel: UnboundedSender<(oneshot::Sender<Result<(), ChainError>>, Block)>,
pub generate_witness: bool,
}

#[derive(Debug, Clone)]
Expand Down Expand Up @@ -281,6 +282,7 @@ pub async fn start_api(
log_filter_handler: Option<reload::Handle<EnvFilter, Registry>>,
gas_ceil: u64,
extra_data: String,
generate_witness: bool,
) -> Result<(), RpcErr> {
// TODO: Refactor how filters are handled,
// filters are used by the filters endpoints (eth_newFilter, eth_getFilterChanges, ...etc)
Expand All @@ -303,6 +305,7 @@ pub async fn start_api(
log_filter_handler,
gas_ceil,
block_worker_channel,
generate_witness,
};

// Periodically clean up the active filters for the filters endpoints.
Expand Down
2 changes: 2 additions & 0 deletions crates/networking/rpc/test_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,7 @@ pub async fn start_test_api() -> tokio::task::JoinHandle<()> {
None,
DEFAULT_BUILDER_GAS_CEIL,
String::new(),
false,
)
.await
.unwrap()
Expand Down Expand Up @@ -273,6 +274,7 @@ pub async fn default_context_with_storage(storage: Store) -> RpcApiContext {
log_filter_handler: None,
gas_ceil: DEFAULT_BUILDER_GAS_CEIL,
block_worker_channel,
generate_witness: false,
}
}

Expand Down
8 changes: 7 additions & 1 deletion crates/storage/api/tables.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,12 @@ pub const STORAGE_FLATKEYVALUE: &str = "storage_flatkeyvalue";

pub const MISC_VALUES: &str = "misc_values";

pub const TABLES: [&str; 17] = [
/// Execution witnesses column family: [`Vec<u8>`] => [`Vec<u8>`]
/// - [`Vec<u8>`] key = `block_number.to_be_bytes()` (8 bytes, big-endian) followed by `block_hash.as_bytes()` (32 bytes)
/// - [`Vec<u8>`] value = `serde_json::to_vec(&witness)`
pub const EXECUTION_WITNESSES: &str = "execution_witnesses";

pub const TABLES: [&str; 18] = [
CHAIN_DATA,
ACCOUNT_CODES,
BODIES,
Expand All @@ -105,4 +110,5 @@ pub const TABLES: [&str; 17] = [
ACCOUNT_FLATKEYVALUE,
STORAGE_FLATKEYVALUE,
MISC_VALUES,
EXECUTION_WITNESSES,
];
91 changes: 87 additions & 4 deletions crates/storage/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@ use crate::{
StorageBackend,
tables::{
ACCOUNT_CODES, ACCOUNT_FLATKEYVALUE, ACCOUNT_TRIE_NODES, BLOCK_NUMBERS, BODIES,
CANONICAL_BLOCK_HASHES, CHAIN_DATA, FULLSYNC_HEADERS, HEADERS, INVALID_CHAINS,
MISC_VALUES, PENDING_BLOCKS, RECEIPTS, SNAP_STATE, STORAGE_FLATKEYVALUE,
STORAGE_TRIE_NODES, TRANSACTION_LOCATIONS,
CANONICAL_BLOCK_HASHES, CHAIN_DATA, EXECUTION_WITNESSES, FULLSYNC_HEADERS, HEADERS,
INVALID_CHAINS, MISC_VALUES, PENDING_BLOCKS, RECEIPTS, SNAP_STATE,
STORAGE_FLATKEYVALUE, STORAGE_TRIE_NODES, TRANSACTION_LOCATIONS,
},
},
apply_prefix,
Expand All @@ -26,7 +26,7 @@ use ethrex_common::{
types::{
AccountInfo, AccountState, AccountUpdate, Block, BlockBody, BlockHash, BlockHeader,
BlockNumber, ChainConfig, Code, ForkId, Genesis, GenesisAccount, Index, Receipt,
Transaction,
Transaction, block_execution_witness::ExecutionWitness,
},
utils::keccak,
};
Expand Down Expand Up @@ -58,6 +58,9 @@ pub const STATE_TRIE_SEGMENTS: usize = 2;
/// This will always be the amount yielded by snapshot reads unless there are less elements left
pub const MAX_SNAPSHOT_READS: usize = 100;

/// Maximum number of execution witnesses to keep in the database
pub const MAX_WITNESSES: u64 = 128;

// We use one constant for in-memory and another for on-disk backends.
// This is due to tests requiring state older than 128 blocks.
// TODO: unify these
Expand Down Expand Up @@ -1701,6 +1704,86 @@ impl Store {
Ok(state_root)
}

/// Builds the lookup key for the `EXECUTION_WITNESSES` table.
///
/// Layout: 8-byte big-endian block number followed by the 32-byte block
/// hash. The big-endian number prefix keeps keys ordered by block height,
/// which the cleanup scan relies on when parsing the number back out.
fn make_witness_key(block_number: u64, block_hash: &BlockHash) -> Vec<u8> {
    let mut key = block_number.to_be_bytes().to_vec();
    key.extend_from_slice(block_hash.as_bytes());
    key
}

/// Persists an execution witness for the given block, then prunes entries
/// that have fallen out of the retention window (`MAX_WITNESSES` blocks).
///
/// The witness is JSON-serialized and written under the composite
/// block-number + block-hash key produced by [`Self::make_witness_key`].
pub async fn store_witness(
    &self,
    block_hash: BlockHash,
    block_number: u64,
    witness: ExecutionWitness,
) -> Result<(), StoreError> {
    let encoded = serde_json::to_vec(&witness)?;
    self.write(
        EXECUTION_WITNESSES,
        Self::make_witness_key(block_number, &block_hash),
        encoded,
    )?;
    // Keep the table bounded: drop witnesses older than the retention window.
    self.cleanup_old_witnesses(block_number).await
}

/// Deletes stored witnesses whose block number is at or below
/// `latest_block_number - MAX_WITNESSES`, so only the most recent
/// `MAX_WITNESSES` entries are retained.
async fn cleanup_old_witnesses(&self, latest_block_number: u64) -> Result<(), StoreError> {
    // Within the retention window nothing can be stale yet; the early
    // return also guarantees the subtraction below cannot underflow.
    if latest_block_number <= MAX_WITNESSES {
        return Ok(());
    }

    let threshold = latest_block_number - MAX_WITNESSES;

    // Backend iteration is synchronous, so scan the table on a blocking
    // thread and collect the keys eligible for deletion.
    let db = self.backend.clone();
    let old_witnesses =
        tokio::task::spawn_blocking(move || -> Result<Vec<Vec<u8>>, StoreError> {
            let tx = db.begin_read()?;
            let iter = tx.prefix_iterator(EXECUTION_WITNESSES, &[])?;
            let mut old_witnesses = Vec::new();

            for result in iter {
                let (key, _) = result?;

                // Key layout: 8-byte big-endian block number + 32-byte hash.
                // Use a checked slice so a malformed (short) key surfaces as
                // a StoreError instead of panicking inside the blocking task.
                let block_num_bytes: [u8; 8] = key
                    .get(..8)
                    .and_then(|bytes| bytes.try_into().ok())
                    .ok_or_else(|| {
                        StoreError::Custom("Failed to parse block number".to_string())
                    })?;
                let block_number = u64::from_be_bytes(block_num_bytes);

                if block_number <= threshold {
                    old_witnesses.push(key.to_vec()); // Convert Box<[u8]> to Vec<u8>
                }
            }
            Ok(old_witnesses)
        })
        .await
        .map_err(|e| StoreError::Custom(format!("Task panicked: {}", e)))??;

    // Delete stale witnesses one key at a time (the backend exposes no
    // batched delete here); an empty list simply skips the loop.
    for key in old_witnesses {
        self.delete(EXECUTION_WITNESSES, key)?;
    }

    Ok(())
}

pub fn get_witness_by_number_and_hash(
&self,
block_number: u64,
block_hash: BlockHash,
) -> Result<Option<ExecutionWitness>, StoreError> {
let key = Self::make_witness_key(block_number, &block_hash);
match self.read(EXECUTION_WITNESSES, key)? {
Some(value) => {
let witness: ExecutionWitness = serde_json::from_slice(&value)?;
Ok(Some(witness))
}
None => Ok(None),
}
}

pub async fn add_initial_state(&mut self, genesis: Genesis) -> Result<(), StoreError> {
debug!("Storing initial state from genesis");

Expand Down
17 changes: 10 additions & 7 deletions docs/CLI.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ Node options:

--log.level <LOG_LEVEL>
Possible values: info, debug, trace, warn, error

[env: ETHREX_LOG_LEVEL=]
[default: INFO]

Expand All @@ -71,6 +71,9 @@ Node options:

[default: 10000]

--generate-witness
Generate execution witnesses for received blocks.

P2P options:
--bootnodes <BOOTNODE_LIST>...
Comma separated enode URLs for P2P discovery bootstrap.
Expand Down Expand Up @@ -102,14 +105,14 @@ P2P options:
[default: 1000]

--p2p.target-peers <MAX_PEERS>
Max amount of connected peers.
[default: 100]
Max amount of connected peers.

[default: 100]

--p2p.lookup-interval <INITIAL_LOOKUP_INTERVAL>
Initial Lookup Time Interval (ms) to trigger each Discovery lookup message and RLPx connection attempt.
[default: 100]

[default: 100]

RPC options:
--http.addr <ADDRESS>
Expand Down
Loading