From e32a1d96965479b7582e61e0cb2002c4f6715b89 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 14 Jul 2025 11:15:36 +0530 Subject: [PATCH 01/15] feat: make lookup table creation async in account cloner --- .../src/remote_account_cloner_worker.rs | 49 +++++++++---------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/magicblock-account-cloner/src/remote_account_cloner_worker.rs b/magicblock-account-cloner/src/remote_account_cloner_worker.rs index 6a1251f9..afe98b90 100644 --- a/magicblock-account-cloner/src/remote_account_cloner_worker.rs +++ b/magicblock-account-cloner/src/remote_account_cloner_worker.rs @@ -710,23 +710,32 @@ where // Allow the committer service to reserve pubkeys in lookup tables // that could be needed when we commit this account - // NOTE: we start reserving pubkeys so the transaction can complete while we - // clone the account. - let reserve_pubkeys_handle = if let Some(committor) = - self.changeset_committor.as_ref() - { + if let Some(committor) = self.changeset_committor.as_ref() { let committor = Arc::clone(committor); let pubkey = *pubkey; let owner = delegation_record.owner; - Some(tokio::spawn(map_committor_request_result( - committor.reserve_pubkeys_for_committee(pubkey, owner), - committor, - ))) - } else { - None - }; + tokio::spawn(async move { + match map_committor_request_result( + committor + .reserve_pubkeys_for_committee(pubkey, owner), + committor, + ) + .await + { + Ok(initiated) => { + trace!( + "Reserving lookup keys for {pubkey} took {:?}", + initiated.elapsed() + ); + } + Err(err) => { + error!("Failed to reserve lookup keys for {pubkey}: {err:?}"); + } + }; + }); + } - let sig = self.do_clone_delegated_account( + self.do_clone_delegated_account( pubkey, // TODO(GabrielePicco): Avoid cloning &Account { @@ -734,19 +743,7 @@ where ..account.clone() }, delegation_record, - )?; - - if let Some(handle) = reserve_pubkeys_handle { - let initiated = handle.await.map_err(|err| { - 
AccountClonerError::JoinError(format!("{err} {err:?}")) })??; trace!( "Reserving lookup keys for {pubkey} took {:?}", initiated.elapsed() ); } - - sig + )? } }; // Return the result From 900fc2dc96b9afd3efe6e7029a83e88a983b1599 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 14 Jul 2025 12:27:31 +0530 Subject: [PATCH 02/15] feat: add ensure_pubkeys_table method and refcount queries This commit introduces the `ensure_pubkeys_table` method to the manager. This method allows ensuring that a set of pubkeys are present in lookup tables without incrementing their reference count if they already exist. If a pubkey is not in any table, a new table is created for it, and the refcount is initialized to 1. To support this and provide better introspection, the following was added: - `get_refcount` to query the reference count of a pubkey within a specific table. - `get_pubkey_refcount` to query the reference count of a pubkey across all active tables. Integration tests are included to verify the behavior of `ensure_pubkeys_table` in different scenarios, such as when pubkeys already exist or when they are new. 
--- magicblock-table-mania/src/lookup_table_rc.rs | 15 ++ magicblock-table-mania/src/manager.rs | 48 ++++ .../tests/ix_ensure_pubkey_table.rs | 236 ++++++++++++++++++ 3 files changed, 299 insertions(+) create mode 100644 test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs diff --git a/magicblock-table-mania/src/lookup_table_rc.rs b/magicblock-table-mania/src/lookup_table_rc.rs index 5bd85622..79a9ae23 100644 --- a/magicblock-table-mania/src/lookup_table_rc.rs +++ b/magicblock-table-mania/src/lookup_table_rc.rs @@ -109,6 +109,15 @@ impl RefcountedPubkeys { .values() .any(|rc_pubkey| rc_pubkey.load(Ordering::SeqCst) > 0) } + + /// Returns the refcount of a pubkey if it exists in this table + /// - *pubkey* to query refcount for + /// - *returns* `Some(refcount)` if the pubkey exists, `None` otherwise + fn get_refcount(&self, pubkey: &Pubkey) -> Option { + self.pubkeys + .get(pubkey) + .map(|count| count.load(Ordering::SeqCst)) + } } impl Deref for RefcountedPubkeys { @@ -316,6 +325,12 @@ impl LookupTableRc { }) } + /// Returns the refcount of a pubkey if it exists in this table + /// - *pubkey* to query refcount for + /// - *returns* `Some(refcount)` if the pubkey exists, `None` otherwise + pub fn get_refcount(&self, pubkey: &Pubkey) -> Option { + self.pubkeys()?.get_refcount(pubkey) + } /// Returns `true` if the we requested to deactivate this table. /// NOTE: this doesn't mean that the deactivation period passed, thus /// the table could still be considered _deactivating_ on chain. 
diff --git a/magicblock-table-mania/src/manager.rs b/magicblock-table-mania/src/manager.rs index 8ad5d42b..fc2c1c3a 100644 --- a/magicblock-table-mania/src/manager.rs +++ b/magicblock-table-mania/src/manager.rs @@ -128,6 +128,17 @@ impl TableMania { pubkeys } + /// Returns the refcount of a pubkey if it exists in any active table + /// - *pubkey* to query refcount for + /// - *returns* `Some(refcount)` if the pubkey exists in any table, `None` otherwise + pub async fn get_pubkey_refcount(&self, pubkey: &Pubkey) -> Option { + for table in self.active_tables.read().await.iter() { + if let Some(refcount) = table.get_refcount(pubkey) { + return Some(refcount); + } + } + None + } // ----------------- // Reserve // ----------------- @@ -148,6 +159,43 @@ impl TableMania { self.reserve_new_pubkeys(authority, &remaining).await } + /// Ensures that pubkeys exist in any active table without increasing reference counts. + /// If tables for any pubkeys do not exist, creates them using the same transaction + /// logic as when reserving pubkeys. + /// + /// This method awaits the transaction outcome and returns once all pubkeys are part of tables. + /// + /// - *authority* - The authority keypair to use for creating tables if needed + /// - *pubkeys* - The pubkeys to ensure exist in tables + /// - *returns* `Ok(())` if all pubkeys are now part of tables + pub async fn ensure_pubkeys_table( + &self, + authority: &Keypair, + pubkeys: &HashSet, + ) -> TableManiaResult<()> { + let mut remaining = HashSet::new(); + + // 1. Check which pubkeys already exist in any table + for pubkey in pubkeys { + let mut found = false; + for table in self.active_tables.read().await.iter() { + if table.contains_key(pubkey) { + found = true; + break; + } + } + if !found { + remaining.insert(*pubkey); + } + } + + // 2. 
If any pubkeys dont exist, create tables for them + if !remaining.is_empty() { + self.reserve_new_pubkeys(authority, &remaining).await?; + } + + Ok(()) + } /// Tries to find a table that holds this pubkey already and reserves it. /// - *pubkey* to reserve /// - *returns* `true` if the pubkey could be reserved diff --git a/test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs b/test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs new file mode 100644 index 00000000..770bb36c --- /dev/null +++ b/test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs @@ -0,0 +1,236 @@ +use std::collections::HashSet; + +use log::*; +use solana_pubkey::Pubkey; +use solana_sdk::signature::Keypair; +use test_tools_core::init_logger; +mod utils; + +#[tokio::test] +async fn test_ensure_pubkeys_table_existing_pubkey() { + init_logger!(); + let authority = Keypair::new(); + let table_mania = utils::setup_table_mania(&authority).await; + + // Create a pubkey and reserve it first + let pubkey = Pubkey::new_unique(); + let mut pubkeys_set = HashSet::new(); + pubkeys_set.insert(pubkey); + + // Reserve the pubkey first + table_mania + .reserve_pubkeys(&authority, &pubkeys_set) + .await + .unwrap(); + + // Get refcount before ensure_pubkeys_table + let refcount_before = table_mania.get_pubkey_refcount(&pubkey).await; + debug!("Refcount before ensure: {:?}", refcount_before); + + // Now ensure the pubkey exists - should find it without creating a new table + let tables_count_before = table_mania.active_tables_count().await; + debug!("Active tables before ensure: {}", tables_count_before); + + table_mania + .ensure_pubkeys_table(&authority, &pubkeys_set) + .await + .unwrap(); + + // Get refcount after ensure_pubkeys_table + let refcount_after = table_mania.get_pubkey_refcount(&pubkey).await; + debug!("Refcount after ensure: {:?}", refcount_after); + + // Verify refcount didn't increase + assert_eq!( + refcount_before, refcount_after, + "ensure_pubkeys_table should 
not increase refcount" + ); + + let tables_count_after = table_mania.active_tables_count().await; + debug!("Active tables after ensure: {}", tables_count_after); + + // Should not have created a new table + assert_eq!(tables_count_before, tables_count_after); + + // Verify the pubkey is still in a table + let active_pubkeys = table_mania.active_table_pubkeys().await; + assert!(active_pubkeys.contains(&pubkey)); +} + +#[tokio::test] +async fn test_ensure_pubkeys_table_new_pubkey() { + init_logger!(); + let authority = Keypair::new(); + let table_mania = utils::setup_table_mania(&authority).await; + + let pubkey = Pubkey::new_unique(); + let mut pubkeys_set = HashSet::new(); + pubkeys_set.insert(pubkey); + + let tables_count_before = table_mania.active_tables_count().await; + debug!("Active tables before ensure: {}", tables_count_before); + + // Ensure the pubkey exists - should create a new table + table_mania + .ensure_pubkeys_table(&authority, &pubkeys_set) + .await + .unwrap(); + + // Get refcount after ensure_pubkeys_table + let refcount_after = table_mania.get_pubkey_refcount(&pubkey).await; + debug!("Refcount after ensure: {:?}", refcount_after); + + // Verify refcount is set to 1 (from the ensure_pubkeys_table call) + assert_eq!( + refcount_after, + Some(1), + "ensure_pubkeys_table should set refcount to 1 for new pubkeys" + ); + + let tables_count_after = table_mania.active_tables_count().await; + debug!("Active tables after ensure: {}", tables_count_after); + + // Should have created a new table + assert_eq!(tables_count_before + 1, tables_count_after); + + // Verify the pubkey is now in a table + let active_pubkeys = table_mania.active_table_pubkeys().await; + assert!(active_pubkeys.contains(&pubkey)); +} + +#[tokio::test] +async fn test_ensure_pubkeys_table_of_reserved_pubkey_doesnt_affect_ref_count() +{ + init_logger!(); + let authority = Keypair::new(); + let table_mania = utils::setup_table_mania(&authority).await; + + let pubkey = 
Pubkey::new_unique(); + let mut pubkeys_set = HashSet::new(); + pubkeys_set.insert(pubkey); + + // 1. Reserve the pubkey first + table_mania + .reserve_pubkeys(&authority, &pubkeys_set) + .await + .unwrap(); + + // assert that table refcount is 1 + let refcount_after_reserve = table_mania.get_pubkey_refcount(&pubkey).await; + debug!("Refcount after reserve: {:?}", refcount_after_reserve); + assert_eq!( + refcount_after_reserve, + Some(1), + "reserve_pubkeys should set refcount to 1" + ); + + // 2. Ensure pubkey + table_mania + .ensure_pubkeys_table(&authority, &pubkeys_set) + .await + .unwrap(); + + // assert that table refcount is still 1 + let refcount_after_ensure = table_mania.get_pubkey_refcount(&pubkey).await; + debug!("Refcount after ensure: {:?}", refcount_after_ensure); + assert_eq!( + refcount_after_ensure, + Some(1), + "ensure_pubkeys_table should not change refcount for reserved pubkey" + ); + + // Verify the pubkey is still in a table + let active_pubkeys = table_mania.active_table_pubkeys().await; + assert!(active_pubkeys.contains(&pubkey)); +} + +#[tokio::test] +async fn test_ensure_pubkeys_multiple_some_reserved_some_not_all_have_final_refcount_one( +) { + init_logger!(); + let authority = Keypair::new(); + let table_mania = utils::setup_table_mania(&authority).await; + + // Create multiple pubkeys + let reserved_pubkey1 = Pubkey::new_unique(); + let reserved_pubkey2 = Pubkey::new_unique(); + let new_pubkey1 = Pubkey::new_unique(); + let new_pubkey2 = Pubkey::new_unique(); + + // Reserve some pubkeys first + let mut reserved_pubkeys_set = HashSet::new(); + reserved_pubkeys_set.insert(reserved_pubkey1); + reserved_pubkeys_set.insert(reserved_pubkey2); + + table_mania + .reserve_pubkeys(&authority, &reserved_pubkeys_set) + .await + .unwrap(); + + // Verify reserved pubkeys have refcount 1 + let reserved_refcount1 = + table_mania.get_pubkey_refcount(&reserved_pubkey1).await; + let reserved_refcount2 = + 
table_mania.get_pubkey_refcount(&reserved_pubkey2).await; + assert_eq!(reserved_refcount1, Some(1)); + assert_eq!(reserved_refcount2, Some(1)); + + // Verify new pubkeys have no refcount yet + let new_refcount1 = table_mania.get_pubkey_refcount(&new_pubkey1).await; + let new_refcount2 = table_mania.get_pubkey_refcount(&new_pubkey2).await; + assert_eq!(new_refcount1, None); + assert_eq!(new_refcount2, None); + + // Now ensure all pubkeys (both reserved and new) in a single call + let mut all_pubkeys_set = HashSet::new(); + all_pubkeys_set.insert(reserved_pubkey1); + all_pubkeys_set.insert(reserved_pubkey2); + all_pubkeys_set.insert(new_pubkey1); + all_pubkeys_set.insert(new_pubkey2); + + table_mania + .ensure_pubkeys_table(&authority, &all_pubkeys_set) + .await + .unwrap(); + + // Verify all pubkeys have refcount 1 after ensure + let final_refcount1 = + table_mania.get_pubkey_refcount(&reserved_pubkey1).await; + let final_refcount2 = + table_mania.get_pubkey_refcount(&reserved_pubkey2).await; + let final_refcount3 = table_mania.get_pubkey_refcount(&new_pubkey1).await; + let final_refcount4 = table_mania.get_pubkey_refcount(&new_pubkey2).await; + + debug!("Final refcount reserved_pubkey1: {:?}", final_refcount1); + debug!("Final refcount reserved_pubkey2: {:?}", final_refcount2); + debug!("Final refcount new_pubkey1: {:?}", final_refcount3); + debug!("Final refcount new_pubkey2: {:?}", final_refcount4); + + assert_eq!( + final_refcount1, + Some(1), + "Reserved pubkey should maintain refcount 1" + ); + assert_eq!( + final_refcount2, + Some(1), + "Reserved pubkey should maintain refcount 1" + ); + assert_eq!( + final_refcount3, + Some(1), + "New pubkey should have refcount 1" + ); + assert_eq!( + final_refcount4, + Some(1), + "New pubkey should have refcount 1" + ); + + // Verify all pubkeys are in active tables + let active_pubkeys = table_mania.active_table_pubkeys().await; + assert!(active_pubkeys.contains(&reserved_pubkey1)); + 
assert!(active_pubkeys.contains(&reserved_pubkey2)); + assert!(active_pubkeys.contains(&new_pubkey1)); + assert!(active_pubkeys.contains(&new_pubkey2)); +} From f926854419bea8be22a8276a9549445e7a6df1aa Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 14 Jul 2025 13:57:36 +0530 Subject: [PATCH 03/15] feat: committor service handles table creation failure, converting commit stages --- .../src/commit/commit_using_args.rs | 64 +++++++++++++------ .../src/commit/common.rs | 40 ++++++++++++ .../src/commit_stage.rs | 12 ++++ magicblock-committor-service/src/error.rs | 3 + .../src/persist/error.rs | 3 + .../src/persist/types/commit_status.rs | 11 ++++ 6 files changed, 115 insertions(+), 18 deletions(-) diff --git a/magicblock-committor-service/src/commit/commit_using_args.rs b/magicblock-committor-service/src/commit/commit_using_args.rs index 6e99cf8f..7c227d2b 100644 --- a/magicblock-committor-service/src/commit/commit_using_args.rs +++ b/magicblock-committor-service/src/commit/commit_using_args.rs @@ -10,6 +10,7 @@ use super::CommittorProcessor; use crate::{ commit::common::{ get_accounts_to_undelegate, lookup_table_keys, send_and_confirm, + stages_from_lookup_table_err, }, commit_stage::{CommitSignatures, CommitStage}, persist::CommitStrategy, @@ -101,6 +102,7 @@ impl CommittorProcessor { (tm, keys) }); + let strategy = CommitStrategy::args(use_lookup); let compute_budget_ixs = me .compute_budget_config .args_process_budget() @@ -119,12 +121,21 @@ impl CommittorProcessor { Ok(sig) => sig, Err(err) => { error!("Failed to commit changeset with {} accounts using args: {:?}", committees.len(), err); - let strategy = CommitStrategy::args(use_lookup); + let sigs = err.signature().map(|sig| CommitSignatures { process_signature: sig, finalize_signature: None, undelegate_signature: None, }); + if let Some(stages) = stages_from_lookup_table_err( + &err, + &commit_infos, + strategy, + sigs, + ) { + return stages; + } + return commit_infos .into_iter() .map(|x| { @@ 
-165,18 +176,25 @@ impl CommittorProcessor { "Failed to finalize changeset using args: {:?}", err ); + + let sigs = CommitSignatures { + process_signature: process_sig, + finalize_signature: err.signature(), + undelegate_signature: None, + }; + if let Some(stages) = stages_from_lookup_table_err( + &err, + &commit_infos, + strategy, + Some(sigs), + ) { + return stages; + } + return commit_infos .into_iter() .map(|x| { - CommitStage::FailedFinalize(( - x, - CommitStrategy::args(use_lookup), - CommitSignatures { - process_signature: process_sig, - finalize_signature: err.signature(), - undelegate_signature: None, - }, - )) + CommitStage::FailedFinalize((x, strategy, sigs)) }) .collect(); } @@ -219,7 +237,7 @@ impl CommittorProcessor { .map(|x| { CommitStage::FailedUndelegate(( x, - CommitStrategy::args(use_lookup), + strategy, CommitSignatures { process_signature: process_sig, finalize_signature: finalize_sig, @@ -254,17 +272,27 @@ impl CommittorProcessor { "Failed to undelegate accounts via transaction '{}': {:?}", err, err ); + let sigs = CommitSignatures { + process_signature: process_sig, + finalize_signature: finalize_sig, + undelegate_signature: err.signature(), + }; + + if let Some(stages) = stages_from_lookup_table_err( + &err, + &commit_infos, + strategy, + Some(sigs.clone()), + ) { + return stages; + } return commit_infos .into_iter() .map(|x| { CommitStage::FailedUndelegate(( x, - CommitStrategy::args(use_lookup), - CommitSignatures { - process_signature: process_sig, - finalize_signature: finalize_sig, - undelegate_signature: err.signature(), - }, + strategy, + sigs.clone(), )) }) .collect(); @@ -282,7 +310,7 @@ impl CommittorProcessor { .map(|x| { CommitStage::Succeeded(( x, - CommitStrategy::args(use_lookup), + strategy, CommitSignatures { process_signature: process_sig, finalize_signature: finalize_sig, diff --git a/magicblock-committor-service/src/commit/common.rs b/magicblock-committor-service/src/commit/common.rs index f666ccba..5c8ecb2a 100644 --- 
a/magicblock-committor-service/src/commit/common.rs +++ b/magicblock-committor-service/src/commit/common.rs @@ -20,8 +20,11 @@ use solana_sdk::{ }; use crate::{ + commit_stage::CommitSignatures, error::{CommittorServiceError, CommittorServiceResult}, + persist::CommitStrategy, pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, + CommitInfo, CommitStage, }; pub(crate) fn lookup_table_keys( @@ -102,6 +105,17 @@ pub(crate) async fn send_and_confirm( if let Some((table_mania, keys_from_tables)) = table_mania_setup { let start = Instant::now(); + // Ensure all pubkeys have tables before proceeding + if let Err(err) = table_mania + .ensure_pubkeys_table(&authority, &keys_from_tables) + .await + { + error!("Failed to ensure table exists for pubkeys: {:?}", err); + return Err(CommittorServiceError::CouldNotCreateLookupTable( + format!("{:?}", err), + )); + } + // NOTE: we assume that all needed pubkeys were reserved earlier let address_lookup_tables = table_mania .try_get_active_address_lookup_table_accounts( @@ -205,3 +219,29 @@ pub(crate) async fn send_and_confirm( Ok(res.into_signature()) } } + +/// Checks if the error is a lookup table creation error and returns the appropriate +/// commit stages if so. Returns None if the error is not a lookup table error. 
+pub(crate) fn stages_from_lookup_table_err( + err: &CommittorServiceError, + commit_infos: &[CommitInfo], + strategy: CommitStrategy, + signatures: Option, +) -> Option> { + if let CommittorServiceError::CouldNotCreateLookupTable(_) = err { + Some( + commit_infos + .iter() + .map(|x| { + CommitStage::CouldNotCreateLookupTable(( + x.clone(), + strategy, + signatures.clone(), + )) + }) + .collect(), + ) + } else { + None + } +} diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index 41d50359..0e7eeb00 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -88,6 +88,13 @@ pub enum CommitStage { /// initialized, but then this issue was detected. PartOfTooLargeBundleToProcess(CommitInfo), + /// Failed to create the lookup table required for this commit. + /// This happens when the table mania system cannot ensure that the lookup + /// table exists for the pubkeys needed by this commit. + CouldNotCreateLookupTable( + (CommitInfo, CommitStrategy, Option), + ), + /// The commit was properly initialized and added to a chunk of instructions to process /// commits via a transaction. For large commits the buffer and chunk accounts were properly /// prepared and haven't been closed. 
@@ -211,6 +218,7 @@ impl CommitStage { | BufferAndChunkPartiallyInitialized((ci, _)) | BufferAndChunkFullyInitialized((ci, _)) | PartOfTooLargeBundleToProcess(ci) + | CouldNotCreateLookupTable((ci, _)) | FailedProcess((ci, _, _)) | PartOfTooLargeBundleToFinalize(ci) | FailedFinalize((ci, _, _)) @@ -235,6 +243,7 @@ impl CommitStage { | Failed((_, strategy)) | BufferAndChunkPartiallyInitialized((_, strategy)) | BufferAndChunkFullyInitialized((_, strategy)) + | CouldNotCreateLookupTable((_, strategy)) | FailedProcess((_, strategy, _)) | FailedFinalize((_, strategy, _)) | FailedUndelegate((_, strategy, _)) @@ -262,6 +271,9 @@ impl CommitStage { | PartOfTooLargeBundleToFinalize(ci) => { CommitStatus::PartOfTooLargeBundleToProcess(ci.bundle_id()) } + CouldNotCreateLookupTable((ci, _)) => { + CommitStatus::CouldNotCreateLookupTable(ci.bundle_id()) + } FailedProcess((ci, strategy, sigs)) => CommitStatus::FailedProcess(( ci.bundle_id(), *strategy, diff --git a/magicblock-committor-service/src/error.rs b/magicblock-committor-service/src/error.rs index dc4315dd..45ff8033 100644 --- a/magicblock-committor-service/src/error.rs +++ b/magicblock-committor-service/src/error.rs @@ -75,6 +75,9 @@ pub enum CommittorServiceError { Pubkey, solana_sdk::program_error::ProgramError, ), + + #[error("Could not create lookup table for pubkey {0}")] + CouldNotCreateLookupTable(String), } impl CommittorServiceError { diff --git a/magicblock-committor-service/src/persist/error.rs b/magicblock-committor-service/src/persist/error.rs index 1d5e7590..36238bac 100644 --- a/magicblock-committor-service/src/persist/error.rs +++ b/magicblock-committor-service/src/persist/error.rs @@ -35,4 +35,7 @@ pub enum CommitPersistError { #[error("Commit Status needs commit strategy: '{0}' ({0:?})")] CommitStatusNeedsStrategy(String), + + #[error("Could not create lookup table: '{0}' ({0:?})")] + CouldNotCreateLookupTable(String), } diff --git a/magicblock-committor-service/src/persist/types/commit_status.rs 
b/magicblock-committor-service/src/persist/types/commit_status.rs index e6964ecc..7fdc83e6 100644 --- a/magicblock-committor-service/src/persist/types/commit_status.rs +++ b/magicblock-committor-service/src/persist/types/commit_status.rs @@ -29,6 +29,8 @@ pub enum CommitStatus { /// The commit is part of a bundle that contains too many commits to be included /// in a single transaction. Thus we cannot commit any of them. PartOfTooLargeBundleToProcess(u64), + /// Failed to create or access the lookup table required for this commit. + CouldNotCreateLookupTable(u64), /// The commit was properly initialized and added to a chunk of instructions to process /// commits via a transaction. For large commits the buffer and chunk accounts were properly /// prepared and haven't been closed. @@ -60,6 +62,9 @@ impl fmt::Display for CommitStatus { CommitStatus::PartOfTooLargeBundleToProcess(bundle_id) => { write!(f, "PartOfTooLargeBundleToProcess({})", bundle_id) } + CommitStatus::CouldNotCreateLookupTable(bundle_id) => { + write!(f, "CouldNotCreateLookupTable({})", bundle_id) + } CommitStatus::FailedProcess((bundle_id, strategy, sigs)) => { write!( f, @@ -159,6 +164,9 @@ impl "PartOfTooLargeBundleToProcess" => { Ok(PartOfTooLargeBundleToProcess(get_bundle_id!())) } + "CouldNotCreateLookupTable" => { + Ok(CouldNotCreateLookupTable(get_bundle_id!())) + } "FailedProcess" => { Ok(FailedProcess((get_bundle_id!(), strategy, sigs))) } @@ -209,6 +217,7 @@ impl CommitStatus { "BufferAndChunkFullyInitialized" } PartOfTooLargeBundleToProcess(_) => "PartOfTooLargeBundleToProcess", + CouldNotCreateLookupTable(_) => "CouldNotCreateLookupTable", FailedProcess(_) => "FailedProcess", FailedFinalize(_) => "FailedFinalize", FailedUndelegate(_) => "FailedUndelegate", @@ -224,6 +233,7 @@ impl CommitStatus { | BufferAndChunkInitialized(bundle_id) | BufferAndChunkFullyInitialized(bundle_id) | PartOfTooLargeBundleToProcess(bundle_id) + | CouldNotCreateLookupTable(bundle_id) | FailedProcess((bundle_id, 
_, _)) | FailedFinalize((bundle_id, _, _)) | FailedUndelegate((bundle_id, _, _)) @@ -251,6 +261,7 @@ impl CommitStatus { | BufferAndChunkInitialized(_) | BufferAndChunkFullyInitialized(_) => CommitStrategy::FromBuffer, PartOfTooLargeBundleToProcess(_) => CommitStrategy::Undetermined, + CouldNotCreateLookupTable(_) => CommitStrategy::Undetermined, FailedProcess((_, strategy, _)) => *strategy, FailedFinalize((_, strategy, _)) => *strategy, FailedUndelegate((_, strategy, _)) => *strategy, From 05861fad03128d48b2d3188a9d10798b76666edc Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 14 Jul 2025 16:20:41 +0530 Subject: [PATCH 04/15] chore: addendum to previous for commits using buffer - also named some complex types --- .../src/commit/commit_using_args.rs | 16 +- .../src/commit/commit_using_buffer.rs | 155 ++++++++++++------ .../src/commit/committor_processor.rs | 53 +++--- .../src/commit_stage.rs | 6 +- 4 files changed, 150 insertions(+), 80 deletions(-) diff --git a/magicblock-committor-service/src/commit/commit_using_args.rs b/magicblock-committor-service/src/commit/commit_using_args.rs index 7c227d2b..aead9341 100644 --- a/magicblock-committor-service/src/commit/commit_using_args.rs +++ b/magicblock-committor-service/src/commit/commit_using_args.rs @@ -131,7 +131,7 @@ impl CommittorProcessor { &err, &commit_infos, strategy, - sigs, + sigs.clone(), ) { return stages; } @@ -139,11 +139,7 @@ impl CommittorProcessor { return commit_infos .into_iter() .map(|x| { - CommitStage::FailedProcess(( - x, - strategy, - sigs.as_ref().cloned(), - )) + CommitStage::FailedProcess((x, strategy, sigs.clone())) }) .collect(); } @@ -186,7 +182,7 @@ impl CommittorProcessor { &err, &commit_infos, strategy, - Some(sigs), + Some(sigs.clone()), ) { return stages; } @@ -194,7 +190,11 @@ impl CommittorProcessor { return commit_infos .into_iter() .map(|x| { - CommitStage::FailedFinalize((x, strategy, sigs)) + CommitStage::FailedFinalize(( + x, + strategy, + sigs.clone(), + )) }) 
.collect(); } diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs index 3e161fb8..41ba6b30 100644 --- a/magicblock-committor-service/src/commit/commit_using_buffer.rs +++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs @@ -21,11 +21,16 @@ use magicblock_rpc_client::{ MagicBlockSendTransactionConfig, }; use solana_pubkey::Pubkey; -use solana_sdk::{hash::Hash, instruction::Instruction, signer::Signer}; +use solana_sdk::{ + hash::Hash, instruction::Instruction, signature::Signature, signer::Signer, +}; use tokio::task::JoinSet; use super::{ - common::send_and_confirm, + common::{ + get_accounts_to_undelegate, send_and_confirm, + stages_from_lookup_table_err, + }, process_buffers::{ chunked_ixs_to_process_commitables_and_close_pdas, ChunkedIxsToProcessCommitablesAndClosePdasResult, @@ -33,7 +38,6 @@ use super::{ CommittorProcessor, }; use crate::{ - commit::common::get_accounts_to_undelegate, commit_stage::CommitSignatures, error::{CommitAccountError, CommitAccountResult}, finalize::{ @@ -191,12 +195,23 @@ impl CommittorProcessor { .await; commit_stages.extend(failed_process.into_iter().flat_map( - |(sig, xs)| { + |(sig, xs, err)| { let sigs = sig.map(|x| CommitSignatures { process_signature: x, finalize_signature: None, undelegate_signature: None, }); + if let Some(err) = err.as_ref() { + if let Some(stages) = stages_from_lookup_table_err( + err, + &xs, + commit_strategy, + sigs.clone(), + ) { + return stages; + } + } + xs.into_iter() .map(|x| { CommitStage::FailedProcess(( @@ -274,7 +289,37 @@ impl CommittorProcessor { ) .await; commit_stages.extend(failed_finalize.into_iter().flat_map( - |(sig, infos)| { + |(sig, infos, err)| { + fn get_sigs_for_bundle(bundle_id: u64, processed_signatures: &HashMap, finalized_sig: Option) -> CommitSignatures { + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added 
above + process_signature: *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: finalized_sig, + undelegate_signature: None, + } + } + + if let Some(err) = err.as_ref() { + if let Some(stages) = stages_from_lookup_table_err( + err, + &infos, + commit_strategy, + None, + ) { + return stages.into_iter().map(|x| match x { + CommitStage::CouldNotCreateLookupTable((commit_info, strategy, _)) => { + let bundle_id = commit_info.bundle_id(); + let sigs = get_sigs_for_bundle(bundle_id, &processed_signatures, sig); + CommitStage::CouldNotCreateLookupTable((commit_info, strategy, Some(sigs))) + }, + _ => unreachable!("stages_from_lookup_table_err always creates 'CommitStage::CouldNotCreateLookupTable'"), + }).collect::>() + } + } + infos .into_iter() .map(|x| { @@ -282,15 +327,7 @@ impl CommittorProcessor { CommitStage::FailedFinalize(( x, commit_strategy, - CommitSignatures { - // SAFETY: signatures for all bundles of succeeded process transactions - // have been added above - process_signature: *processed_signatures - .get(&bundle_id) - .unwrap(), - finalize_signature: sig, - undelegate_signature: None, - }, + get_sigs_for_bundle(bundle_id, &processed_signatures, sig), )) }) .collect::>() @@ -359,6 +396,25 @@ impl CommittorProcessor { finalize_and_undelegate.len(), "BUG: same amount of accounts to undelegate as to finalize and undelegate" ); + fn get_sigs_for_bundle( + bundle_id: u64, + processed_signatures: &HashMap, + finalized_signatures: &HashMap, + undelegated_sig: Option, + ) -> CommitSignatures { + CommitSignatures { + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above + process_signature: *processed_signatures + .get(&bundle_id) + .unwrap(), + finalize_signature: finalized_signatures + .get(&bundle_id) + .cloned(), + undelegate_signature: undelegated_sig, + } + } + let undelegate_ixs = match undelegate_commitables_ixs( &processor.magicblock_rpc_client, processor.authority.pubkey(), @@ -378,19 
+434,12 @@ impl CommittorProcessor { CommitStage::FailedUndelegate(( x.clone(), CommitStrategy::args(use_lookup), - CommitSignatures { - // SAFETY: signatures for all bundles of succeeded process transactions - // have been added above - process_signature: - *processed_signatures - .get(&bundle_id) - .unwrap(), - finalize_signature: - finalized_signatures - .get(&bundle_id) - .cloned(), - undelegate_signature: err.signature(), - }, + get_sigs_for_bundle( + bundle_id, + &processed_signatures, + &finalized_signatures, + err.signature(), + ), )) }), ); @@ -417,7 +466,30 @@ impl CommittorProcessor { commit_stages.extend( failed_undelegate.into_iter().flat_map( - |(sig, infos)| { + |(sig, infos, err)| { + + if let Some(err) = err.as_ref() { + if let Some(stages) = stages_from_lookup_table_err( + err, + &infos, + commit_strategy, + None, + ) { + return stages.into_iter().map(|x| match x { + CommitStage::CouldNotCreateLookupTable((commit_info, strategy, _)) => { + let bundle_id = commit_info.bundle_id(); + let sigs = get_sigs_for_bundle( + bundle_id, + &processed_signatures, + &finalized_signatures, + sig); + CommitStage::CouldNotCreateLookupTable((commit_info, strategy, Some(sigs))) + }, + _ => unreachable!("stages_from_lookup_table_err always creates 'CommitStage::CouldNotCreateLookupTable'"), + }).collect::>() + } + } + infos .into_iter() .map(|x| { @@ -425,19 +497,12 @@ impl CommittorProcessor { CommitStage::FailedUndelegate(( x, commit_strategy, - CommitSignatures { - // SAFETY: signatures for all bundles of succeeded process transactions - // have been added above - process_signature: - *processed_signatures - .get(&bundle_id) - .unwrap(), - finalize_signature: - finalized_signatures - .get(&bundle_id) - .cloned(), - undelegate_signature: sig, - }, + get_sigs_for_bundle( + bundle_id, + &processed_signatures, + &finalized_signatures, + sig, + ), )) }) .collect::>() @@ -1009,18 +1074,14 @@ impl CommittorProcessor { rpc_client, authority, [write_budget_ixs, 
vec![ix]].concat(), - format!("write chunk for offset {}", chunk.offset), + format!("write chunk ({} bytes)", chunk_bytes), Some(latest_blockhash), - // NOTE: We could use `processed` here and wait to get the processed status at - // least which would make things a bit slower. - // However that way we would avoid sending unnecessary transactions potentially - // since we may not see some written chunks yet when we get the chunks account. MagicBlockSendTransactionConfig::ensure_processed(), None, ) .await .inspect_err(|err| { - error!("{:?}", err); + warn!("{:?}", err); }) }); } diff --git a/magicblock-committor-service/src/commit/committor_processor.rs b/magicblock-committor-service/src/commit/committor_processor.rs index e78c8c17..ca9875ea 100644 --- a/magicblock-committor-service/src/commit/committor_processor.rs +++ b/magicblock-committor-service/src/commit/committor_processor.rs @@ -26,7 +26,7 @@ use crate::{ commit_strategy::{split_changesets_by_commit_strategy, SplitChangesets}, compute_budget::{ComputeBudget, ComputeBudgetConfig}, config::ChainConfig, - error::CommittorServiceResult, + error::{CommittorServiceError, CommittorServiceResult}, persist::{ BundleSignatureRow, CommitPersister, CommitStatusRow, CommitStrategy, }, @@ -341,10 +341,7 @@ impl CommittorProcessor { chunked_close_ixs: Option>>, table_mania: Option<&TableMania>, owners: &HashMap, - ) -> ( - Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>, - Vec<(Option, Vec)>, - ) { + ) -> (ProcessIxChunkSuccesses, ProcessIxChunkFailures) { let latest_blockhash = match self.magicblock_rpc_client.get_latest_blockhash().await { Ok(bh) => bh, @@ -364,6 +361,7 @@ impl CommittorProcessor { .into_iter() .map(|ixs| ixs.commit_info) .collect::>(), + None::, ) }) .collect::>(); @@ -372,11 +370,8 @@ impl CommittorProcessor { }; let mut join_set = JoinSet::new(); - let successes = Arc::< - Mutex)>>, - >::default(); - let failures = - Arc::, Vec)>>>::default(); + let successes = 
ProcessIxChunkSuccessesRc::default(); + let failures = ProcessIxChunkFailuresRc::default(); for ixs_chunk in ixs_chunks { let authority = self.authority.insecure_clone(); let rpc_client = self.magicblock_rpc_client.clone(); @@ -439,9 +434,7 @@ impl CommittorProcessor { if let Some(latest_blockhash) = latest_blockhash { let mut join_set = JoinSet::new(); - let failures = Arc::< - Mutex, Vec)>>, - >::default(); + let failures = ProcessIxChunkFailuresRc::default(); for ixs_chunk in chunked_close_ixs { let authority = self.authority.insecure_clone(); let rpc_client = self.magicblock_rpc_client.clone(); @@ -450,7 +443,7 @@ impl CommittorProcessor { let compute_budget = self.compute_budget_config.buffer_close_budget(); // We ignore close successes - let successes = Default::default(); + let successes = ProcessIxChunkSuccessesRc::default(); // We only log close failures since the commit was processed successfully let failures = failures.clone(); join_set.spawn(process_ixs_chunk( @@ -489,6 +482,16 @@ impl CommittorProcessor { } } +pub(crate) type ProcessIxChunkSuccesses = + Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>; +pub(crate) type ProcessIxChunkSuccessesRc = Arc>; +pub(crate) type ProcessIxChunkFailures = Vec<( + Option, + Vec, + Option, +)>; +pub(crate) type ProcessIxChunkFailuresRc = Arc>; + /// Processes a single chunk of instructions, sending them as a transaction. /// Updates the shared success or failure lists based on the transaction outcome. 
#[allow(clippy::type_complexity, clippy::too_many_arguments)] @@ -497,10 +500,8 @@ pub(crate) async fn process_ixs_chunk( compute_budget: ComputeBudget, authority: Keypair, rpc_client: MagicblockRpcClient, - successes: Arc< - Mutex)>>, - >, - failures: Arc, Vec)>>>, + successes: ProcessIxChunkSuccessesRc, + failures: ProcessIxChunkFailuresRc, table_mania: Option, owners: HashMap, latest_blockhash: Hash, @@ -546,14 +547,22 @@ pub(crate) async fn process_ixs_chunk( commit_infos.len(), err ); + + // Check if this is a lookup table error + let lookup_table_err = matches!( + err, + CommittorServiceError::CouldNotCreateLookupTable(_) + ); + let commit_infos = commit_infos .into_iter() .map(|(commit_info, _)| commit_info) .collect(); - failures - .lock() - .expect("ix failures mutex poisoned") - .push((err.signature(), commit_infos)); + failures.lock().expect("ix failures mutex poisoned").push(( + err.signature(), + commit_infos, + lookup_table_err.then_some(err), + )); } } } diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index 0e7eeb00..131badd5 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -218,7 +218,7 @@ impl CommitStage { | BufferAndChunkPartiallyInitialized((ci, _)) | BufferAndChunkFullyInitialized((ci, _)) | PartOfTooLargeBundleToProcess(ci) - | CouldNotCreateLookupTable((ci, _)) + | CouldNotCreateLookupTable((ci, _, _)) | FailedProcess((ci, _, _)) | PartOfTooLargeBundleToFinalize(ci) | FailedFinalize((ci, _, _)) @@ -243,7 +243,7 @@ impl CommitStage { | Failed((_, strategy)) | BufferAndChunkPartiallyInitialized((_, strategy)) | BufferAndChunkFullyInitialized((_, strategy)) - | CouldNotCreateLookupTable((_, strategy)) + | CouldNotCreateLookupTable((_, strategy, _)) | FailedProcess((_, strategy, _)) | FailedFinalize((_, strategy, _)) | FailedUndelegate((_, strategy, _)) @@ -271,7 +271,7 @@ impl CommitStage { | 
PartOfTooLargeBundleToFinalize(ci) => { CommitStatus::PartOfTooLargeBundleToProcess(ci.bundle_id()) } - CouldNotCreateLookupTable((ci, _)) => { + CouldNotCreateLookupTable((ci, _, _)) => { CommitStatus::CouldNotCreateLookupTable(ci.bundle_id()) } FailedProcess((ci, strategy, sigs)) => CommitStatus::FailedProcess(( From 080b4472d943c96a2ad31dfc92cdd2f83dcb0bc8 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Mon, 14 Jul 2025 16:31:34 +0530 Subject: [PATCH 05/15] ci: temporarily add CI running ix tests on this branch --- .../workflows/ci-test-integration-branch.yml | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 .github/workflows/ci-test-integration-branch.yml diff --git a/.github/workflows/ci-test-integration-branch.yml b/.github/workflows/ci-test-integration-branch.yml new file mode 100644 index 00000000..09d8421d --- /dev/null +++ b/.github/workflows/ci-test-integration-branch.yml @@ -0,0 +1,32 @@ +on: + push: + branches: [thlorenz/committor-improve-table-speed] + +name: Run CI - Integration Tests (Branch) + +jobs: + run_make_ci_test: + runs-on: extra-large + steps: + - name: Checkout this magicblock-validator + uses: actions/checkout@v2 + with: + path: magicblock-validator + + - uses: ./magicblock-validator/.github/actions/setup-build-env + with: + build_cache_key_name: "magicblock-validator-ci-test-integration-branch-v000" + rust_toolchain_release: "1.84.1" + github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - uses: ./magicblock-validator/.github/actions/setup-solana + + - name: Run integration tests + run: | + sudo prlimit --pid $$ --nofile=1048576:1048576 + sudo sysctl fs.inotify.max_user_instances=1280 + sudo sysctl fs.inotify.max_user_watches=655360 + make ci-test-integration + shell: bash + working-directory: magicblock-validator From 2d7d1402c7b04300cd74f23c65e3e92f39554e51 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 09:33:25 +0530 Subject: 
[PATCH 06/15] chore: log ix test name on success and failures --- test-integration/test-runner/bin/run_tests.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs index b3d33881..c40e880c 100644 --- a/test-integration/test-runner/bin/run_tests.rs +++ b/test-integration/test-runner/bin/run_tests.rs @@ -57,15 +57,18 @@ pub fn main() { }; // Assert that all tests passed - assert_cargo_tests_passed(security_output); - assert_cargo_tests_passed(scenarios_output); - assert_cargo_tests_passed(cloning_output); - assert_cargo_tests_passed(issues_frequent_commits_output); - assert_cargo_tests_passed(restore_ledger_output); - assert_cargo_tests_passed(magicblock_api_output); - assert_cargo_tests_passed(table_mania_output); - assert_cargo_tests_passed(committor_output); - assert_cargo_tests_passed(magicblock_pubsub_output); + assert_cargo_tests_passed(security_output, "security"); + assert_cargo_tests_passed(scenarios_output, "scenarios"); + assert_cargo_tests_passed(cloning_output, "cloning"); + assert_cargo_tests_passed( + issues_frequent_commits_output, + "issues_frequent_commits", + ); + assert_cargo_tests_passed(restore_ledger_output, "restore_ledger"); + assert_cargo_tests_passed(magicblock_api_output, "magicblock_api"); + assert_cargo_tests_passed(table_mania_output, "table_mania"); + assert_cargo_tests_passed(committor_output, "committor"); + assert_cargo_tests_passed(magicblock_pubsub_output, "magicblock_pubsub"); } fn should_run_test(test_name: &str) -> bool { @@ -438,19 +441,19 @@ fn run_magicblock_pubsub_tests( // ----------------- // Configs/Checks // ----------------- -fn assert_cargo_tests_passed(output: process::Output) { +fn assert_cargo_tests_passed(output: process::Output, test_name: &str) { if !output.status.success() { - eprintln!("cargo test"); + eprintln!("cargo test '{}'", test_name); eprintln!("status: {}", output.status); 
eprintln!("stdout: {}", String::from_utf8_lossy(&output.stdout)); eprintln!("stderr: {}", String::from_utf8_lossy(&output.stderr)); } else if std::env::var("DUMP").is_ok() { - eprintln!("cargo test success"); + eprintln!("cargo test success '{}'", test_name); eprintln!("stdout: {}", String::from_utf8_lossy(&output.stdout)); eprintln!("stderr: {}", String::from_utf8_lossy(&output.stderr)); } // If a test in the suite fails the status shows that - assert!(output.status.success(), "cargo test failed"); + assert!(output.status.success(), "cargo test failed '{}'", test_name); } #[derive(Default)] From 727254cb1469b1e825621582bf4fd228b1afbe2e Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 10:23:11 +0530 Subject: [PATCH 07/15] chore: adding safety multiplier to table mania CU budgets --- magicblock-table-mania/src/compute_budget.rs | 16 ++++++++++++---- .../test-table-mania/tests/ix_lookup_table.rs | 6 +++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/magicblock-table-mania/src/compute_budget.rs b/magicblock-table-mania/src/compute_budget.rs index e9ebe5be..bb4b1f9f 100644 --- a/magicblock-table-mania/src/compute_budget.rs +++ b/magicblock-table-mania/src/compute_budget.rs @@ -2,6 +2,14 @@ use solana_sdk::{ compute_budget::ComputeBudgetInstruction, instruction::Instruction, }; +/// We multiply the CU values that we determined since we've seen compute budget exceeded errors +/// if we use them as is. The only way to determine CUs for a particular transaction 100% is +/// to simulate that exact transaction. +/// This would make table management even slower, but is an option to consider for the future. +/// The multiplier is based on the observation that [CREATE_AND_EXTEND_TABLE_CUS] were _safe_ at +/// 16K-17K CUs which we slightly exceed if we multiply by this multiplier. +const SAFETY_MULTIPLIER: u32 = 7; + /// Compute units required to create and extend a lookup table, with the initial /// pubkeys. 
This is the same no matter how many pubkeys are added to the table /// initially. @@ -10,14 +18,14 @@ use solana_sdk::{ /// https://github.com/solana-program/address-lookup-table/blob/main/program/src/processor.rs . /// This test repo (https://github.com/thlorenz/create-program-address-cus) verified /// that the variation is around 25 CUs, but to be on the safe side we add 10x that. -pub const CREATE_AND_EXTEND_TABLE_CUS: u32 = 2_400 + 250; +pub const CREATE_AND_EXTEND_TABLE_CUS: u32 = (2_400 + 250) * SAFETY_MULTIPLIER; /// Compute units required to extend a lookup table with additional pubkeys /// This is the same no matter how many pubkeys are added to the table. -pub const EXTEND_TABLE_CUS: u32 = 1_200; +pub const EXTEND_TABLE_CUS: u32 = 1_200 * SAFETY_MULTIPLIER; /// Compute units required to deactivate a lookup table. -pub const DEACTIVATE_TABLE_CUS: u32 = 1_050; +pub const DEACTIVATE_TABLE_CUS: u32 = 1_050 * SAFETY_MULTIPLIER; /// Compute units required to close a lookup table. -pub const CLOSE_TABLE_CUS: u32 = 1_050; +pub const CLOSE_TABLE_CUS: u32 = 1_050 * SAFETY_MULTIPLIER; #[derive(Clone)] pub struct TableManiaComputeBudget { diff --git a/test-integration/test-table-mania/tests/ix_lookup_table.rs b/test-integration/test-table-mania/tests/ix_lookup_table.rs index 7be76499..02e1a093 100644 --- a/test-integration/test-table-mania/tests/ix_lookup_table.rs +++ b/test-integration/test-table-mania/tests/ix_lookup_table.rs @@ -267,7 +267,7 @@ async fn test_lookup_table_ixs_cus_per_pubkey() { *lookup_table.extend_signatures().unwrap().last().unwrap(); let cus = get_tx_cus(&rpc_client, &extend_sig).await; debug!("Extend for {i:03} CUs {cus:04}CUs"); - assert_eq!(cus, EXTEND_TABLE_CUS as u64); + assert!(cus <= EXTEND_TABLE_CUS as u64); lookup_table .deactivate(&rpc_client, &validator_auth, &budgets.deactivate) @@ -280,7 +280,7 @@ async fn test_lookup_table_ixs_cus_per_pubkey() { ) .await; debug!("Deactivate table {cus:03}CUs"); - assert_eq!(cus, 
DEACTIVATE_TABLE_CUS as u64); + assert!(cus <= DEACTIVATE_TABLE_CUS as u64); #[cfg(feature = "test_table_close")] { @@ -305,7 +305,7 @@ async fn test_lookup_table_ixs_cus_per_pubkey() { assert!(is_closed); let cus = get_tx_cus(&rpc_client, &close_sig.unwrap()).await; debug!("Close table {cus:03}CUs",); - assert_eq!(cus, CLOSE_TABLE_CUS as u64); + assert!(cus <= CLOSE_TABLE_CUS as u64); } } } From 4b674b4506038a54d02d13e09729dded91bd6177 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 10:56:05 +0530 Subject: [PATCH 08/15] chore: remove special handling of lookup table creation error - this added more complexity and we can treat it just like any table mania error instead, keeping the code much simpler - left in some refactorings done while adding the special handling, since they improved the code quality --- .../src/commit/commit_using_args.rs | 27 ------- .../src/commit/commit_using_buffer.rs | 79 ++++--------------- .../src/commit/committor_processor.rs | 25 ++---- .../src/commit/common.rs | 41 +--------- .../src/commit_stage.rs | 12 --- magicblock-committor-service/src/error.rs | 3 - 6 files changed, 26 insertions(+), 161 deletions(-) diff --git a/magicblock-committor-service/src/commit/commit_using_args.rs b/magicblock-committor-service/src/commit/commit_using_args.rs index aead9341..12033eaa 100644 --- a/magicblock-committor-service/src/commit/commit_using_args.rs +++ b/magicblock-committor-service/src/commit/commit_using_args.rs @@ -10,7 +10,6 @@ use super::CommittorProcessor; use crate::{ commit::common::{ get_accounts_to_undelegate, lookup_table_keys, send_and_confirm, - stages_from_lookup_table_err, }, commit_stage::{CommitSignatures, CommitStage}, persist::CommitStrategy, @@ -127,15 +126,6 @@ impl CommittorProcessor { finalize_signature: None, undelegate_signature: None, }); - if let Some(stages) = stages_from_lookup_table_err( - &err, - &commit_infos, - strategy, - sigs.clone(), - ) { - return stages; - } - return commit_infos 
.into_iter() .map(|x| { @@ -178,15 +168,6 @@ impl CommittorProcessor { finalize_signature: err.signature(), undelegate_signature: None, }; - if let Some(stages) = stages_from_lookup_table_err( - &err, - &commit_infos, - strategy, - Some(sigs.clone()), - ) { - return stages; - } - return commit_infos .into_iter() .map(|x| { @@ -278,14 +259,6 @@ impl CommittorProcessor { undelegate_signature: err.signature(), }; - if let Some(stages) = stages_from_lookup_table_err( - &err, - &commit_infos, - strategy, - Some(sigs.clone()), - ) { - return stages; - } return commit_infos .into_iter() .map(|x| { diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs index 41ba6b30..dcf1a55b 100644 --- a/magicblock-committor-service/src/commit/commit_using_buffer.rs +++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs @@ -27,10 +27,7 @@ use solana_sdk::{ use tokio::task::JoinSet; use super::{ - common::{ - get_accounts_to_undelegate, send_and_confirm, - stages_from_lookup_table_err, - }, + common::{get_accounts_to_undelegate, send_and_confirm}, process_buffers::{ chunked_ixs_to_process_commitables_and_close_pdas, ChunkedIxsToProcessCommitablesAndClosePdasResult, @@ -195,23 +192,12 @@ impl CommittorProcessor { .await; commit_stages.extend(failed_process.into_iter().flat_map( - |(sig, xs, err)| { + |(sig, xs)| { let sigs = sig.map(|x| CommitSignatures { process_signature: x, finalize_signature: None, undelegate_signature: None, }); - if let Some(err) = err.as_ref() { - if let Some(stages) = stages_from_lookup_table_err( - err, - &xs, - commit_strategy, - sigs.clone(), - ) { - return stages; - } - } - xs.into_iter() .map(|x| { CommitStage::FailedProcess(( @@ -289,11 +275,15 @@ impl CommittorProcessor { ) .await; commit_stages.extend(failed_finalize.into_iter().flat_map( - |(sig, infos, err)| { - fn get_sigs_for_bundle(bundle_id: u64, processed_signatures: &HashMap, finalized_sig: Option) -> 
CommitSignatures { + |(sig, infos)| { + fn get_sigs_for_bundle( + bundle_id: u64, + processed_signatures: &HashMap, + finalized_sig: Option, + ) -> CommitSignatures { CommitSignatures { - // SAFETY: signatures for all bundles of succeeded process transactions - // have been added above + // SAFETY: signatures for all bundles of succeeded process transactions + // have been added above process_signature: *processed_signatures .get(&bundle_id) .unwrap(), @@ -302,24 +292,6 @@ impl CommittorProcessor { } } - if let Some(err) = err.as_ref() { - if let Some(stages) = stages_from_lookup_table_err( - err, - &infos, - commit_strategy, - None, - ) { - return stages.into_iter().map(|x| match x { - CommitStage::CouldNotCreateLookupTable((commit_info, strategy, _)) => { - let bundle_id = commit_info.bundle_id(); - let sigs = get_sigs_for_bundle(bundle_id, &processed_signatures, sig); - CommitStage::CouldNotCreateLookupTable((commit_info, strategy, Some(sigs))) - }, - _ => unreachable!("stages_from_lookup_table_err always creates 'CommitStage::CouldNotCreateLookupTable'"), - }).collect::>() - } - } - infos .into_iter() .map(|x| { @@ -327,7 +299,11 @@ impl CommittorProcessor { CommitStage::FailedFinalize(( x, commit_strategy, - get_sigs_for_bundle(bundle_id, &processed_signatures, sig), + get_sigs_for_bundle( + bundle_id, + &processed_signatures, + sig, + ), )) }) .collect::>() @@ -466,30 +442,7 @@ impl CommittorProcessor { commit_stages.extend( failed_undelegate.into_iter().flat_map( - |(sig, infos, err)| { - - if let Some(err) = err.as_ref() { - if let Some(stages) = stages_from_lookup_table_err( - err, - &infos, - commit_strategy, - None, - ) { - return stages.into_iter().map(|x| match x { - CommitStage::CouldNotCreateLookupTable((commit_info, strategy, _)) => { - let bundle_id = commit_info.bundle_id(); - let sigs = get_sigs_for_bundle( - bundle_id, - &processed_signatures, - &finalized_signatures, - sig); - CommitStage::CouldNotCreateLookupTable((commit_info, strategy, 
Some(sigs))) - }, - _ => unreachable!("stages_from_lookup_table_err always creates 'CommitStage::CouldNotCreateLookupTable'"), - }).collect::>() - } - } - + |(sig, infos)| { infos .into_iter() .map(|x| { diff --git a/magicblock-committor-service/src/commit/committor_processor.rs b/magicblock-committor-service/src/commit/committor_processor.rs index ca9875ea..3ab70ea3 100644 --- a/magicblock-committor-service/src/commit/committor_processor.rs +++ b/magicblock-committor-service/src/commit/committor_processor.rs @@ -26,7 +26,7 @@ use crate::{ commit_strategy::{split_changesets_by_commit_strategy, SplitChangesets}, compute_budget::{ComputeBudget, ComputeBudgetConfig}, config::ChainConfig, - error::{CommittorServiceError, CommittorServiceResult}, + error::CommittorServiceResult, persist::{ BundleSignatureRow, CommitPersister, CommitStatusRow, CommitStrategy, }, @@ -361,7 +361,6 @@ impl CommittorProcessor { .into_iter() .map(|ixs| ixs.commit_info) .collect::>(), - None::, ) }) .collect::>(); @@ -485,11 +484,8 @@ impl CommittorProcessor { pub(crate) type ProcessIxChunkSuccesses = Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>; pub(crate) type ProcessIxChunkSuccessesRc = Arc>; -pub(crate) type ProcessIxChunkFailures = Vec<( - Option, - Vec, - Option, -)>; +pub(crate) type ProcessIxChunkFailures = + Vec<(Option, Vec)>; pub(crate) type ProcessIxChunkFailuresRc = Arc>; /// Processes a single chunk of instructions, sending them as a transaction. 
@@ -548,21 +544,14 @@ pub(crate) async fn process_ixs_chunk( err ); - // Check if this is a lookup table error - let lookup_table_err = matches!( - err, - CommittorServiceError::CouldNotCreateLookupTable(_) - ); - let commit_infos = commit_infos .into_iter() .map(|(commit_info, _)| commit_info) .collect(); - failures.lock().expect("ix failures mutex poisoned").push(( - err.signature(), - commit_infos, - lookup_table_err.then_some(err), - )); + failures + .lock() + .expect("ix failures mutex poisoned") + .push((err.signature(), commit_infos)); } } } diff --git a/magicblock-committor-service/src/commit/common.rs b/magicblock-committor-service/src/commit/common.rs index 5c8ecb2a..79948576 100644 --- a/magicblock-committor-service/src/commit/common.rs +++ b/magicblock-committor-service/src/commit/common.rs @@ -20,11 +20,8 @@ use solana_sdk::{ }; use crate::{ - commit_stage::CommitSignatures, error::{CommittorServiceError, CommittorServiceResult}, - persist::CommitStrategy, pubkeys_provider::{provide_committee_pubkeys, provide_common_pubkeys}, - CommitInfo, CommitStage, }; pub(crate) fn lookup_table_keys( @@ -106,17 +103,11 @@ pub(crate) async fn send_and_confirm( let start = Instant::now(); // Ensure all pubkeys have tables before proceeding - if let Err(err) = table_mania + table_mania .ensure_pubkeys_table(&authority, &keys_from_tables) - .await - { - error!("Failed to ensure table exists for pubkeys: {:?}", err); - return Err(CommittorServiceError::CouldNotCreateLookupTable( - format!("{:?}", err), - )); - } + .await?; - // NOTE: we assume that all needed pubkeys were reserved earlier + // NOTE: we assume that all needed pubkeys were reserved by now let address_lookup_tables = table_mania .try_get_active_address_lookup_table_accounts( &keys_from_tables, @@ -219,29 +210,3 @@ pub(crate) async fn send_and_confirm( Ok(res.into_signature()) } } - -/// Checks if the error is a lookup table creation error and returns the appropriate -/// commit stages if so. 
Returns None if the error is not a lookup table error. -pub(crate) fn stages_from_lookup_table_err( - err: &CommittorServiceError, - commit_infos: &[CommitInfo], - strategy: CommitStrategy, - signatures: Option, -) -> Option> { - if let CommittorServiceError::CouldNotCreateLookupTable(_) = err { - Some( - commit_infos - .iter() - .map(|x| { - CommitStage::CouldNotCreateLookupTable(( - x.clone(), - strategy, - signatures.clone(), - )) - }) - .collect(), - ) - } else { - None - } -} diff --git a/magicblock-committor-service/src/commit_stage.rs b/magicblock-committor-service/src/commit_stage.rs index 131badd5..41d50359 100644 --- a/magicblock-committor-service/src/commit_stage.rs +++ b/magicblock-committor-service/src/commit_stage.rs @@ -88,13 +88,6 @@ pub enum CommitStage { /// initialized, but then this issue was detected. PartOfTooLargeBundleToProcess(CommitInfo), - /// Failed to create the lookup table required for this commit. - /// This happens when the table mania system cannot ensure that the lookup - /// table exists for the pubkeys needed by this commit. - CouldNotCreateLookupTable( - (CommitInfo, CommitStrategy, Option), - ), - /// The commit was properly initialized and added to a chunk of instructions to process /// commits via a transaction. For large commits the buffer and chunk accounts were properly /// prepared and haven't been closed. 
@@ -218,7 +211,6 @@ impl CommitStage { | BufferAndChunkPartiallyInitialized((ci, _)) | BufferAndChunkFullyInitialized((ci, _)) | PartOfTooLargeBundleToProcess(ci) - | CouldNotCreateLookupTable((ci, _, _)) | FailedProcess((ci, _, _)) | PartOfTooLargeBundleToFinalize(ci) | FailedFinalize((ci, _, _)) @@ -243,7 +235,6 @@ impl CommitStage { | Failed((_, strategy)) | BufferAndChunkPartiallyInitialized((_, strategy)) | BufferAndChunkFullyInitialized((_, strategy)) - | CouldNotCreateLookupTable((_, strategy, _)) | FailedProcess((_, strategy, _)) | FailedFinalize((_, strategy, _)) | FailedUndelegate((_, strategy, _)) @@ -271,9 +262,6 @@ impl CommitStage { | PartOfTooLargeBundleToFinalize(ci) => { CommitStatus::PartOfTooLargeBundleToProcess(ci.bundle_id()) } - CouldNotCreateLookupTable((ci, _, _)) => { - CommitStatus::CouldNotCreateLookupTable(ci.bundle_id()) - } FailedProcess((ci, strategy, sigs)) => CommitStatus::FailedProcess(( ci.bundle_id(), *strategy, diff --git a/magicblock-committor-service/src/error.rs b/magicblock-committor-service/src/error.rs index 45ff8033..dc4315dd 100644 --- a/magicblock-committor-service/src/error.rs +++ b/magicblock-committor-service/src/error.rs @@ -75,9 +75,6 @@ pub enum CommittorServiceError { Pubkey, solana_sdk::program_error::ProgramError, ), - - #[error("Could not create lookup table for pubkey {0}")] - CouldNotCreateLookupTable(String), } impl CommittorServiceError { From 075fa0d1060db2e0dcb7097e996580d5321bba55 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 11:16:31 +0530 Subject: [PATCH 09/15] chore: add convenience tasks to Makefile to isolate ix tests --- test-integration/Makefile | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/test-integration/Makefile b/test-integration/Makefile index e1148af8..d9626413 100644 --- a/test-integration/Makefile +++ b/test-integration/Makefile @@ -21,7 +21,7 @@ COMMITTOR_PROGRAM_SO := 
$(ROOT_DEPLOY_DIR)/magicblock_committor_program.so PROGRAMS_SO := $(FLEXI_COUNTER_SO) $(SCHEDULECOMMIT_SO) $(SCHEDULECOMMIT_SECURITY_SO) $(COMMITTOR_PROGRAM_SO) -list-tasks: +list: @cat Makefile | grep "^[a-z].*:" | sed 's/:.*//g' list-programs: @echo $(PROGRAMS_SO) @@ -36,6 +36,38 @@ test-force-mb: $(PROGRAMS_SO) test-ledger-restore FORCE_MAGIC_BLOCK_VALIDATOR=1 \ cargo run --package test-runner --bin run-tests +test-schedulecommit: + RUN_TESTS=schedulecommit \ + $(MAKE) test + +test-issues-frequent-commits: + RUN_TESTS=issues_frequent_commmits \ + $(MAKE) test + +test-cloning: + RUN_TESTS=cloning \ + $(MAKE) test + +test-restore-ledger: + RUN_TESTS=restore_ledger \ + $(MAKE) test + +test-magicblock-api: + RUN_TESTS=magicblock_api \ + $(MAKE) test + +test-table-mania: + RUN_TESTS=table_mania \ + $(MAKE) test + +test-committor: + RUN_TESTS=committor \ + $(MAKE) test + +test-pubsub: + RUN_TESTS=pubsub \ + $(MAKE) test + $(FLEXI_COUNTER_SO): $(FLEXI_COUNTER_SRC) cargo build-sbf --manifest-path $(FLEXI_COUNTER_DIR)/Cargo.toml $(SCHEDULECOMMIT_SO): $(SCHEDULECOMMIT_SRC) @@ -53,4 +85,4 @@ deploy-flexi-counter: $(FLEXI_COUNTER_SO) $(DIR)/target/deploy/program_flexi_counter.so -.PHONY: test test-force-mb deploy-flexi-counter +.PHONY: test test-force-mb test-schedulecommit test-issues-frequent-commits test-cloning test-restore-ledger test-magicblock-api test-table-mania test-committor test-pubsub deploy-flexi-counter list From ac78e86e54e401155b4ab3f579dc6712a8876272 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 12:20:42 +0530 Subject: [PATCH 10/15] chore: properly cleanup when ledger restore single airdrop test fails --- .../tests/01_single_airdrop.rs | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/test-integration/test-ledger-restore/tests/01_single_airdrop.rs b/test-integration/test-ledger-restore/tests/01_single_airdrop.rs index 87982cbb..34d5063f 100644 --- 
a/test-integration/test-ledger-restore/tests/01_single_airdrop.rs +++ b/test-integration/test-ledger-restore/tests/01_single_airdrop.rs @@ -1,7 +1,7 @@ use cleanass::{assert, assert_eq}; use std::{path::Path, process::Child}; -use integration_test_tools::{expect, tmpdir::resolve_tmp_dir}; +use integration_test_tools::{expect, tmpdir::resolve_tmp_dir, unwrap}; use solana_sdk::{ commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature, }; @@ -57,14 +57,24 @@ fn read_ledger( assert_eq!(acc.lamports, 1_111_111, cleanup(&mut validator)); if let Some(sig) = airdrop_sig1 { - let status = expect!(ctx.try_ephem_client(), validator) + let status = match expect!(ctx.try_ephem_client(), validator) .get_signature_status_with_commitment_and_history( sig, CommitmentConfig::confirmed(), true, - ) - .unwrap() - .unwrap(); + ) { + Ok(status) => { + unwrap!( + status, + format!("Should have received signature status for {sig}"), + validator + ) + } + Err(err) => { + cleanup(&mut validator); + panic!("Error fetching signature status: {:?}", err); + } + }; assert!(status.is_ok(), cleanup(&mut validator)); } From bccd78074ea12b1d1b093167ac51ae250e959c47 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 12:21:52 +0530 Subject: [PATCH 11/15] chore: remove tmp CI workflow --- .../workflows/ci-test-integration-branch.yml | 32 ------------------- 1 file changed, 32 deletions(-) delete mode 100644 .github/workflows/ci-test-integration-branch.yml diff --git a/.github/workflows/ci-test-integration-branch.yml b/.github/workflows/ci-test-integration-branch.yml deleted file mode 100644 index 09d8421d..00000000 --- a/.github/workflows/ci-test-integration-branch.yml +++ /dev/null @@ -1,32 +0,0 @@ -on: - push: - branches: [thlorenz/committor-improve-table-speed] - -name: Run CI - Integration Tests (Branch) - -jobs: - run_make_ci_test: - runs-on: extra-large - steps: - - name: Checkout this magicblock-validator - uses: actions/checkout@v2 - with: - path: 
magicblock-validator - - - uses: ./magicblock-validator/.github/actions/setup-build-env - with: - build_cache_key_name: "magicblock-validator-ci-test-integration-branch-v000" - rust_toolchain_release: "1.84.1" - github_access_token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - - uses: ./magicblock-validator/.github/actions/setup-solana - - - name: Run integration tests - run: | - sudo prlimit --pid $$ --nofile=1048576:1048576 - sudo sysctl fs.inotify.max_user_instances=1280 - sudo sysctl fs.inotify.max_user_watches=655360 - make ci-test-integration - shell: bash - working-directory: magicblock-validator From f007d4fbcb3b65a81fc8c52c743b9e474b7fe180 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Tue, 15 Jul 2025 19:59:56 +0530 Subject: [PATCH 12/15] chore: removing obsolete CouldNotCreateLookupTable error and commit status --- magicblock-committor-service/src/persist/error.rs | 3 --- .../src/persist/types/commit_status.rs | 11 ----------- 2 files changed, 14 deletions(-) diff --git a/magicblock-committor-service/src/persist/error.rs b/magicblock-committor-service/src/persist/error.rs index 36238bac..1d5e7590 100644 --- a/magicblock-committor-service/src/persist/error.rs +++ b/magicblock-committor-service/src/persist/error.rs @@ -35,7 +35,4 @@ pub enum CommitPersistError { #[error("Commit Status needs commit strategy: '{0}' ({0:?})")] CommitStatusNeedsStrategy(String), - - #[error("Could not create lookup table: '{0}' ({0:?})")] - CouldNotCreateLookupTable(String), } diff --git a/magicblock-committor-service/src/persist/types/commit_status.rs b/magicblock-committor-service/src/persist/types/commit_status.rs index 7fdc83e6..e6964ecc 100644 --- a/magicblock-committor-service/src/persist/types/commit_status.rs +++ b/magicblock-committor-service/src/persist/types/commit_status.rs @@ -29,8 +29,6 @@ pub enum CommitStatus { /// The commit is part of a bundle that contains too many commits to be included /// in a single 
transaction. Thus we cannot commit any of them. PartOfTooLargeBundleToProcess(u64), - /// Failed to create or access the lookup table required for this commit. - CouldNotCreateLookupTable(u64), /// The commit was properly initialized and added to a chunk of instructions to process /// commits via a transaction. For large commits the buffer and chunk accounts were properly /// prepared and haven't been closed. @@ -62,9 +60,6 @@ impl fmt::Display for CommitStatus { CommitStatus::PartOfTooLargeBundleToProcess(bundle_id) => { write!(f, "PartOfTooLargeBundleToProcess({})", bundle_id) } - CommitStatus::CouldNotCreateLookupTable(bundle_id) => { - write!(f, "CouldNotCreateLookupTable({})", bundle_id) - } CommitStatus::FailedProcess((bundle_id, strategy, sigs)) => { write!( f, @@ -164,9 +159,6 @@ impl "PartOfTooLargeBundleToProcess" => { Ok(PartOfTooLargeBundleToProcess(get_bundle_id!())) } - "CouldNotCreateLookupTable" => { - Ok(CouldNotCreateLookupTable(get_bundle_id!())) - } "FailedProcess" => { Ok(FailedProcess((get_bundle_id!(), strategy, sigs))) } @@ -217,7 +209,6 @@ impl CommitStatus { "BufferAndChunkFullyInitialized" } PartOfTooLargeBundleToProcess(_) => "PartOfTooLargeBundleToProcess", - CouldNotCreateLookupTable(_) => "CouldNotCreateLookupTable", FailedProcess(_) => "FailedProcess", FailedFinalize(_) => "FailedFinalize", FailedUndelegate(_) => "FailedUndelegate", @@ -233,7 +224,6 @@ impl CommitStatus { | BufferAndChunkInitialized(bundle_id) | BufferAndChunkFullyInitialized(bundle_id) | PartOfTooLargeBundleToProcess(bundle_id) - | CouldNotCreateLookupTable(bundle_id) | FailedProcess((bundle_id, _, _)) | FailedFinalize((bundle_id, _, _)) | FailedUndelegate((bundle_id, _, _)) @@ -261,7 +251,6 @@ impl CommitStatus { | BufferAndChunkInitialized(_) | BufferAndChunkFullyInitialized(_) => CommitStrategy::FromBuffer, PartOfTooLargeBundleToProcess(_) => CommitStrategy::Undetermined, - CouldNotCreateLookupTable(_) => CommitStrategy::Undetermined, FailedProcess((_, strategy, 
_)) => *strategy, FailedFinalize((_, strategy, _)) => *strategy, FailedUndelegate((_, strategy, _)) => *strategy, From dd6bf69a999f0b78aa55c7855069584b4f55e6f6 Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 17 Jul 2025 13:32:29 +0530 Subject: [PATCH 13/15] chore: address - use relaxed ordering in get_refcount method - taco-paco --- magicblock-table-mania/src/lookup_table_rc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/magicblock-table-mania/src/lookup_table_rc.rs b/magicblock-table-mania/src/lookup_table_rc.rs index 79a9ae23..8220ecd8 100644 --- a/magicblock-table-mania/src/lookup_table_rc.rs +++ b/magicblock-table-mania/src/lookup_table_rc.rs @@ -116,7 +116,7 @@ impl RefcountedPubkeys { fn get_refcount(&self, pubkey: &Pubkey) -> Option { self.pubkeys .get(pubkey) - .map(|count| count.load(Ordering::SeqCst)) + .map(|count| count.load(Ordering::Relaxed)) } } From 5dc7a338e041aca25d86e9935ad4565cec0f8e5f Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 17 Jul 2025 13:39:41 +0530 Subject: [PATCH 14/15] chore: optimize lock acquisition in ensure_pubkeys_table - taco-paco --- magicblock-table-mania/src/manager.rs | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/magicblock-table-mania/src/manager.rs b/magicblock-table-mania/src/manager.rs index fc2c1c3a..4901ccd7 100644 --- a/magicblock-table-mania/src/manager.rs +++ b/magicblock-table-mania/src/manager.rs @@ -176,18 +176,21 @@ impl TableMania { let mut remaining = HashSet::new(); // 1. 
Check which pubkeys already exist in any table - for pubkey in pubkeys { - let mut found = false; - for table in self.active_tables.read().await.iter() { - if table.contains_key(pubkey) { - found = true; - break; + { + let active_tables = self.active_tables.read().await; + for pubkey in pubkeys { + let mut found = false; + for table in active_tables.iter() { + if table.contains_key(pubkey) { + found = true; + break; + } + } + if !found { + remaining.insert(*pubkey); } } - if !found { - remaining.insert(*pubkey); - } - } + } // Drop the lock here before calling reserve_new_pubkeys // 2. If any pubkeys dont exist, create tables for them if !remaining.is_empty() { From 17d728ca747f90cc6eed6f56157491611575401c Mon Sep 17 00:00:00 2001 From: Thorsten Lorenz Date: Thu, 17 Jul 2025 13:57:28 +0530 Subject: [PATCH 15/15] fix: clippy warning --- magicblock-accounts-db/src/index.rs | 7 +++---- test-integration/Cargo.lock | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/magicblock-accounts-db/src/index.rs b/magicblock-accounts-db/src/index.rs index b9916c42..b597b6f6 100644 --- a/magicblock-accounts-db/src/index.rs +++ b/magicblock-accounts-db/src/index.rs @@ -5,7 +5,6 @@ use lmdb::{ Cursor, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags, }; use log::warn; -use magicblock_config::AccountsDbConfig; use solana_pubkey::Pubkey; use table::Table; use utils::*; @@ -131,12 +130,12 @@ impl AccountsDbIndex { }; let offset = // SAFETY: - // The accounts index stores two u32 values (offset and blocks) + // The accounts index stores two u32 values (offset and blocks) // serialized into 8 byte long slice. Here we are interested only in the first 4 bytes // (offset). The memory used by lmdb to store the serialization might not be u32 - // aligned, so we make use `read_unaligned`. + // aligned, so we make use `read_unaligned`. 
// - // We read the data stored by corresponding put in `insert_account`, + // We read the data stored by corresponding put in `insert_account`, // thus it should be of valid length and contain valid value unsafe { (offset.as_ptr() as *const u32).read_unaligned() }; Ok(offset) diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock index bed5de2d..4584e9c0 100644 --- a/test-integration/Cargo.lock +++ b/test-integration/Cargo.lock @@ -6260,7 +6260,7 @@ dependencies = [ [[package]] name = "solana-account" version = "2.2.1" -source = "git+https://github.com/magicblock-labs/solana-account.git?rev=7bdfefc#7bdfefc2fc6a6327f5c93575cf3d4a13cf6ff51a" +source = "git+https://github.com/magicblock-labs/solana-account.git?rev=176540a#176540ae8445a3161b2e8d5ab97a4d48bab35679" dependencies = [ "bincode", "qualifier_attr",