diff --git a/magicblock-account-cloner/src/remote_account_cloner_worker.rs b/magicblock-account-cloner/src/remote_account_cloner_worker.rs
index 6a1251f9..afe98b90 100644
--- a/magicblock-account-cloner/src/remote_account_cloner_worker.rs
+++ b/magicblock-account-cloner/src/remote_account_cloner_worker.rs
@@ -710,23 +710,32 @@ where
                 // Allow the committer service to reserve pubkeys in lookup tables
                 // that could be needed when we commit this account
-                // NOTE: we start reserving pubkeys so the transaction can complete while we
-                // clone the account.
-                let reserve_pubkeys_handle = if let Some(committor) =
-                    self.changeset_committor.as_ref()
-                {
+                if let Some(committor) = self.changeset_committor.as_ref() {
                     let committor = Arc::clone(committor);
                     let pubkey = *pubkey;
                     let owner = delegation_record.owner;
-                    Some(tokio::spawn(map_committor_request_result(
-                        committor.reserve_pubkeys_for_committee(pubkey, owner),
-                        committor,
-                    )))
-                } else {
-                    None
-                };
+                    tokio::spawn(async move {
+                        match map_committor_request_result(
+                            committor
+                                .reserve_pubkeys_for_committee(pubkey, owner),
+                            committor,
+                        )
+                        .await
+                        {
+                            Ok(initiated) => {
+                                trace!(
+                                    "Reserving lookup keys for {pubkey} took {:?}",
+                                    initiated.elapsed()
+                                );
+                            }
+                            Err(err) => {
+                                error!("Failed to reserve lookup keys for {pubkey}: {err:?}");
+                            }
+                        };
+                    });
+                }

-                let sig = self.do_clone_delegated_account(
+                self.do_clone_delegated_account(
                     pubkey,
                     // TODO(GabrielePicco): Avoid cloning
                     &Account {
@@ -734,19 +743,7 @@ where
                         ..account.clone()
                     },
                     delegation_record,
-                )?;
-
-                if let Some(handle) = reserve_pubkeys_handle {
-                    let initiated = handle.await.map_err(|err| {
-                        AccountClonerError::JoinError(format!("{err} {err:?}"))
-                    })??;
-                    trace!(
-                        "Reserving lookup keys for {pubkey} took {:?}",
-                        initiated.elapsed()
-                    );
-                }
-
-                sig
+                )?
             }
         };
         // Return the result
diff --git a/magicblock-accounts-db/src/index.rs b/magicblock-accounts-db/src/index.rs
index b9916c42..b597b6f6 100644
--- a/magicblock-accounts-db/src/index.rs
+++ b/magicblock-accounts-db/src/index.rs
@@ -5,7 +5,6 @@ use lmdb::{
     Cursor, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags,
 };
 use log::warn;
-use magicblock_config::AccountsDbConfig;
 use solana_pubkey::Pubkey;
 use table::Table;
 use utils::*;
@@ -131,12 +130,12 @@ impl AccountsDbIndex {
         };
         let offset =
             // SAFETY:
-            // The accounts index stores two u32 values (offset and blocks) 
+            // The accounts index stores two u32 values (offset and blocks)
             // serialized into 8 byte long slice. Here we are interested only in the first 4 bytes
             // (offset). The memory used by lmdb to store the serialization might not be u32
-            // aligned, so we make use `read_unaligned`. 
+            // aligned, so we use `read_unaligned`.
             //
-            // We read the data stored by corresponding put in `insert_account`, 
+            // We read the data stored by the corresponding put in `insert_account`,
             // thus it should be of valid length and contain valid value
             unsafe { (offset.as_ptr() as *const u32).read_unaligned() };
         Ok(offset)
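Aside for reviewers: the SAFETY comment above hinges on `read_unaligned`. A minimal, self-contained sketch of the same access pattern (illustrative only, not code from this repo):

```rust
/// Read the leading u32 (the offset) out of an 8-byte index entry whose
/// backing memory may not be 4-byte aligned, e.g. a slice handed out by lmdb.
fn read_offset(bytes: &[u8]) -> u32 {
    assert!(bytes.len() >= 4, "index entry must hold at least 4 bytes");
    // SAFETY: length checked above; `read_unaligned` is required because the
    // pointer is not guaranteed to be u32-aligned (a plain deref would be UB).
    unsafe { (bytes.as_ptr() as *const u32).read_unaligned() }
}

fn main() {
    // offset = 7, blocks = 42; little-endian layout assumed for this example
    let entry = [7u8, 0, 0, 0, 42, 0, 0, 0];
    assert_eq!(read_offset(&entry), 7);
}
```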
diff --git a/magicblock-committor-service/src/commit/commit_using_args.rs b/magicblock-committor-service/src/commit/commit_using_args.rs
index 6e99cf8f..12033eaa 100644
--- a/magicblock-committor-service/src/commit/commit_using_args.rs
+++ b/magicblock-committor-service/src/commit/commit_using_args.rs
@@ -101,6 +101,7 @@ impl CommittorProcessor {
             (tm, keys)
         });

+        let strategy = CommitStrategy::args(use_lookup);
         let compute_budget_ixs = me
             .compute_budget_config
             .args_process_budget()
@@ -119,7 +120,7 @@ impl CommittorProcessor {
             Ok(sig) => sig,
             Err(err) => {
                 error!("Failed to commit changeset with {} accounts using args: {:?}", committees.len(), err);
-                let strategy = CommitStrategy::args(use_lookup);
+
                 let sigs = err.signature().map(|sig| CommitSignatures {
                     process_signature: sig,
                     finalize_signature: None,
@@ -128,11 +129,7 @@ impl CommittorProcessor {
                 return commit_infos
                     .into_iter()
                     .map(|x| {
-                        CommitStage::FailedProcess((
-                            x,
-                            strategy,
-                            sigs.as_ref().cloned(),
-                        ))
+                        CommitStage::FailedProcess((x, strategy, sigs.clone()))
                     })
                     .collect();
             }
@@ -165,17 +162,19 @@ impl CommittorProcessor {
                     "Failed to finalize changeset using args: {:?}",
                     err
                 );
+
+                let sigs = CommitSignatures {
+                    process_signature: process_sig,
+                    finalize_signature: err.signature(),
+                    undelegate_signature: None,
+                };
                 return commit_infos
                     .into_iter()
                     .map(|x| {
                         CommitStage::FailedFinalize((
                             x,
-                            CommitStrategy::args(use_lookup),
-                            CommitSignatures {
-                                process_signature: process_sig,
-                                finalize_signature: err.signature(),
-                                undelegate_signature: None,
-                            },
+                            strategy,
+                            sigs.clone(),
                         ))
                     })
                     .collect();
@@ -219,7 +218,7 @@ impl CommittorProcessor {
                 .map(|x| {
                     CommitStage::FailedUndelegate((
                         x,
-                        CommitStrategy::args(use_lookup),
+                        strategy,
                         CommitSignatures {
                             process_signature: process_sig,
                             finalize_signature: finalize_sig,
@@ -254,17 +253,19 @@ impl CommittorProcessor {
                 "Failed to undelegate accounts via transaction '{}': {:?}",
                 err, err
             );
+            let sigs = CommitSignatures {
+                process_signature: process_sig,
+                finalize_signature: finalize_sig,
+                undelegate_signature: err.signature(),
+            };
+
             return commit_infos
                 .into_iter()
                 .map(|x| {
                     CommitStage::FailedUndelegate((
                         x,
-                        CommitStrategy::args(use_lookup),
-                        CommitSignatures {
-                            process_signature: process_sig,
-                            finalize_signature: finalize_sig,
-                            undelegate_signature: err.signature(),
-                        },
+                        strategy,
+                        sigs.clone(),
                     ))
                 })
                 .collect();
@@ -282,7 +283,7 @@ impl CommittorProcessor {
             .map(|x| {
                 CommitStage::Succeeded((
                     x,
-                    CommitStrategy::args(use_lookup),
+                    strategy,
                     CommitSignatures {
                         process_signature: process_sig,
                         finalize_signature: finalize_sig,
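The change above hoists `CommitStrategy::args(use_lookup)` (and each error path's `CommitSignatures`) out of the per-account `map` closures, so each value is built once and cloned per item. A stripped-down sketch of the pattern, with illustrative types:

```rust
#[derive(Clone, Debug)]
struct Sigs {
    process: u64,
    finalize: Option<u64>,
}

fn main() {
    // Built once, outside the closure ...
    let sigs = Sigs { process: 1, finalize: None };
    // ... then cheaply cloned for every failed account instead of being
    // re-constructed inside each `map` iteration.
    let stages: Vec<(u32, Sigs)> = (0..3).map(|x| (x, sigs.clone())).collect();
    assert_eq!(stages.len(), 3);
}
```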
diff --git a/magicblock-committor-service/src/commit/commit_using_buffer.rs b/magicblock-committor-service/src/commit/commit_using_buffer.rs
index 3e161fb8..dcf1a55b 100644
--- a/magicblock-committor-service/src/commit/commit_using_buffer.rs
+++ b/magicblock-committor-service/src/commit/commit_using_buffer.rs
@@ -21,11 +21,13 @@ use magicblock_rpc_client::{
     MagicBlockSendTransactionConfig,
 };
 use solana_pubkey::Pubkey;
-use solana_sdk::{hash::Hash, instruction::Instruction, signer::Signer};
+use solana_sdk::{
+    hash::Hash, instruction::Instruction, signature::Signature, signer::Signer,
+};
 use tokio::task::JoinSet;

 use super::{
-    common::send_and_confirm,
+    common::{get_accounts_to_undelegate, send_and_confirm},
     process_buffers::{
         chunked_ixs_to_process_commitables_and_close_pdas,
         ChunkedIxsToProcessCommitablesAndClosePdasResult,
@@ -33,7 +35,6 @@ use super::{
     CommittorProcessor,
 };
 use crate::{
-    commit::common::get_accounts_to_undelegate,
     commit_stage::CommitSignatures,
     error::{CommitAccountError, CommitAccountResult},
     finalize::{
@@ -275,6 +276,22 @@ impl CommittorProcessor {
             .await;
         commit_stages.extend(failed_finalize.into_iter().flat_map(
             |(sig, infos)| {
+                fn get_sigs_for_bundle(
+                    bundle_id: u64,
+                    processed_signatures: &HashMap<u64, Signature>,
+                    finalized_sig: Option<Signature>,
+                ) -> CommitSignatures {
+                    CommitSignatures {
+                        // SAFETY: signatures for all bundles of succeeded process transactions
+                        // have been added above
+                        process_signature: *processed_signatures
+                            .get(&bundle_id)
+                            .unwrap(),
+                        finalize_signature: finalized_sig,
+                        undelegate_signature: None,
+                    }
+                }
+
                 infos
                     .into_iter()
                     .map(|x| {
@@ -282,15 +299,11 @@ impl CommittorProcessor {
                         CommitStage::FailedFinalize((
                             x,
                             commit_strategy,
-                            CommitSignatures {
-                                // SAFETY: signatures for all bundles of succeeded process transactions
-                                // have been added above
-                                process_signature: *processed_signatures
-                                    .get(&bundle_id)
-                                    .unwrap(),
-                                finalize_signature: sig,
-                                undelegate_signature: None,
-                            },
+                            get_sigs_for_bundle(
+                                bundle_id,
+                                &processed_signatures,
+                                sig,
+                            ),
                         ))
                     })
                     .collect::<Vec<_>>()
             },
@@ -359,6 +372,25 @@ impl CommittorProcessor {
             finalize_and_undelegate.len(),
             "BUG: same amount of accounts to undelegate as to finalize and undelegate"
         );
+        fn get_sigs_for_bundle(
+            bundle_id: u64,
+            processed_signatures: &HashMap<u64, Signature>,
+            finalized_signatures: &HashMap<u64, Signature>,
+            undelegated_sig: Option<Signature>,
+        ) -> CommitSignatures {
+            CommitSignatures {
+                // SAFETY: signatures for all bundles of succeeded process transactions
+                // have been added above
+                process_signature: *processed_signatures
+                    .get(&bundle_id)
+                    .unwrap(),
+                finalize_signature: finalized_signatures
+                    .get(&bundle_id)
+                    .cloned(),
+                undelegate_signature: undelegated_sig,
+            }
+        }
+
         let undelegate_ixs = match undelegate_commitables_ixs(
             &processor.magicblock_rpc_client,
             processor.authority.pubkey(),
@@ -378,19 +410,12 @@ impl CommittorProcessor {
                         CommitStage::FailedUndelegate((
                             x.clone(),
                             CommitStrategy::args(use_lookup),
-                            CommitSignatures {
-                                // SAFETY: signatures for all bundles of succeeded process transactions
-                                // have been added above
-                                process_signature:
-                                    *processed_signatures
-                                        .get(&bundle_id)
-                                        .unwrap(),
-                                finalize_signature:
-                                    finalized_signatures
-                                        .get(&bundle_id)
-                                        .cloned(),
-                                undelegate_signature: err.signature(),
-                            },
+                            get_sigs_for_bundle(
+                                bundle_id,
+                                &processed_signatures,
+                                &finalized_signatures,
+                                err.signature(),
+                            ),
                         ))
                     }),
                 );
@@ -425,19 +450,12 @@ impl CommittorProcessor {
                         CommitStage::FailedUndelegate((
                             x,
                             commit_strategy,
-                            CommitSignatures {
-                                // SAFETY: signatures for all bundles of succeeded process transactions
-                                // have been added above
-                                process_signature:
-                                    *processed_signatures
-                                        .get(&bundle_id)
-                                        .unwrap(),
-                                finalize_signature:
-                                    finalized_signatures
-                                        .get(&bundle_id)
-                                        .cloned(),
-                                undelegate_signature: sig,
-                            },
+                            get_sigs_for_bundle(
+                                bundle_id,
+                                &processed_signatures,
+                                &finalized_signatures,
+                                sig,
+                            ),
                         ))
                     })
                     .collect::<Vec<_>>()
@@ -1009,18 +1027,14 @@ impl CommittorProcessor {
                     rpc_client,
                     authority,
                     [write_budget_ixs, vec![ix]].concat(),
-                    format!("write chunk for offset {}", chunk.offset),
+                    format!("write chunk ({} bytes)", chunk_bytes),
                     Some(latest_blockhash),
-                    // NOTE: We could use `processed` here and wait to get the processed status at
-                    // least which would make things a bit slower.
-                    // However that way we would avoid sending unnecessary transactions potentially
-                    // since we may not see some written chunks yet when we get the chunks account.
                     MagicBlockSendTransactionConfig::ensure_processed(),
                     None,
                 )
                 .await
                 .inspect_err(|err| {
-                    error!("{:?}", err);
+                    warn!("{:?}", err);
                })
            });
        }
diff --git a/magicblock-committor-service/src/commit/committor_processor.rs b/magicblock-committor-service/src/commit/committor_processor.rs
index e78c8c17..3ab70ea3 100644
--- a/magicblock-committor-service/src/commit/committor_processor.rs
+++ b/magicblock-committor-service/src/commit/committor_processor.rs
@@ -341,10 +341,7 @@ impl CommittorProcessor {
         chunked_close_ixs: Option<Vec<Vec<Instruction>>>,
         table_mania: Option<&TableMania>,
         owners: &HashMap<Pubkey, Pubkey>,
-    ) -> (
-        Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>,
-        Vec<(Option<Signature>, Vec<CommitInfo>)>,
-    ) {
+    ) -> (ProcessIxChunkSuccesses, ProcessIxChunkFailures) {
         let latest_blockhash =
             match self.magicblock_rpc_client.get_latest_blockhash().await {
                 Ok(bh) => bh,
@@ -372,11 +369,8 @@ impl CommittorProcessor {
         };

         let mut join_set = JoinSet::new();
-        let successes = Arc::<
-            Mutex<Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>>,
-        >::default();
-        let failures =
-            Arc::<Mutex<Vec<(Option<Signature>, Vec<CommitInfo>)>>>::default();
+        let successes = ProcessIxChunkSuccessesRc::default();
+        let failures = ProcessIxChunkFailuresRc::default();
         for ixs_chunk in ixs_chunks {
             let authority = self.authority.insecure_clone();
             let rpc_client = self.magicblock_rpc_client.clone();
@@ -439,9 +433,7 @@ impl CommittorProcessor {
         if let Some(latest_blockhash) = latest_blockhash {
             let mut join_set = JoinSet::new();
-            let failures = Arc::<
-                Mutex<Vec<(Option<Signature>, Vec<CommitInfo>)>>,
-            >::default();
+            let failures = ProcessIxChunkFailuresRc::default();
             for ixs_chunk in chunked_close_ixs {
                 let authority = self.authority.insecure_clone();
                 let rpc_client = self.magicblock_rpc_client.clone();
@@ -450,7 +442,7 @@ impl CommittorProcessor {
                 let compute_budget = self.compute_budget_config.buffer_close_budget();

                 // We ignore close successes
-                let successes = Default::default();
+                let successes = ProcessIxChunkSuccessesRc::default();
                 // We only log close failures since the commit was processed successfully
                 let failures = failures.clone();
                 join_set.spawn(process_ixs_chunk(
@@ -489,6 +481,13 @@ impl CommittorProcessor {
     }
 }

+pub(crate) type ProcessIxChunkSuccesses =
+    Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>;
+pub(crate) type ProcessIxChunkSuccessesRc = Arc<Mutex<ProcessIxChunkSuccesses>>;
+pub(crate) type ProcessIxChunkFailures =
+    Vec<(Option<Signature>, Vec<CommitInfo>)>;
+pub(crate) type ProcessIxChunkFailuresRc = Arc<Mutex<ProcessIxChunkFailures>>;
+
 /// Processes a single chunk of instructions, sending them as a transaction.
 /// Updates the shared success or failure lists based on the transaction outcome.
 #[allow(clippy::type_complexity, clippy::too_many_arguments)]
@@ -497,10 +496,8 @@ pub(crate) async fn process_ixs_chunk(
     compute_budget: ComputeBudget,
     authority: Keypair,
     rpc_client: MagicblockRpcClient,
-    successes: Arc<
-        Mutex<Vec<(Signature, Vec<(CommitInfo, InstructionsKind)>)>>,
-    >,
-    failures: Arc<Mutex<Vec<(Option<Signature>, Vec<CommitInfo>)>>>,
+    successes: ProcessIxChunkSuccessesRc,
+    failures: ProcessIxChunkFailuresRc,
     table_mania: Option<TableMania>,
     owners: HashMap<Pubkey, Pubkey>,
     latest_blockhash: Hash,
@@ -546,6 +543,7 @@ pub(crate) async fn process_ixs_chunk(
                 commit_infos.len(),
                 err
             );
+
             let commit_infos = commit_infos
                 .into_iter()
                 .map(|(commit_info, _)| commit_info)
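The repeated `CommitSignatures` literals are folded into a local `get_sigs_for_bundle` helper above. A nested `fn` item in Rust cannot capture variables from the enclosing function, which is why the signature maps are threaded through as parameters; a minimal illustration (simplified types, hypothetical names):

```rust
use std::collections::HashMap;

// Local helper in the same spirit as `get_sigs_for_bundle`: everything it
// needs must be passed in explicitly, since `fn` items capture nothing.
fn sig_for_bundle(bundle_id: u64, sigs: &HashMap<u64, &'static str>) -> &'static str {
    sigs.get(&bundle_id).copied().unwrap_or("missing")
}

fn main() {
    let mut sigs = HashMap::new();
    sigs.insert(1u64, "process-sig-1");
    assert_eq!(sig_for_bundle(1, &sigs), "process-sig-1");
    assert_eq!(sig_for_bundle(2, &sigs), "missing");
}
```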
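The new `ProcessIxChunk*` aliases exist purely to name the nested `Arc<Mutex<Vec<...>>>` shapes once instead of repeating them at every call site (the reason `clippy::type_complexity` was being allowed). The pattern, reduced to a sketch with placeholder element types:

```rust
use std::sync::{Arc, Mutex};

// Name the aggregate type once ...
type Failures = Vec<(Option<u64>, Vec<String>)>;
// ... and its shared, thread-safe handle once.
type FailuresRc = Arc<Mutex<Failures>>;

fn main() {
    let failures = FailuresRc::default(); // Arc<Mutex<Vec<_>>> via the alias
    failures
        .lock()
        .unwrap()
        .push((None, vec!["commit-1".to_string()]));
    assert_eq!(failures.lock().unwrap().len(), 1);
}
```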
diff --git a/magicblock-committor-service/src/commit/common.rs b/magicblock-committor-service/src/commit/common.rs
index f666ccba..79948576 100644
--- a/magicblock-committor-service/src/commit/common.rs
+++ b/magicblock-committor-service/src/commit/common.rs
@@ -102,7 +102,12 @@ pub(crate) async fn send_and_confirm(
     if let Some((table_mania, keys_from_tables)) = table_mania_setup {
         let start = Instant::now();

-        // NOTE: we assume that all needed pubkeys were reserved earlier
+        // Ensure all pubkeys have tables before proceeding
+        table_mania
+            .ensure_pubkeys_table(&authority, &keys_from_tables)
+            .await?;
+
+        // NOTE: we assume that all needed pubkeys were reserved by now
         let address_lookup_tables = table_mania
             .try_get_active_address_lookup_table_accounts(
                 &keys_from_tables,
diff --git a/magicblock-table-mania/src/lookup_table_rc.rs b/magicblock-table-mania/src/lookup_table_rc.rs
index 5bd85622..8220ecd8 100644
--- a/magicblock-table-mania/src/lookup_table_rc.rs
+++ b/magicblock-table-mania/src/lookup_table_rc.rs
@@ -109,6 +109,15 @@ impl RefcountedPubkeys {
             .values()
             .any(|rc_pubkey| rc_pubkey.load(Ordering::SeqCst) > 0)
     }
+
+    /// Returns the refcount of a pubkey if it exists in this table
+    /// - *pubkey* to query refcount for
+    /// - *returns* `Some(refcount)` if the pubkey exists, `None` otherwise
+    fn get_refcount(&self, pubkey: &Pubkey) -> Option<u64> {
+        self.pubkeys
+            .get(pubkey)
+            .map(|count| count.load(Ordering::Relaxed))
+    }
 }

 impl Deref for RefcountedPubkeys {
@@ -316,6 +325,12 @@ impl LookupTableRc {
         })
     }

+    /// Returns the refcount of a pubkey if it exists in this table
+    /// - *pubkey* to query refcount for
+    /// - *returns* `Some(refcount)` if the pubkey exists, `None` otherwise
+    pub fn get_refcount(&self, pubkey: &Pubkey) -> Option<u64> {
+        self.pubkeys()?.get_refcount(pubkey)
+    }
     /// Returns `true` if the we requested to deactivate this table.
     /// NOTE: this doesn't mean that the deactivation period passed, thus
     /// the table could still be considered _deactivating_ on chain.
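`get_refcount` is a plain atomic load keyed by pubkey, returning `None` for keys the table never held. A self-contained sketch of the same shape (`AtomicU64` and the field names here are assumptions for illustration, not the crate's actual definitions):

```rust
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};

struct Refcounts(HashMap<&'static str, AtomicU64>);

impl Refcounts {
    /// None if the key was never inserted, Some(current count) otherwise.
    fn get_refcount(&self, key: &str) -> Option<u64> {
        self.0.get(key).map(|c| c.load(Ordering::Relaxed))
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert("table-key", AtomicU64::new(1));
    let rc = Refcounts(map);
    assert_eq!(rc.get_refcount("table-key"), Some(1));
    assert_eq!(rc.get_refcount("unknown"), None);
}
```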
diff --git a/magicblock-table-mania/src/manager.rs b/magicblock-table-mania/src/manager.rs
index 8ad5d42b..4901ccd7 100644
--- a/magicblock-table-mania/src/manager.rs
+++ b/magicblock-table-mania/src/manager.rs
@@ -128,6 +128,17 @@ impl TableMania {
         pubkeys
     }

+    /// Returns the refcount of a pubkey if it exists in any active table
+    /// - *pubkey* to query refcount for
+    /// - *returns* `Some(refcount)` if the pubkey exists in any table, `None` otherwise
+    pub async fn get_pubkey_refcount(&self, pubkey: &Pubkey) -> Option<u64> {
+        for table in self.active_tables.read().await.iter() {
+            if let Some(refcount) = table.get_refcount(pubkey) {
+                return Some(refcount);
+            }
+        }
+        None
+    }
     // -----------------
     // Reserve
     // -----------------
@@ -148,6 +159,46 @@ impl TableMania {
         self.reserve_new_pubkeys(authority, &remaining).await
     }

+    /// Ensures that the given pubkeys exist in an active table without
+    /// increasing their reference counts. If no table contains a pubkey yet,
+    /// one is created using the same transaction logic as when reserving pubkeys.
+    ///
+    /// This method awaits the transaction outcome and returns once all pubkeys are part of tables.
+    ///
+    /// - *authority* - The authority keypair to use for creating tables if needed
+    /// - *pubkeys* - The pubkeys to ensure exist in tables
+    /// - *returns* `Ok(())` if all pubkeys are now part of tables
+    pub async fn ensure_pubkeys_table(
+        &self,
+        authority: &Keypair,
+        pubkeys: &HashSet<Pubkey>,
+    ) -> TableManiaResult<()> {
+        let mut remaining = HashSet::new();
+
+        // 1. Check which pubkeys already exist in any table
+        {
+            let active_tables = self.active_tables.read().await;
+            for pubkey in pubkeys {
+                let mut found = false;
+                for table in active_tables.iter() {
+                    if table.contains_key(pubkey) {
+                        found = true;
+                        break;
+                    }
+                }
+                if !found {
+                    remaining.insert(*pubkey);
+                }
+            }
+        } // Drop the lock here before calling reserve_new_pubkeys
+
+        // 2. If any pubkeys don't exist, create tables for them
+        if !remaining.is_empty() {
+            self.reserve_new_pubkeys(authority, &remaining).await?;
+        }
+
+        Ok(())
+    }
     /// Tries to find a table that holds this pubkey already and reserves it.
     /// - *pubkey* to reserve
     /// - *returns* `true` if the pubkey could be reserved
diff --git a/test-integration/Cargo.lock b/test-integration/Cargo.lock
index bed5de2d..4584e9c0 100644
--- a/test-integration/Cargo.lock
+++ b/test-integration/Cargo.lock
@@ -6260,7 +6260,7 @@ dependencies = [
 [[package]]
 name = "solana-account"
 version = "2.2.1"
-source = "git+https://github.com/magicblock-labs/solana-account.git?rev=7bdfefc#7bdfefc2fc6a6327f5c93575cf3d4a13cf6ff51a"
+source = "git+https://github.com/magicblock-labs/solana-account.git?rev=176540a#176540ae8445a3161b2e8d5ab97a4d48bab35679"
 dependencies = [
  "bincode",
  "qualifier_attr",
diff --git a/test-integration/Makefile b/test-integration/Makefile
index e1148af8..d9626413 100644
--- a/test-integration/Makefile
+++ b/test-integration/Makefile
@@ -21,7 +21,7 @@ COMMITTOR_PROGRAM_SO := $(ROOT_DEPLOY_DIR)/magicblock_committor_program.so
 PROGRAMS_SO := $(FLEXI_COUNTER_SO) $(SCHEDULECOMMIT_SO) $(SCHEDULECOMMIT_SECURITY_SO) $(COMMITTOR_PROGRAM_SO)

-list-tasks:
+list:
	@cat Makefile | grep "^[a-z].*:" | sed 's/:.*//g'
 list-programs:
	@echo $(PROGRAMS_SO)
@@ -36,6 +36,38 @@ test-force-mb: $(PROGRAMS_SO) test-ledger-restore
	FORCE_MAGIC_BLOCK_VALIDATOR=1 \
	cargo run --package test-runner --bin run-tests

+test-schedulecommit:
+	RUN_TESTS=schedulecommit \
+	$(MAKE) test
+
+test-issues-frequent-commits:
+	RUN_TESTS=issues_frequent_commits \
+	$(MAKE) test
+
+test-cloning:
+	RUN_TESTS=cloning \
+	$(MAKE) test
+
+test-restore-ledger:
+	RUN_TESTS=restore_ledger \
+	$(MAKE) test
+
+test-magicblock-api:
+	RUN_TESTS=magicblock_api \
+	$(MAKE) test
+
+test-table-mania:
+	RUN_TESTS=table_mania \
+	$(MAKE) test
+
+test-committor:
+	RUN_TESTS=committor \
+	$(MAKE) test
+
+test-pubsub:
+	RUN_TESTS=pubsub \
+	$(MAKE) test
+
 $(FLEXI_COUNTER_SO): $(FLEXI_COUNTER_SRC)
	cargo build-sbf --manifest-path $(FLEXI_COUNTER_DIR)/Cargo.toml
 $(SCHEDULECOMMIT_SO): $(SCHEDULECOMMIT_SRC)
@@ -53,4 +85,4 @@ deploy-flexi-counter: $(FLEXI_COUNTER_SO)
	$(DIR)/target/deploy/program_flexi_counter.so

-.PHONY: test test-force-mb deploy-flexi-counter
+.PHONY: test test-force-mb test-schedulecommit test-issues-frequent-commits test-cloning test-restore-ledger test-magicblock-api test-table-mania test-committor test-pubsub deploy-flexi-counter list
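`ensure_pubkeys_table` in manager.rs above is a check-then-create flow: scan the active tables under a read lock, then create tables only for the keys that were missing. The set arithmetic at its core, reduced to a sketch with plain integers standing in for pubkeys:

```rust
use std::collections::HashSet;

/// Keys that still need a table: wanted minus already-present.
fn missing(existing: &HashSet<u32>, wanted: &HashSet<u32>) -> HashSet<u32> {
    wanted.difference(existing).copied().collect()
}

fn main() {
    let existing: HashSet<u32> = [1, 2].into_iter().collect();
    let wanted: HashSet<u32> = [2, 3].into_iter().collect();
    let expected: HashSet<u32> = [3].into_iter().collect();
    // Only key 3 would trigger table creation; 2 is found and left untouched,
    // which is why refcounts stay unchanged for already-reserved pubkeys.
    assert_eq!(missing(&existing, &wanted), expected);
}
```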
diff --git a/test-integration/test-ledger-restore/tests/01_single_airdrop.rs b/test-integration/test-ledger-restore/tests/01_single_airdrop.rs
index 87982cbb..34d5063f 100644
--- a/test-integration/test-ledger-restore/tests/01_single_airdrop.rs
+++ b/test-integration/test-ledger-restore/tests/01_single_airdrop.rs
@@ -1,7 +1,7 @@
 use cleanass::{assert, assert_eq};
 use std::{path::Path, process::Child};

-use integration_test_tools::{expect, tmpdir::resolve_tmp_dir};
+use integration_test_tools::{expect, tmpdir::resolve_tmp_dir, unwrap};
 use solana_sdk::{
     commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature,
 };
@@ -57,14 +57,24 @@ fn read_ledger(
     assert_eq!(acc.lamports, 1_111_111, cleanup(&mut validator));

     if let Some(sig) = airdrop_sig1 {
-        let status = expect!(ctx.try_ephem_client(), validator)
+        let status = match expect!(ctx.try_ephem_client(), validator)
             .get_signature_status_with_commitment_and_history(
                 sig,
                 CommitmentConfig::confirmed(),
                 true,
-            )
-            .unwrap()
-            .unwrap();
+            ) {
+            Ok(status) => {
+                unwrap!(
+                    status,
+                    format!("Should have received signature status for {sig}"),
+                    validator
+                )
+            }
+            Err(err) => {
+                cleanup(&mut validator);
+                panic!("Error fetching signature status: {:?}", err);
+            }
+        };
         assert!(status.is_ok(), cleanup(&mut validator));
     }
diff --git a/test-integration/test-runner/bin/run_tests.rs b/test-integration/test-runner/bin/run_tests.rs
index b3d33881..c40e880c 100644
--- a/test-integration/test-runner/bin/run_tests.rs
+++ b/test-integration/test-runner/bin/run_tests.rs
@@ -57,15 +57,18 @@ pub fn main() {
     };

     // Assert that all tests passed
-    assert_cargo_tests_passed(security_output);
-    assert_cargo_tests_passed(scenarios_output);
-    assert_cargo_tests_passed(cloning_output);
-    assert_cargo_tests_passed(issues_frequent_commits_output);
-    assert_cargo_tests_passed(restore_ledger_output);
-    assert_cargo_tests_passed(magicblock_api_output);
-    assert_cargo_tests_passed(table_mania_output);
-    assert_cargo_tests_passed(committor_output);
-    assert_cargo_tests_passed(magicblock_pubsub_output);
+    assert_cargo_tests_passed(security_output, "security");
+    assert_cargo_tests_passed(scenarios_output, "scenarios");
+    assert_cargo_tests_passed(cloning_output, "cloning");
+    assert_cargo_tests_passed(
+        issues_frequent_commits_output,
+        "issues_frequent_commits",
+    );
+    assert_cargo_tests_passed(restore_ledger_output, "restore_ledger");
+    assert_cargo_tests_passed(magicblock_api_output, "magicblock_api");
+    assert_cargo_tests_passed(table_mania_output, "table_mania");
+    assert_cargo_tests_passed(committor_output, "committor");
+    assert_cargo_tests_passed(magicblock_pubsub_output, "magicblock_pubsub");
 }

 fn should_run_test(test_name: &str) -> bool {
@@ -438,19 +441,19 @@ fn run_magicblock_pubsub_tests(
 // -----------------
 // Configs/Checks
 // -----------------
-fn assert_cargo_tests_passed(output: process::Output) {
+fn assert_cargo_tests_passed(output: process::Output, test_name: &str) {
     if !output.status.success() {
-        eprintln!("cargo test");
+        eprintln!("cargo test '{}'", test_name);
         eprintln!("status: {}", output.status);
         eprintln!("stdout: {}", String::from_utf8_lossy(&output.stdout));
         eprintln!("stderr: {}", String::from_utf8_lossy(&output.stderr));
     } else if std::env::var("DUMP").is_ok() {
-        eprintln!("cargo test success");
+        eprintln!("cargo test success '{}'", test_name);
         eprintln!("stdout: {}", String::from_utf8_lossy(&output.stdout));
         eprintln!("stderr: {}", String::from_utf8_lossy(&output.stderr));
     }
     // If a test in the suite fails the status shows that
-    assert!(output.status.success(), "cargo test failed");
+    assert!(output.status.success(), "cargo test failed '{}'", test_name);
 }

 #[derive(Default)]
diff --git a/test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs b/test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs
new file mode 100644
index 00000000..770bb36c
--- /dev/null
+++ b/test-integration/test-table-mania/tests/ix_ensure_pubkey_table.rs
@@ -0,0 +1,236 @@
+use std::collections::HashSet;
+
+use log::*;
+use solana_pubkey::Pubkey;
+use solana_sdk::signature::Keypair;
+use test_tools_core::init_logger;
+mod utils;
+
+#[tokio::test]
+async fn test_ensure_pubkeys_table_existing_pubkey() {
+    init_logger!();
+    let authority = Keypair::new();
+    let table_mania = utils::setup_table_mania(&authority).await;
+
+    // Create a pubkey and reserve it first
+    let pubkey = Pubkey::new_unique();
+    let mut pubkeys_set = HashSet::new();
+    pubkeys_set.insert(pubkey);
+
+    // Reserve the pubkey first
+    table_mania
+        .reserve_pubkeys(&authority, &pubkeys_set)
+        .await
+        .unwrap();
+
+    // Get refcount before ensure_pubkeys_table
+    let refcount_before = table_mania.get_pubkey_refcount(&pubkey).await;
+    debug!("Refcount before ensure: {:?}", refcount_before);
+
+    // Now ensure the pubkey exists - should find it without creating a new table
+    let tables_count_before = table_mania.active_tables_count().await;
+    debug!("Active tables before ensure: {}", tables_count_before);
+
+    table_mania
+        .ensure_pubkeys_table(&authority, &pubkeys_set)
+        .await
+        .unwrap();
+
+    // Get refcount after ensure_pubkeys_table
+    let refcount_after = table_mania.get_pubkey_refcount(&pubkey).await;
+    debug!("Refcount after ensure: {:?}", refcount_after);
+
+    // Verify refcount didn't increase
+    assert_eq!(
+        refcount_before, refcount_after,
+        "ensure_pubkeys_table should not increase refcount"
+    );
+
+    let tables_count_after = table_mania.active_tables_count().await;
+    debug!("Active tables after ensure: {}", tables_count_after);
+
+    // Should not have created a new table
+    assert_eq!(tables_count_before, tables_count_after);
+
+    // Verify the pubkey is still in a table
+    let active_pubkeys = table_mania.active_table_pubkeys().await;
+    assert!(active_pubkeys.contains(&pubkey));
+}
+
+#[tokio::test]
+async fn test_ensure_pubkeys_table_new_pubkey() {
+    init_logger!();
+    let authority = Keypair::new();
+    let table_mania = utils::setup_table_mania(&authority).await;
+
+    let pubkey = Pubkey::new_unique();
+    let mut pubkeys_set = HashSet::new();
+    pubkeys_set.insert(pubkey);
+
+    let tables_count_before = table_mania.active_tables_count().await;
+    debug!("Active tables before ensure: {}", tables_count_before);
+
+    // Ensure the pubkey exists - should create a new table
+    table_mania
+        .ensure_pubkeys_table(&authority, &pubkeys_set)
+        .await
+        .unwrap();
+
+    // Get refcount after ensure_pubkeys_table
+    let refcount_after = table_mania.get_pubkey_refcount(&pubkey).await;
+    debug!("Refcount after ensure: {:?}", refcount_after);
+
+    // Verify refcount is set to 1 (from the ensure_pubkeys_table call)
+    assert_eq!(
+        refcount_after,
+        Some(1),
+        "ensure_pubkeys_table should set refcount to 1 for new pubkeys"
+    );
+
+    let tables_count_after = table_mania.active_tables_count().await;
+    debug!("Active tables after ensure: {}", tables_count_after);
+
+    // Should have created a new table
+    assert_eq!(tables_count_before + 1, tables_count_after);
+
+    // Verify the pubkey is now in a table
+    let active_pubkeys = table_mania.active_table_pubkeys().await;
+    assert!(active_pubkeys.contains(&pubkey));
+}
+
+#[tokio::test]
+async fn test_ensure_pubkeys_table_of_reserved_pubkey_doesnt_affect_ref_count()
+{
+    init_logger!();
+    let authority = Keypair::new();
+    let table_mania = utils::setup_table_mania(&authority).await;
+
+    let pubkey = Pubkey::new_unique();
+    let mut pubkeys_set = HashSet::new();
+    pubkeys_set.insert(pubkey);
+
+    // 1. Reserve the pubkey first
+    table_mania
+        .reserve_pubkeys(&authority, &pubkeys_set)
+        .await
+        .unwrap();
+
+    // assert that table refcount is 1
+    let refcount_after_reserve =
+        table_mania.get_pubkey_refcount(&pubkey).await;
+    debug!("Refcount after reserve: {:?}", refcount_after_reserve);
+    assert_eq!(
+        refcount_after_reserve,
+        Some(1),
+        "reserve_pubkeys should set refcount to 1"
+    );
+
+    // 2. Ensure pubkey
+    table_mania
+        .ensure_pubkeys_table(&authority, &pubkeys_set)
+        .await
+        .unwrap();
+
+    // assert that table refcount is still 1
+    let refcount_after_ensure = table_mania.get_pubkey_refcount(&pubkey).await;
+    debug!("Refcount after ensure: {:?}", refcount_after_ensure);
+    assert_eq!(
+        refcount_after_ensure,
+        Some(1),
+        "ensure_pubkeys_table should not change refcount for reserved pubkey"
+    );
+
+    // Verify the pubkey is still in a table
+    let active_pubkeys = table_mania.active_table_pubkeys().await;
+    assert!(active_pubkeys.contains(&pubkey));
+}
+
+#[tokio::test]
+async fn test_ensure_pubkeys_multiple_some_reserved_some_not_all_have_final_refcount_one(
+) {
+    init_logger!();
+    let authority = Keypair::new();
+    let table_mania = utils::setup_table_mania(&authority).await;
+
+    // Create multiple pubkeys
+    let reserved_pubkey1 = Pubkey::new_unique();
+    let reserved_pubkey2 = Pubkey::new_unique();
+    let new_pubkey1 = Pubkey::new_unique();
+    let new_pubkey2 = Pubkey::new_unique();
+
+    // Reserve some pubkeys first
+    let mut reserved_pubkeys_set = HashSet::new();
+    reserved_pubkeys_set.insert(reserved_pubkey1);
+    reserved_pubkeys_set.insert(reserved_pubkey2);
+
+    table_mania
+        .reserve_pubkeys(&authority, &reserved_pubkeys_set)
+        .await
+        .unwrap();
+
+    // Verify reserved pubkeys have refcount 1
+    let reserved_refcount1 =
+        table_mania.get_pubkey_refcount(&reserved_pubkey1).await;
+    let reserved_refcount2 =
+        table_mania.get_pubkey_refcount(&reserved_pubkey2).await;
+    assert_eq!(reserved_refcount1, Some(1));
+    assert_eq!(reserved_refcount2, Some(1));
+
+    // Verify new pubkeys have no refcount yet
+    let new_refcount1 = table_mania.get_pubkey_refcount(&new_pubkey1).await;
+    let new_refcount2 = table_mania.get_pubkey_refcount(&new_pubkey2).await;
+    assert_eq!(new_refcount1, None);
+    assert_eq!(new_refcount2, None);
+
+    // Now ensure all pubkeys (both reserved and new) in a single call
+    let mut all_pubkeys_set = HashSet::new();
+    all_pubkeys_set.insert(reserved_pubkey1);
+    all_pubkeys_set.insert(reserved_pubkey2);
+    all_pubkeys_set.insert(new_pubkey1);
+    all_pubkeys_set.insert(new_pubkey2);
+
+    table_mania
+        .ensure_pubkeys_table(&authority, &all_pubkeys_set)
+        .await
+        .unwrap();
+
+    // Verify all pubkeys have refcount 1 after ensure
+    let final_refcount1 =
+        table_mania.get_pubkey_refcount(&reserved_pubkey1).await;
+    let final_refcount2 =
+        table_mania.get_pubkey_refcount(&reserved_pubkey2).await;
+    let final_refcount3 = table_mania.get_pubkey_refcount(&new_pubkey1).await;
+    let final_refcount4 = table_mania.get_pubkey_refcount(&new_pubkey2).await;
+
+    debug!("Final refcount reserved_pubkey1: {:?}", final_refcount1);
+    debug!("Final refcount reserved_pubkey2: {:?}", final_refcount2);
+    debug!("Final refcount new_pubkey1: {:?}", final_refcount3);
+    debug!("Final refcount new_pubkey2: {:?}", final_refcount4);
+
+    assert_eq!(
+        final_refcount1,
+        Some(1),
+        "Reserved pubkey should maintain refcount 1"
+    );
+    assert_eq!(
+        final_refcount2,
+        Some(1),
+        "Reserved pubkey should maintain refcount 1"
+    );
+    assert_eq!(
+        final_refcount3,
+        Some(1),
+        "New pubkey should have refcount 1"
+    );
+    assert_eq!(
+        final_refcount4,
+        Some(1),
+        "New pubkey should have refcount 1"
+    );
+
+    // Verify all pubkeys are in active tables
+    let active_pubkeys = table_mania.active_table_pubkeys().await;
+    assert!(active_pubkeys.contains(&reserved_pubkey1));
+    assert!(active_pubkeys.contains(&reserved_pubkey2));
+    assert!(active_pubkeys.contains(&new_pubkey1));
+    assert!(active_pubkeys.contains(&new_pubkey2));
+}