diff --git a/CHANGELOG.md b/CHANGELOG.md index 1dd3d1db5f..54c23b6bf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,10 @@ Sets the `ic_env` cookie for all HTML files only if the canister environment changed in the `commit_batch` method. -- Module hash: c156183e6a6f5c3c71ecde5f38a7280b770ff172b4827127ef03b89b764065ba +Use canister self-calls to avoid hitting instruction limits during `commit_batch`, `compute_evidence`, and `compute_state_hash`. + +- Module hash: 63d122d0149a29f4e48603efdd7d2bce656a6a83bac1e3207897c68e8e225bb6 +- https://github.com/dfinity/sdk/pull/4450 - https://github.com/dfinity/sdk/pull/4446 # 0.30.2 diff --git a/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs b/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs index b2a57caa38..932a3d76b3 100644 --- a/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs +++ b/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs @@ -159,15 +159,6 @@ impl<'agent> ChunkUploader<'agent> { self.chunks.load(Ordering::SeqCst) } - /// Get total size of chunks by their canister chunk IDs - pub(crate) async fn get_canister_chunk_total_size(&self, canister_chunk_ids: &[Nat]) -> usize { - let sizes = self.canister_chunk_sizes.lock().await; - canister_chunk_ids - .iter() - .filter_map(|id| sizes.get(id)) - .sum() - } - /// Call only after `finalize_upload` has completed. pub(crate) async fn uploader_ids_to_canister_chunk_ids( &self, diff --git a/src/canisters/frontend/ic-asset/src/sync.rs b/src/canisters/frontend/ic-asset/src/sync.rs index c331695ded..521719e4a2 100644 --- a/src/canisters/frontend/ic-asset/src/sync.rs +++ b/src/canisters/frontend/ic-asset/src/sync.rs @@ -45,15 +45,15 @@ use walkdir::WalkDir; const KNOWN_DIRECTORIES: [&str; 1] = [".well-known"]; /// Sets the contents of the asset canister to the contents of a directory, including deleting old assets. -pub async fn upload_content_and_assemble_sync_operations<'a>( - canister: &Canister<'a>, +pub async fn upload_content_and_assemble_sync_operations( + canister: &Canister<'_>, canister_api_version: u16, dirs: &[&Path], no_delete: bool, mode: batch_upload::plumbing::Mode, logger: &Logger, progress: Option<&dyn AssetSyncProgressRenderer>, -) -> Result<(CommitBatchArguments, ChunkUploader<'a>), UploadContentError> { +) -> Result<CommitBatchArguments, UploadContentError> { if let Some(progress) = progress { progress.set_state(AssetSyncState::GatherAssetDescriptors); } @@ -144,7 +144,7 @@ pub async fn upload_content_and_assemble_sync_operations<'a>( // -vv trace!(logger, "Value of CommitBatch: {:?}", commit_batch_args); - Ok((commit_batch_args, chunk_uploader)) + Ok(commit_batch_args) } /// Sets the contents of the asset canister to the contents of a directory, including deleting old assets. @@ -156,7 +156,7 @@ pub async fn sync( progress: Option<&dyn AssetSyncProgressRenderer>, ) -> Result<(), SyncError> { let canister_api_version = api_version(canister).await; - let (commit_batch_args, chunk_uploader) = upload_content_and_assemble_sync_operations( + let commit_batch_args = upload_content_and_assemble_sync_operations( canister, canister_api_version, dirs, @@ -180,7 +180,7 @@ pub async fn sync( warn!(logger, "The asset canister is running an old version of the API. It will not be able to set assets properties."); commit_batch(canister, commit_batch_args_v0).await } - BATCH_UPLOAD_API_VERSION.. => commit_in_stages(canister, commit_batch_args, &chunk_uploader, logger, progress).await, + BATCH_UPLOAD_API_VERSION.. 
=> commit_in_stages(canister, commit_batch_args, logger, progress).await, }.map_err(CommitBatchFailed)?; if let Some(progress) = progress { progress.set_state(AssetSyncState::Done); @@ -193,48 +193,36 @@ pub async fn sync( -/// Batches are created based on three conditions (any of which triggers a new batch): +/// Batches are created based on two conditions (either of which triggers a new batch): /// 1. 500 operations reached - generally respected limit to avoid too much cert tree work /// 2. 1.5MB of header map data reached - headers are the largest part of ingress message size -/// 3. 100MB of total chunk size reached - the full asset content gets hashed in the commit message -async fn create_commit_batches<'a>( - operations: Vec<BatchOperationKind>, - chunk_uploader: &ChunkUploader<'a>, -) -> Vec<Vec<BatchOperationKind>> { +fn create_commit_batches(operations: Vec<BatchOperationKind>) -> Vec<Vec<BatchOperationKind>> { const MAX_OPERATIONS_PER_BATCH: usize = 500; // empirically this works well enough const MAX_HEADER_MAP_SIZE: usize = 1_500_000; // 1.5 MB leaves plenty of room for other data that we do not calculate precisely - const MAX_ASSET_CONTENT_SIZE: usize = 100_000_000; // 100 MB is ~20% of how much data we can hash in a single message: 40b instructions per update call, measured best case of 80 instructions per byte hashed -> ~500MB limit let mut batches = Vec::new(); let mut current_batch = Vec::new(); let mut operation_count = 0; let mut header_map_size = 0; - let mut content_size = 0; for operation in operations { let operation_header_size = calculate_header_size(&operation); - let operation_chunk_size = calculate_content_size(&operation, chunk_uploader).await; // Check if adding this operation would exceed any limits let would_exceed_operation_limit = operation_count >= MAX_OPERATIONS_PER_BATCH; let would_exceed_header_limit = header_map_size + operation_header_size >= MAX_HEADER_MAP_SIZE; - let would_exceed_chunk_limit = - content_size + operation_chunk_size >= MAX_ASSET_CONTENT_SIZE; - if (would_exceed_operation_limit || would_exceed_header_limit || would_exceed_chunk_limit) - && !current_batch.is_empty() + if (would_exceed_operation_limit || would_exceed_header_limit) && !current_batch.is_empty() { // Start a new batch batches.push(current_batch); current_batch = Vec::new(); operation_count = 0; header_map_size = 0; - content_size = 0; } // Add operation to current batch current_batch.push(operation); operation_count += 1; header_map_size += operation_header_size; - content_size += operation_chunk_size; } // Add the last batch if it has any operations @@ -262,30 +250,9 @@ fn calculate_header_size(operation: &BatchOperationKind) -> usize { } } -/// Calculate the size in bytes of chunk data for an operation. 
-/// This includes both: -/// - Chunks referenced by `chunk_ids` (looked up from ChunkUploader) -/// - The `last_chunk` field which is included directly in the commit message -async fn calculate_content_size<'a>( - operation: &BatchOperationKind, - chunk_uploader: &ChunkUploader<'a>, -) -> usize { - match operation { - BatchOperationKind::SetAssetContent(args) => { - let chunk_ids_size = chunk_uploader - .get_canister_chunk_total_size(&args.chunk_ids) - .await; - let last_chunk_size = args.last_chunk.as_ref().map_or(0, |chunk| chunk.len()); - chunk_ids_size + last_chunk_size - } - _ => 0, - } -} - -async fn commit_in_stages<'a>( +async fn commit_in_stages( canister: &Canister<'_>, commit_batch_args: CommitBatchArguments, - chunk_uploader: &ChunkUploader<'a>, logger: &Logger, progress: Option<&dyn AssetSyncProgressRenderer>, ) -> Result<(), AgentError> { @@ -293,7 +260,7 @@ async fn commit_in_stages<'a>( progress.set_total_batch_operations(commit_batch_args.operations.len()); } - let batches = create_commit_batches(commit_batch_args.operations, chunk_uploader).await; + let batches = create_commit_batches(commit_batch_args.operations); for operations in batches { let op_amount = operations.len(); @@ -330,7 +297,7 @@ pub async fn prepare_sync_for_proposal( progress: Option<&dyn AssetSyncProgressRenderer>, ) -> Result<(Nat, ByteBuf), PrepareSyncForProposalError> { let canister_api_version = api_version(canister).await; - let (arg, _chunk_uploader) = upload_content_and_assemble_sync_operations( + let arg = upload_content_and_assemble_sync_operations( canister, canister_api_version, dirs, diff --git a/src/canisters/frontend/ic-certified-assets/src/lib.rs b/src/canisters/frontend/ic-certified-assets/src/lib.rs index 07857305ad..57d073265b 100644 --- a/src/canisters/frontend/ic-certified-assets/src/lib.rs +++ b/src/canisters/frontend/ic-certified-assets/src/lib.rs @@ -16,7 +16,7 @@ use crate::{ CallbackFunc, HttpRequest, HttpResponse, StreamingCallbackHttpResponse, StreamingCallbackToken, }, - state_machine::{AssetDetails, CertifiedTree, EncodedAsset, State}, + state_machine::{AssetDetails, CertifiedTree, ComputationStatus, EncodedAsset, State}, system_context::SystemContext, types::*, }; @@ -203,15 +203,18 @@ pub fn clear() { }); } -pub fn commit_batch(arg: CommitBatchArguments) { +pub async fn commit_batch(arg: CommitBatchArguments) { let system_context = SystemContext::new(); + let arg_ref = &arg; - with_state_mut(|s| { - if let Err(msg) = s.commit_batch(arg, &system_context) { - trap(&msg); - } - certified_data_set(s.root_hash()); - }); + loop_with_message_extension_until_completion(|progress| { + with_state_mut(|s| s.commit_batch(arg_ref, progress, &system_context)) + }) + .await + .map_err(|msg| trap(&msg)) + .ok(); + + with_state_mut(|s| certified_data_set(s.root_hash())); } pub fn propose_commit_batch(arg: CommitBatchArguments) { @@ -223,32 +226,41 @@ pub fn propose_commit_batch(arg: CommitBatchArguments) { }); } -pub fn compute_evidence(arg: ComputeEvidenceArguments) -> Option<ByteBuf> { - with_state_mut(|s| match s.compute_evidence(arg) { - Err(msg) => trap(&msg), - Ok(maybe_evidence) => maybe_evidence, +pub async fn compute_evidence( + arg: ComputeEvidenceArguments, +) -> Option<ByteBuf> { + let arg_ref = &arg; + loop_with_message_extension_until_completion(|_progress| { + with_state_mut(|s| s.compute_evidence(arg_ref)) }) + .await + .ok() } -pub fn compute_state_hash() -> Option<String> { - let system_context = SystemContext::new(); - - with_state_mut(|s| s.compute_state_hash(&system_context)) +pub async fn 
compute_state_hash() -> Option<String> { + loop_with_message_extension_until_completion(|_progress| { + with_state_mut(|s| s.compute_state_hash()) + }) + .await + .ok() } pub fn get_state_info() -> StateInfo { with_state(|s| s.get_state_info()) } -pub fn commit_proposed_batch(arg: CommitProposedBatchArguments) { +pub async fn commit_proposed_batch(arg: CommitProposedBatchArguments) { let system_context = SystemContext::new(); + let arg_ref = &arg; - with_state_mut(|s| { - if let Err(msg) = s.commit_proposed_batch(arg, &system_context) { - trap(&msg); - } - certified_data_set(s.root_hash()); - }); + loop_with_message_extension_until_completion(|progress| { + with_state_mut(|s| s.commit_proposed_batch(arg_ref, progress, &system_context)) + }) + .await + .map_err(|msg| trap(&msg)) + .ok(); + + with_state_mut(|s| certified_data_set(s.root_hash())); } pub fn validate_commit_proposed_batch(arg: CommitProposedBatchArguments) -> Result<String, String> { @@ -428,6 +440,36 @@ where STATE.with(|s| f(&s.borrow())) } +/// Loops calling a state machine function until completion, periodically async-calling +/// self to reset the instruction counter when needed. +async fn loop_with_message_extension_until_completion<F, D, P>(mut compute_fn: F) -> Result<D, String> +where + F: FnMut(P) -> ComputationStatus<D, P, String>, + P: Default, +{ + const INSTRUCTION_THRESHOLD: u64 = 35_000_000_000; // At the time of writing, 40b instructions are the limit for a single message + let mut progress = P::default(); + + loop { + match compute_fn(progress) { + ComputationStatus::Done(done) => return Ok(done), + ComputationStatus::InProgress(p) => { + progress = p; + if ic_cdk::api::performance_counter(0) > INSTRUCTION_THRESHOLD { + // Reset instruction counter 0 by doing a bogus self-call + // (self-calls are most likely to be short-circuited by the scheduler so we don't incur more wait time than necessary) + let _ = ic_cdk::call::Call::bounded_wait( + ic_cdk::api::canister_self(), + "__this-FunctionDoes_not-Exist", + ) + .await; + } + } + ComputationStatus::Error(e) => return Err(e), + } + } +} + /// Exports the whole asset canister interface, but does not handle init/pre_/post_upgrade for initial configuration or state persistence across upgrades. /// /// For a working example of how to use this macro, see [here](https://github.com/dfinity/sdk/blob/master/src/canisters/frontend/ic-frontend-canister/src/lib.rs). @@ -636,8 +678,8 @@ macro_rules! export_canister_methods { #[$crate::ic_certified_assets_update(guard = "__ic_certified_assets_can_commit")] #[$crate::ic_certified_assets_candid_method(update)] - fn commit_batch(arg: types::CommitBatchArguments) { - $crate::commit_batch(arg) + async fn commit_batch(arg: types::CommitBatchArguments) { + $crate::commit_batch(arg).await } #[$crate::ic_certified_assets_update(guard = "__ic_certified_assets_can_prepare")] @@ -648,16 +690,16 @@ macro_rules! export_canister_methods { #[$crate::ic_certified_assets_update(guard = "__ic_certified_assets_can_prepare")] #[$crate::ic_certified_assets_candid_method(update)] - fn compute_evidence( + async fn compute_evidence( arg: types::ComputeEvidenceArguments, ) -> Option<ByteBuf> { - $crate::compute_evidence(arg) + $crate::compute_evidence(arg).await } #[$crate::ic_certified_assets_update] #[$crate::ic_certified_assets_candid_method(update)] - fn compute_state_hash() -> Option<String> { - $crate::compute_state_hash() + async fn compute_state_hash() -> Option<String> { + $crate::compute_state_hash().await } #[$crate::ic_certified_assets_query] @@ -668,8 +710,8 @@ macro_rules! 
export_canister_methods { #[$crate::ic_certified_assets_update(guard = "__ic_certified_assets_can_commit")] #[$crate::ic_certified_assets_candid_method(update)] - fn commit_proposed_batch(arg: types::CommitProposedBatchArguments) { - $crate::commit_proposed_batch(arg) + async fn commit_proposed_batch(arg: types::CommitProposedBatchArguments) { + $crate::commit_proposed_batch(arg).await } #[$crate::ic_certified_assets_update] diff --git a/src/canisters/frontend/ic-certified-assets/src/state_machine/mod.rs b/src/canisters/frontend/ic-certified-assets/src/state_machine/mod.rs index 8358cda7f3..cf617aafb8 100644 --- a/src/canisters/frontend/ic-certified-assets/src/state_machine/mod.rs +++ b/src/canisters/frontend/ic-certified-assets/src/state_machine/mod.rs @@ -214,6 +214,80 @@ pub struct Batch { pub chunk_content_total_size: usize, } +/// Status of an incremental computation +#[derive(Clone, Debug)] +pub enum ComputationStatus<D, P, E> { + /// Computation completed successfully + Done(D), + /// Computation in progress, with progress state to resume from + InProgress(P), + /// Computation failed with an error + Error(E), +} + +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Default)] +pub enum CommitBatchProgress { + /// Initial state when `commit_batch` is first called. + /// + /// This phase: + /// - Reloads the canister environment + /// - Computes and validates batch limits + /// - Transitions to `ProcessingOperations` with the first operation + #[default] + Starting, + /// Processing batch operations one at a time. + /// + /// When a `SetAssetContent` operation is encountered, this transitions to + /// `HashingChunks` to hash the asset content incrementally. + /// When all operations are processed, this transitions to `UpdatingCookies` to update the HTML assets. + ProcessingOperations { + batch_id: BatchId, + operation_index: usize, + needs_cookie_update: bool, + }, + /// Incrementally hashing asset content chunks, one chunk per call. + /// + /// This phase is entered when processing a `SetAssetContent` operation to avoid + /// instruction limits when hashing large assets. The hasher processes one chunk + /// per call, allowing the operation to be resumed if interrupted. + /// + /// After all chunks are hashed, the hash is finalized, the asset encoding is created, + /// and processing continues with the next operation in `ProcessingOperations`. + HashingChunks { + batch_id: BatchId, + operation_index: usize, + needs_cookie_update: bool, + set_asset_content_arg: SetAssetContentArguments, + content_chunks: Vec<RcBytes>, + chunk_index: usize, + dependent_keys: Vec<AssetKey>, + hasher: sha2::Sha256, + }, + /// Updating cookies for HTML assets that depend on the canister environment. + /// + /// This phase is entered after all operations complete if the canister environment + /// changed during batch processing. HTML assets need their cookies updated to reflect + /// the new environment. + /// + /// One asset is updated per call until all HTML assets are processed. 
+ UpdatingCookies { + html_keys: Vec<AssetKey>, + operation_index: usize, + }, +} + +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Default)] +pub enum CommitProposedBatchProgress { + #[default] + Starting, + InProgress { + commit_batch_args: CommitBatchArguments, + commit_batch_progress: CommitBatchProgress, + }, +} + #[derive(Clone, Debug, Default)] pub struct Configuration { pub max_batches: Option<u64>, @@ -397,10 +471,9 @@ impl State { } let dependent_keys = self.dependent_keys(&arg.key); - let asset = self - .assets - .get_mut(&arg.key) - .ok_or_else(|| "asset not found".to_string())?; + if !self.assets.contains_key(&arg.key) { + return Err("asset not found".to_string()); + } let now = Int::from(system_context.current_timestamp_ns); @@ -409,7 +482,7 @@ let chunk = self.chunks.remove(chunk_id).expect("chunk not found"); content_chunks.push(chunk.content); } - if let Some(encoding_content) = arg.last_chunk { + if let Some(encoding_content) = arg.last_chunk.clone() { content_chunks.push(encoding_content.into()); } @@ -419,6 +492,17 @@ } let sha256: [u8; 32] = hasher.finalize().into(); + self.complete_set_asset_content(arg, content_chunks, sha256, now, dependent_keys) + } + + fn complete_set_asset_content( + &mut self, + arg: SetAssetContentArguments, + content_chunks: Vec<RcBytes>, + sha256: [u8; 32], + now: Int, + dependent_keys: Vec<AssetKey>, + ) -> Result<(), String> { if let Some(provided_hash) = arg.sha256 { let provided_hash: [u8; 32] = provided_hash .into_vec() @@ -429,6 +513,11 @@ } } + let asset = self + .assets + .get_mut(&arg.key) + .ok_or_else(|| "asset not found".to_string())?; + let total_length: usize = content_chunks.iter().map(|c| c.len()).sum(); let enc = AssetEncoding { modified: now, @@ -728,41 +817,216 @@ impl State { pub fn commit_batch( &mut self, - arg: CommitBatchArguments, + arg: &CommitBatchArguments, + progress: CommitBatchProgress, system_context: &SystemContext, - ) -> Result<(), String> { - // Reload the canister env to get the latest values - let old_encoded_canister_env = self.encoded_canister_env.clone(); - self.encoded_canister_env = system_context.get_canister_env().to_cookie_value(); - - let (chunks_added, bytes_added) = self.compute_last_chunk_data(&arg); - self.check_batch_limits(chunks_added, bytes_added)?; - - let batch_id = arg.batch_id; - for op in arg.operations { - match op { - BatchOperation::CreateAsset(arg) => self.create_asset(arg)?, - BatchOperation::SetAssetContent(arg) => { - self.set_asset_content(arg, system_context)? 
+ ) -> ComputationStatus<(), CommitBatchProgress, String> { + match progress { + CommitBatchProgress::Starting => { + // Reload the canister env to get the latest values + let old_encoded_canister_env = self.encoded_canister_env.clone(); + self.encoded_canister_env = system_context.get_canister_env().to_cookie_value(); + + let (chunks_added, bytes_added) = self.compute_last_chunk_data(arg); + if let Err(e) = self.check_batch_limits(chunks_added, bytes_added) { + return ComputationStatus::Error(e); } - BatchOperation::UnsetAssetContent(arg) => self.unset_asset_content(arg)?, - BatchOperation::DeleteAsset(arg) => self.delete_asset(arg), - BatchOperation::Clear(_) => self.clear(), - BatchOperation::SetAssetProperties(arg) => self.set_asset_properties(arg)?, + + let needs_cookie_update = old_encoded_canister_env != self.encoded_canister_env; + let initial_progress = CommitBatchProgress::ProcessingOperations { + batch_id: arg.batch_id.clone(), + operation_index: 0, + needs_cookie_update, + }; + ComputationStatus::InProgress(initial_progress) } - } - self.batches.remove(&batch_id); - self.certify_404_if_required(); - - // Only re-certify all HTML files if the canister environment changed. - // Assets modified in this batch already have the correct cookie via on_asset_change. - // Note: this can cause the canister to incur in the instructions limit with many assets. - if old_encoded_canister_env != self.encoded_canister_env { - self.update_ic_env_cookie_in_html_files(); - } - self.last_state_update_timestamp_ns = system_context.current_timestamp_ns; + CommitBatchProgress::ProcessingOperations { + batch_id, + operation_index, + needs_cookie_update, + } => { + // Process one operation per call + if operation_index >= arg.operations.len() { + // All operations processed + self.batches.remove(&batch_id); + self.certify_404_if_required(); + + // Move to cookie update phase if needed + if needs_cookie_update { + let html_keys: Vec<_> = self + .assets + .keys() + .filter(|key| is_html_key(key)) + .cloned() + .collect(); + + if html_keys.is_empty() { + // No HTML files to update, we're done + self.last_state_update_timestamp_ns = + system_context.current_timestamp_ns; + return ComputationStatus::Done(()); + } else { + let progress = CommitBatchProgress::UpdatingCookies { + html_keys, + operation_index: 0, + }; + return ComputationStatus::InProgress(progress); + } + } else { + self.last_state_update_timestamp_ns = system_context.current_timestamp_ns; + return ComputationStatus::Done(()); + } + } - Ok(()) + let op = &arg.operations[operation_index]; + let result = match op { + BatchOperation::CreateAsset(arg) => self.create_asset(arg.clone()), + BatchOperation::SetAssetContent(arg) => { + if !self.assets.contains_key(&arg.key) { + return ComputationStatus::Error("asset not found".to_string()); + } + if arg.chunk_ids.is_empty() && arg.last_chunk.is_none() { + return ComputationStatus::Error( + "encoding must have at least one chunk or contain last_chunk" + .to_string(), + ); + } + + let dependent_keys = self.dependent_keys(&arg.key); + + // Collect all chunks (removing them from self.chunks) + let mut content_chunks = vec![]; + for chunk_id in arg.chunk_ids.iter() { + let chunk = match self.chunks.remove(chunk_id) { + Some(c) => c, + None => { + return ComputationStatus::Error("chunk not found".to_string()); + } + }; + content_chunks.push(chunk.content); + } + if let Some(encoding_content) = arg.last_chunk.clone() { + content_chunks.push(encoding_content.into()); + } + + // Start hashing phase with an empty 
hasher + let progress = CommitBatchProgress::HashingChunks { + batch_id, + operation_index, + needs_cookie_update, + set_asset_content_arg: arg.clone(), + content_chunks, + chunk_index: 0, + dependent_keys, + hasher: sha2::Sha256::new(), + }; + return ComputationStatus::InProgress(progress); + } + BatchOperation::UnsetAssetContent(arg) => self.unset_asset_content(arg.clone()), + BatchOperation::DeleteAsset(arg) => { + self.delete_asset(arg.clone()); + Ok(()) + } + BatchOperation::Clear(_) => { + self.clear(); + Ok(()) + } + BatchOperation::SetAssetProperties(arg) => { + self.set_asset_properties(arg.clone()) + } + }; + if let Err(e) = result { + return ComputationStatus::Error(e); + } + + let progress = CommitBatchProgress::ProcessingOperations { + batch_id, + operation_index: operation_index + 1, + needs_cookie_update, + }; + ComputationStatus::InProgress(progress) + } + CommitBatchProgress::HashingChunks { + batch_id, + operation_index, + needs_cookie_update, + set_asset_content_arg, + content_chunks, + chunk_index, + dependent_keys, + mut hasher, + } => { + if chunk_index >= content_chunks.len() { + // All chunks hashed, finalize and complete set_asset_content + let sha256: [u8; 32] = hasher.finalize().into(); + let now = Int::from(system_context.current_timestamp_ns); + + if let Err(e) = self.complete_set_asset_content( + set_asset_content_arg.clone(), + content_chunks, + sha256, + now, + dependent_keys, + ) { + return ComputationStatus::Error(e); + } + + // Continue with next operation + let progress = CommitBatchProgress::ProcessingOperations { + batch_id, + operation_index: operation_index + 1, + needs_cookie_update, + }; + ComputationStatus::InProgress(progress) + } else { + // Hash one chunk per iteration + hasher.update(&content_chunks[chunk_index]); + let progress = CommitBatchProgress::HashingChunks { + batch_id, + operation_index, + needs_cookie_update, + set_asset_content_arg, + content_chunks, + chunk_index: chunk_index + 1, + dependent_keys, + hasher, + }; + ComputationStatus::InProgress(progress) + } + } + CommitBatchProgress::UpdatingCookies { + html_keys, + operation_index, + } => { + // Process one cookie update per call + if operation_index >= html_keys.len() { + // All cookies updated, we're done + self.last_state_update_timestamp_ns = system_context.current_timestamp_ns; + return ComputationStatus::Done(()); + } + + // Update one cookie + let key = &html_keys[operation_index]; + let dependent_keys = self.dependent_keys(key); + if let Some(asset) = self.assets.get_mut(key) { + on_asset_change( + &mut self.asset_hashes, + key, + asset, + dependent_keys, + Some(&self.encoded_canister_env), + ); + } + + // Update index and return progress + let progress = CommitBatchProgress::UpdatingCookies { + html_keys, + operation_index: operation_index + 1, + }; + ComputationStatus::InProgress(progress) + } + } } pub fn propose_commit_batch(&mut self, arg: CommitBatchArguments) -> Result<(), String> { @@ -782,13 +1046,49 @@ impl State { pub fn commit_proposed_batch( &mut self, - arg: CommitProposedBatchArguments, + arg: &CommitProposedBatchArguments, + progress: CommitProposedBatchProgress, system_context: &SystemContext, - ) -> Result<(), String> { - self.validate_commit_proposed_batch_args(&arg)?; - let batch = self.batches.get_mut(&arg.batch_id).unwrap(); - let proposed_batch_arguments = batch.commit_batch_arguments.take().unwrap(); - self.commit_batch(proposed_batch_arguments, system_context) + ) -> ComputationStatus<(), CommitProposedBatchProgress, String> { + match 
progress { + CommitProposedBatchProgress::Starting => { + if let Err(e) = self.validate_commit_proposed_batch_args(arg) { + return ComputationStatus::Error(e); + } + let batch = self.batches.get_mut(&arg.batch_id).unwrap(); + let commit_batch_args = batch.commit_batch_arguments.take().unwrap(); + + match self.commit_batch( + &commit_batch_args, + CommitBatchProgress::default(), + system_context, + ) { + ComputationStatus::Done(()) => ComputationStatus::Done(()), + ComputationStatus::InProgress(commit_batch_progress) => { + ComputationStatus::InProgress(CommitProposedBatchProgress::InProgress { + commit_batch_args, + commit_batch_progress, + }) + } + ComputationStatus::Error(e) => ComputationStatus::Error(e), + } + } + CommitProposedBatchProgress::InProgress { + commit_batch_args, + commit_batch_progress, + } => { + match self.commit_batch(&commit_batch_args, commit_batch_progress, system_context) { + ComputationStatus::Done(()) => ComputationStatus::Done(()), + ComputationStatus::InProgress(progress) => { + ComputationStatus::InProgress(CommitProposedBatchProgress::InProgress { + commit_batch_args, + commit_batch_progress: progress, + }) + } + ComputationStatus::Error(e) => ComputationStatus::Error(e), + } + } + } } pub fn validate_commit_proposed_batch( @@ -826,41 +1126,23 @@ impl State { Ok(()) } - fn update_ic_env_cookie_in_html_files(&mut self) { - let assets_keys: Vec<_> = self - .assets - .keys() - .filter(|key| is_html_key(key)) - .cloned() - .collect(); - - for key in assets_keys { - let dependent_keys = self.dependent_keys(&key); - if let Some(asset) = self.assets.get_mut(&key) { - on_asset_change( - &mut self.asset_hashes, - &key, - asset, - dependent_keys, - Some(&self.encoded_canister_env), - ); - } - } - } - pub fn compute_evidence( &mut self, - arg: ComputeEvidenceArguments, - ) -> Result<Option<ByteBuf>, String> { - let batch = self - .batches - .get_mut(&arg.batch_id) - .expect("batch not found"); + arg: &ComputeEvidenceArguments, + ) -> ComputationStatus<ByteBuf, (), String> { + let batch = match self.batches.get_mut(&arg.batch_id) { + Some(b) => b, + None => return ComputationStatus::Error("batch not found".to_string()), + }; - let cba = batch - .commit_batch_arguments - .as_ref() - .expect("batch does not have CommitBatchArguments"); + let cba = match batch.commit_batch_arguments.as_ref() { + Some(cba) => cba, + None => { + return ComputationStatus::Error( + "batch does not have CommitBatchArguments".to_string(), + ); + } + }; let max_iterations = arg .max_iterations @@ -875,19 +1157,22 @@ impl State { } batch.evidence_computation = Some(ec); - if let Some(Computed(evidence)) = &batch.evidence_computation { - Ok(Some(evidence.clone())) - } else { - Ok(None) + match &batch.evidence_computation { + Some(Computed(evidence)) => ComputationStatus::Done(evidence.clone()), + _ => ComputationStatus::InProgress(()), } } - pub fn compute_state_hash(&mut self, system_context: &SystemContext) -> Option<String> { + pub fn compute_state_hash(&mut self) -> ComputationStatus<String, (), String> { if self.last_state_hash_timestamp != self.last_state_update_timestamp_ns { self.state_hash_computation = None; self.last_state_hash_timestamp = self.last_state_update_timestamp_ns; } + if let Some(EvidenceComputation::Computed(evidence)) = &self.state_hash_computation { + return ComputationStatus::Done(hex::encode(evidence.as_slice())); + } + let mut ec = self.state_hash_computation.take().unwrap_or_else(|| { let mut sorted_keys: Vec<_> = self.assets.keys().cloned().collect(); sorted_keys.sort(); @@ -899,22 +1184,10 @@ impl State { } }); - // 38 billion 
instructions - const INSTRUCTION_LIMIT: u64 = 38_000_000_000; - - while system_context.instruction_counter() < INSTRUCTION_LIMIT - && !matches!(ec, EvidenceComputation::Computed(_)) - { - ec = ec.advance_virtual(self); - } - + // Advance one step + ec = ec.advance_virtual(self); self.state_hash_computation = Some(ec); - - if let Some(EvidenceComputation::Computed(evidence)) = &self.state_hash_computation { - Some(hex::encode(evidence.as_slice())) - } else { - None - } + ComputationStatus::InProgress(()) } pub fn get_state_info(&self) -> StateInfo { diff --git a/src/canisters/frontend/ic-certified-assets/src/system_context/mod.rs b/src/canisters/frontend/ic-certified-assets/src/system_context/mod.rs index 6f742540f7..efd86031ff 100644 --- a/src/canisters/frontend/ic-certified-assets/src/system_context/mod.rs +++ b/src/canisters/frontend/ic-certified-assets/src/system_context/mod.rs @@ -6,28 +6,6 @@ use canister_env::CanisterEnv; use ic_cdk::api::time; /// Context that is available only inside canister runtime. -/// -/// # Example -/// -/// ``` -/// use ic_certified_assets::system_context::SystemContext; -/// use ic_certified_assets::with_state_mut; -/// use ic_certified_assets::types::CommitBatchArguments; -/// use ic_cdk::api::{certified_data_set, trap}; -/// use ic_cdk::update; -/// -/// #[update] -/// pub fn commit_batch(arg: CommitBatchArguments) { -/// let system_context = SystemContext::new(); -/// -/// with_state_mut(|s| { -/// if let Err(msg) = s.commit_batch(arg, &system_context) { -/// trap(&msg); -/// } -/// certified_data_set(s.root_hash()); -/// }); -/// } -/// ``` pub struct SystemContext { canister_env: RefCell<Option<CanisterEnv>>, pub current_timestamp_ns: u64, diff --git a/src/canisters/frontend/ic-certified-assets/src/tests.rs b/src/canisters/frontend/ic-certified-assets/src/tests.rs index 3987b1c416..d439e785d0 100644 --- a/src/canisters/frontend/ic-certified-assets/src/tests.rs +++ b/src/canisters/frontend/ic-certified-assets/src/tests.rs @@ -2,7 +2,7 @@ use crate::CreateChunksArg; use crate::asset_certification::types::http::{ CallbackFunc, HttpRequest, HttpResponse, StreamingCallbackToken, StreamingStrategy, }; -use crate::state_machine::{BATCH_EXPIRY_NANOS, StableStateV2, State}; +use crate::state_machine::{BATCH_EXPIRY_NANOS, ComputationStatus, StableStateV2, State}; use crate::system_context::SystemContext; use crate::system_context::canister_env::CanisterEnv; use crate::types::{ @@ -48,6 +48,27 @@ fn mock_system_context() -> SystemContext { ) } +/// Synchronous test driver for incremental computations. +/// Loops calling a state machine function until completion. +/// Unlike the async version, this doesn't check instruction counters or make async calls. 
+fn run_computation_until_completion<F, D, P>(mut compute_fn: F) -> Result<D, String> +where + F: FnMut(P) -> ComputationStatus<D, P, String>, + P: Default, +{ + let mut progress = P::default(); + + loop { + match compute_fn(progress) { + ComputationStatus::Done(done) => return Ok(done), + ComputationStatus::InProgress(p) => { + progress = p; + } + ComputationStatus::Error(e) => return Err(e), + } + } +} + pub fn verify_response( state: &State, request: &HttpRequest, @@ -227,15 +248,17 @@ fn create_assets( &batch_id, ); - state - .commit_batch( - CommitBatchArguments { + run_computation_until_completion(|progress| { + state.commit_batch( + &CommitBatchArguments { batch_id: batch_id.clone(), - operations, + operations: operations.clone(), }, + progress, system_context, ) - .unwrap(); + }) + .unwrap(); batch_id } @@ -261,23 +284,25 @@ fn create_assets_by_proposal( }) .unwrap(); - let evidence = state - .compute_evidence(ComputeEvidenceArguments { + let evidence = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_id.clone(), max_iterations: Some(100), }) - .unwrap() - .unwrap(); + }) + .unwrap(); - state - .commit_proposed_batch( - CommitProposedBatchArguments { + run_computation_until_completion(|progress| { + state.commit_proposed_batch( + &CommitProposedBatchArguments { batch_id: batch_id.clone(), - evidence, + evidence: evidence.clone(), }, + progress, system_context, ) - .unwrap(); + }) + .unwrap(); batch_id } @@ -844,13 +869,15 @@ fn batches_with_evidence_do_not_expire() { operations: vec![], }; assert_eq!(Ok(()), state.propose_commit_batch(args)); - assert!(matches!( - state.compute_evidence(ComputeEvidenceArguments { - batch_id: batch_1.clone(), - max_iterations: Some(3), - }), - Ok(Some(_)) - )); + assert!( + run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_1.clone(), + max_iterations: Some(3), + }) + }) + .is_ok() + ); system_context.current_timestamp_ns = system_context.current_timestamp_ns + BATCH_EXPIRY_NANOS + 1; @@ -2559,20 +2586,24 @@ mod evidence_computation { ], }; assert!(state.propose_commit_batch(cba).is_ok()); - assert!(matches!( - state.compute_evidence(ComputeEvidenceArguments { - batch_id: batch_1.clone(), - max_iterations: Some(3), - }), - Ok(None) - )); - assert!(matches!( - state.compute_evidence(ComputeEvidenceArguments { - batch_id: batch_1, - max_iterations: Some(1), - }), - Ok(Some(_)) - )); + assert!( + run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_1.clone(), + max_iterations: Some(3), + }) + }) + .is_ok() + ); + assert!( + run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_1.clone(), + max_iterations: Some(1), + }) + }) + .is_ok() + ); } #[test] @@ -2625,20 +2656,24 @@ mod evidence_computation { ], }; assert!(state.propose_commit_batch(cba).is_ok()); - assert!(matches!( - state.compute_evidence(ComputeEvidenceArguments { - batch_id: batch_1.clone(), - max_iterations: Some(4), - }), - Ok(None) - )); - assert!(matches!( - state.compute_evidence(ComputeEvidenceArguments { - batch_id: batch_1, - max_iterations: Some(1), - }), - Ok(Some(_)) - )); + assert!( + run_computation_until_completion(|_progress| { + 
state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_1.clone(), + max_iterations: Some(1), + }) + }) + .is_ok() + ); } #[test] @@ -2667,12 +2702,13 @@ mod evidence_computation { max_iterations: Some(1), }; assert!( - state - .compute_evidence(compute_args.clone()) - .unwrap() - .is_none() + run_computation_until_completion(|_progress| { state.compute_evidence(&compute_args) }) + .is_ok() + ); + assert!( + run_computation_until_completion(|_progress| { state.compute_evidence(&compute_args) }) + .is_ok() ); - assert!(state.compute_evidence(compute_args).unwrap().is_some()); } #[test] @@ -2706,22 +2742,22 @@ mod evidence_computation { assert!(state.propose_commit_batch(cba).is_ok()); assert!( - state - .compute_evidence(ComputeEvidenceArguments { + run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_id.clone(), max_iterations: Some(3), }) - .unwrap() - .is_none() + }) + .is_ok() ); assert!( - state - .compute_evidence(ComputeEvidenceArguments { - batch_id, + run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_id.clone(), max_iterations: Some(1), }) - .unwrap() - .is_some() + }) + .is_ok() ); } @@ -2738,13 +2774,13 @@ mod evidence_computation { assert!(state.propose_commit_batch(cba).is_ok()); assert!( - state - .compute_evidence(ComputeEvidenceArguments { - batch_id, + run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_id.clone(), max_iterations: Some(1), }) - .unwrap() - .is_some() + }) + .is_ok() ); } @@ -2770,13 +2806,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -2795,13 +2831,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_2); assert_eq!(evidence_1, evidence_2); @@ -2827,13 +2863,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -2855,13 +2891,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_eq!(evidence_1, evidence_2); } } @@ -2887,13 +2923,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = 
run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -2912,13 +2948,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -2944,13 +2980,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -2969,13 +3005,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3001,13 +3037,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3027,13 +3063,13 @@ mod evidence_computation { .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_2); let batch_3 = state.create_batch(&system_context).unwrap(); @@ -3052,13 +3088,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_3 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_3, + let evidence_3 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_3.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); assert_ne!(evidence_1, evidence_3); @@ -3086,13 +3122,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3111,13 +3147,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_2 = 
run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_2); let batch_3 = state.create_batch(&system_context).unwrap(); @@ -3137,13 +3173,13 @@ mod evidence_computation { .is_ok() ); - let evidence_3 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_3 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_3.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_3); let batch_4 = state.create_batch(&system_context).unwrap(); @@ -3165,13 +3201,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_4 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_4, + let evidence_4 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_4.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); assert_ne!(evidence_1, evidence_3); @@ -3202,13 +3238,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3228,13 +3264,13 @@ mod evidence_computation { .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_2); let batch_3 = state.create_batch(&system_context).unwrap(); @@ -3253,13 +3289,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_3 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_3, + let evidence_3 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_3.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); assert_ne!(evidence_1, evidence_3); @@ -3287,13 +3323,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3313,13 +3349,13 @@ mod evidence_computation { .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_2); let batch_3 = state.create_batch(&system_context).unwrap(); @@ -3338,13 +3374,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_3 = state - 
.compute_evidence(ComputeEvidenceArguments { - batch_id: batch_3, + let evidence_3 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_3.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); assert_ne!(evidence_1, evidence_3); @@ -3371,13 +3407,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3395,13 +3431,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3426,13 +3462,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3450,13 +3486,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3493,13 +3529,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3526,13 +3562,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3581,13 +3617,13 @@ mod evidence_computation { .is_ok() ); } - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(4), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3625,13 +3661,13 @@ mod evidence_computation { .is_ok() ); } - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: 
batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(4), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3659,13 +3695,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3683,13 +3719,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3713,13 +3749,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3736,13 +3772,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3766,13 +3802,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3789,13 +3825,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { + batch_id: batch_2.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); assert_ne!(evidence_1, evidence_2); } @@ -3817,13 +3853,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_1 = state - .compute_evidence(ComputeEvidenceArguments { + let evidence_1 = run_computation_until_completion(|_progress| { + state.compute_evidence(&ComputeEvidenceArguments { batch_id: batch_1.clone(), max_iterations: Some(3), }) - .unwrap() - .unwrap(); + }) + .unwrap(); delete_batch(&mut state, batch_1); let batch_2 = state.create_batch(&system_context).unwrap(); @@ -3837,13 +3873,13 @@ mod evidence_computation { }) .is_ok() ); - let evidence_2 = state - .compute_evidence(ComputeEvidenceArguments { - batch_id: batch_2, + let evidence_2 = run_computation_until_completion(|_progress| { + 
+            state.compute_evidence(&ComputeEvidenceArguments {
+                batch_id: batch_2.clone(),
                 max_iterations: Some(3),
             })
-            .unwrap()
-            .unwrap();
+        })
+        .unwrap();
         assert_ne!(evidence_1, evidence_2);
     }
@@ -3862,13 +3898,13 @@ mod evidence_computation {
             })
             .is_ok()
         );
-        let evidence_1 = state
-            .compute_evidence(ComputeEvidenceArguments {
+        let evidence_1 = run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
                 batch_id: batch_1.clone(),
                 max_iterations: Some(3),
             })
-            .unwrap()
-            .unwrap();
+        })
+        .unwrap();
         delete_batch(&mut state, batch_1);
         let batch_2 = state.create_batch(&system_context).unwrap();
@@ -3883,13 +3919,13 @@ mod evidence_computation {
             })
             .is_ok()
         );
-        let evidence_2 = state
-            .compute_evidence(ComputeEvidenceArguments {
-                batch_id: batch_2,
+        let evidence_2 = run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
+                batch_id: batch_2.clone(),
                 max_iterations: Some(3),
             })
-            .unwrap()
-            .unwrap();
+        })
+        .unwrap();
         assert_ne!(evidence_1, evidence_2);
     }
@@ -3916,13 +3952,13 @@ mod evidence_computation {
             })
             .is_ok()
         );
-        let evidence_1 = state
-            .compute_evidence(ComputeEvidenceArguments {
+        let evidence_1 = run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
                 batch_id: batch_1.clone(),
                 max_iterations: Some(3),
             })
-            .unwrap()
-            .unwrap();
+        })
+        .unwrap();
         delete_batch(&mut state, batch_1);
         let batch_2 = state.create_batch(&system_context).unwrap();
@@ -3942,13 +3978,13 @@ mod evidence_computation {
             })
             .is_ok()
         );
-        let evidence_2 = state
-            .compute_evidence(ComputeEvidenceArguments {
-                batch_id: batch_2,
+        let evidence_2 = run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
+                batch_id: batch_2.clone(),
                 max_iterations: Some(3),
             })
-            .unwrap()
-            .unwrap();
+        })
+        .unwrap();
         assert_ne!(evidence_1, evidence_2);
     }
@@ -3999,13 +4035,13 @@ mod evidence_computation {
             .is_ok()
         );
-        state
-            .compute_evidence(ComputeEvidenceArguments {
-                batch_id: batch,
+        run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
+                batch_id: batch.clone(),
                 max_iterations: Some(3),
             })
-            .unwrap()
-            .unwrap()
+        })
+        .unwrap()
     }
     let instances = generate_unique_set_asset_properties();
@@ -4041,13 +4077,16 @@ mod validate_commit_proposed_batch {
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
-        match state.commit_proposed_batch(
-            CommitProposedBatchArguments {
-                batch_id: 1_u8.into(),
-                evidence: Default::default(),
-            },
-            &system_context,
-        ) {
+        match run_computation_until_completion(|progress| {
+            state.commit_proposed_batch(
+                &CommitProposedBatchArguments {
+                    batch_id: 1_u8.into(),
+                    evidence: Default::default(),
+                },
+                progress,
+                &system_context,
+            )
+        }) {
             Err(err) if err.contains("batch not found") => (),
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
@@ -4067,13 +4106,16 @@ mod validate_commit_proposed_batch {
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
-        match state.commit_proposed_batch(
-            CommitProposedBatchArguments {
-                batch_id,
-                evidence: Default::default(),
-            },
-            &system_context,
-        ) {
+        match run_computation_until_completion(|progress| {
+            state.commit_proposed_batch(
+                &CommitProposedBatchArguments {
+                    batch_id: batch_id.clone(),
+                    evidence: Default::default(),
+                },
+                progress,
+                &system_context,
+            )
+        }) {
             Err(err) if err.contains("batch does not have CommitBatchArguments") => (),
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
@@ -4101,13 +4143,16 @@ mod validate_commit_proposed_batch {
             Err(err) if err.contains("batch does not have computed evidence") => (),
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
-        match state.commit_proposed_batch(
-            CommitProposedBatchArguments {
-                batch_id,
-                evidence: Default::default(),
-            },
-            &system_context,
-        ) {
+        match run_computation_until_completion(|progress| {
+            state.commit_proposed_batch(
+                &CommitProposedBatchArguments {
+                    batch_id: batch_id.clone(),
+                    evidence: Default::default(),
+                },
+                progress,
+                &system_context,
+            )
+        }) {
             Err(err) if err.contains("batch does not have computed evidence") => (),
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
@@ -4128,13 +4173,15 @@ mod validate_commit_proposed_batch {
             .is_ok()
         );
-        assert!(matches!(
-            state.compute_evidence(ComputeEvidenceArguments {
-                batch_id: batch_id.clone(),
-                max_iterations: Some(1),
-            }),
-            Ok(Some(_))
-        ));
+        assert!(
+            run_computation_until_completion(|_progress| {
+                state.compute_evidence(&ComputeEvidenceArguments {
+                    batch_id: batch_id.clone(),
+                    max_iterations: Some(1),
+                })
+            })
+            .is_ok()
+        );
         match state.validate_commit_proposed_batch(CommitProposedBatchArguments {
             batch_id: batch_id.clone(),
@@ -4144,13 +4191,16 @@ mod validate_commit_proposed_batch {
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
-        match state.commit_proposed_batch(
-            CommitProposedBatchArguments {
-                batch_id,
-                evidence: Default::default(),
-            },
-            &system_context,
-        ) {
+        match run_computation_until_completion(|progress| {
+            state.commit_proposed_batch(
+                &CommitProposedBatchArguments {
+                    batch_id: batch_id.clone(),
+                    evidence: Default::default(),
+                },
+                progress,
+                &system_context,
+            )
+        }) {
             Err(err) if err.contains("does not match presented evidence") => (),
             other => panic!("expected 'batch not found' error, got: {other:?}"),
         }
@@ -4171,13 +4221,15 @@ mod validate_commit_proposed_batch {
             .is_ok()
         );
-        let compute_evidence_result = state.compute_evidence(ComputeEvidenceArguments {
-            batch_id: batch_id.clone(),
-            max_iterations: Some(1),
+        let compute_evidence_result = run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
+                batch_id: batch_id.clone(),
+                max_iterations: Some(1),
+            })
         });
-        assert!(matches!(compute_evidence_result, Ok(Some(_))));
+        assert!(compute_evidence_result.is_ok());
-        let evidence = if let Ok(Some(computed_evidence)) = compute_evidence_result {
+        let evidence = if let Ok(computed_evidence) = compute_evidence_result {
             computed_evidence
         } else {
             unreachable!()
@@ -4193,12 +4245,17 @@ mod validate_commit_proposed_batch {
             "commit proposed batch 0 with evidence e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
         );
-        state
-            .commit_proposed_batch(
-                CommitProposedBatchArguments { batch_id, evidence },
+        run_computation_until_completion(|progress| {
+            state.commit_proposed_batch(
+                &CommitProposedBatchArguments {
+                    batch_id: batch_id.clone(),
+                    evidence: evidence.clone(),
+                },
+                progress,
                 &system_context,
             )
-            .unwrap();
+        })
+        .unwrap();
     }
 }
@@ -4391,13 +4448,13 @@ mod enforce_limits {
         );
         assert!(
-            state
-                .compute_evidence(ComputeEvidenceArguments {
-                    batch_id,
+            run_computation_until_completion(|_progress| {
+                state.compute_evidence(&ComputeEvidenceArguments {
+                    batch_id: batch_id.clone(),
                     max_iterations: Some(1),
                 })
-                .unwrap()
-                .is_some()
+            })
+            .is_ok()
         );
         system_context.current_timestamp_ns = time_now + BATCH_EXPIRY_NANOS + 1;
@@ -4495,7 +4552,7 @@ mod enforce_limits {
         state
             .create_chunk(
                 CreateChunkArg {
-                    batch_id: batch_2,
+                    batch_id: batch_2.clone(),
                     content: ByteBuf::new(),
                 },
                 &system_context,
@@ -4563,7 +4620,7 @@ mod enforce_limits {
         state
            .create_chunk(
                 CreateChunkArg {
-                    batch_id: batch_2,
+                    batch_id: batch_2.clone(),
                     content: ByteBuf::from(c3),
                 },
                 &system_context,
@@ -4600,10 +4657,10 @@ mod last_state_update_timestamp {
         // Create and commit a batch with asset operations
         let batch_id = state.create_batch(&system_context).unwrap();
-        state
-            .commit_batch(
-                CommitBatchArguments {
-                    batch_id,
+        run_computation_until_completion(|progress| {
+            state.commit_batch(
+                &CommitBatchArguments {
+                    batch_id: batch_id.clone(),
                     operations: vec![BatchOperation::CreateAsset(CreateAssetArguments {
                         key: "/test.txt".to_string(),
                         content_type: "text/plain".to_string(),
@@ -4613,9 +4670,11 @@ mod last_state_update_timestamp {
                         allow_raw_access: None,
                     })],
                 },
+                progress,
                 &system_context,
             )
-            .unwrap();
+        })
+        .unwrap();
         // Timestamp should be updated to system context timestamp
         assert_eq!(
@@ -4684,10 +4743,10 @@ mod last_state_update_timestamp {
         let updated_time = system_context.current_timestamp_ns;
         let batch_id = state.create_batch(&system_context).unwrap();
-        state
-            .commit_batch(
-                CommitBatchArguments {
-                    batch_id,
+        run_computation_until_completion(|progress| {
+            state.commit_batch(
+                &CommitBatchArguments {
+                    batch_id: batch_id.clone(),
                     operations: vec![BatchOperation::SetAssetProperties(
                         SetAssetPropertiesArguments {
                             key: "/test.txt".to_string(),
@@ -4701,9 +4760,11 @@ mod last_state_update_timestamp {
                         },
                     )],
                 },
+                progress,
                 &system_context,
             )
-            .unwrap();
+        })
+        .unwrap();
         // Timestamp should be updated to new time
         assert_eq!(state.last_state_update_timestamp_ns(), updated_time);
@@ -5266,19 +5327,23 @@ mod compute_state_hash {
         state.propose_commit_batch(args.clone()).unwrap();
-        let evidence = state
-            .compute_evidence(ComputeEvidenceArguments {
+        let evidence = run_computation_until_completion(|_progress| {
+            state.compute_evidence(&ComputeEvidenceArguments {
                 batch_id: batch_id.clone(),
                 max_iterations: None,
             })
-            .unwrap()
-            .unwrap();
+        })
+        .unwrap();
         // Now apply the batch to state so we can compute state hash
-        state.commit_batch(args, &system_context).unwrap();
+        run_computation_until_completion(|progress| {
+            state.commit_batch(&args, progress, &system_context)
+        })
+        .unwrap();
         // Compute state hash
-        let state_hash = state.compute_state_hash(&system_context).unwrap();
+        let state_hash =
+            run_computation_until_completion(|_progress| state.compute_state_hash()).unwrap();
         assert_eq!(
             hex::encode(evidence.as_slice()),
@@ -5324,10 +5389,13 @@ mod compute_state_hash {
                 }),
             ],
         };
-        state.commit_batch(args, &system_context).unwrap();
+        run_computation_until_completion(|progress| {
+            state.commit_batch(&args, progress, &system_context)
+        })
+        .unwrap();
         // Reset computation
-        state.compute_state_hash(&system_context).unwrap(); // Ensure it's done or started
+        run_computation_until_completion(|_progress| state.compute_state_hash()).unwrap(); // Ensure it's done or started
         // Update state using commit_batch to ensure timestamp is updated
         // We need a new system context with a later timestamp
@@ -5350,15 +5418,18 @@ mod compute_state_hash {
                 allow_raw_access: None,
             })],
         };
-        state.commit_batch(args, &system_context_later).unwrap();
+        run_computation_until_completion(|progress| {
+            state.commit_batch(&args, progress, &system_context_later)
+        })
+        .unwrap();
         // Since the new API doesn't allow controlling instruction counter per call,
         // we can't easily test interruption. This test now just verifies completion.
-        let result = state.compute_state_hash(&system_context_later);
-        assert!(result.is_some());
+        let result = run_computation_until_completion(|_progress| state.compute_state_hash());
+        assert!(result.is_ok());
         // Verify we can call it again
-        let result = state.compute_state_hash(&system_context_later);
-        assert!(result.is_some());
+        let result = run_computation_until_completion(|_progress| state.compute_state_hash());
+        assert!(result.is_ok());
     }
 }
diff --git a/src/distributed/assetstorage.wasm.gz b/src/distributed/assetstorage.wasm.gz
index 6ebcb3285d..01709b2c50 100755
Binary files a/src/distributed/assetstorage.wasm.gz and b/src/distributed/assetstorage.wasm.gz differ
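
Reviewer note: every test hunk above funnels through a `run_computation_until_completion` helper whose definition is not part of this excerpt. The sketch below shows the kind of driving loop such a helper plausibly implements; the `Progress` and `ComputationStep` types, the field names, and the exact signature are illustrative assumptions, not the PR's actual API.

```rust
/// Illustrative sketch only: the real helper lives elsewhere in this PR.
/// `Progress` stands in for whatever resumable-computation state the
/// asset canister carries across self-calls.
#[derive(Clone, Copy, Default, Debug)]
struct Progress {
    instructions_used: u64,
}

enum ComputationStep<T, E> {
    /// The computation finished, successfully or with an error.
    Done(Result<T, E>),
    /// The computation hit its per-call budget; call again with the
    /// carried-over progress, mirroring a canister self-call.
    Continue(Progress),
}

/// Drive a resumable computation to completion by repeatedly invoking
/// `step`, the way the canister re-enters itself via self-calls.
fn run_computation_until_completion<T, E>(
    mut step: impl FnMut(Progress) -> ComputationStep<T, E>,
) -> Result<T, E> {
    let mut progress = Progress::default();
    loop {
        match step(progress) {
            ComputationStep::Done(result) => return result,
            ComputationStep::Continue(next) => progress = next,
        }
    }
}

fn main() {
    // Toy computation that needs three "calls" before it finishes.
    let mut calls = 0u64;
    let result: Result<u64, String> = run_computation_until_completion(|p| {
        calls += 1;
        if calls < 3 {
            ComputationStep::Continue(Progress {
                instructions_used: p.instructions_used + 1,
            })
        } else {
            ComputationStep::Done(Ok(p.instructions_used))
        }
    });
    assert_eq!(result, Ok(2));
}
```

Because the closure may be invoked more than once before the computation reports completion, it cannot move owned values in; that is consistent with the hunks above cloning `batch_id`, `evidence`, and the argument structs on each call, and with `compute_evidence`/`commit_batch` now taking their arguments by reference.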