From 84939ac33f43f03f1a76ca4e271ae969ea44a7ae Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 6 Mar 2025 15:54:51 -0600 Subject: [PATCH 1/7] Remove unnecessary drain --- lightning/src/ln/channel.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 27ba267c431..533e5c49be9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3590,7 +3590,7 @@ impl ChannelContext where SP::Target: SignerProvider { } bitcoin_tx.txid }; - let mut htlcs_cloned: Vec<_> = commitment_data.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); + let htlcs_cloned: Vec<_> = commitment_data.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); // If our counterparty updated the channel fee in this commitment transaction, check that // they can actually afford the new fee now. @@ -3629,7 +3629,7 @@ impl ChannelContext where SP::Target: SignerProvider { let holder_keys = commitment_data.stats.tx.trust().keys(); let mut nondust_htlc_sources = Vec::with_capacity(commitment_data.stats.tx.htlcs().len()); let mut dust_htlcs = Vec::with_capacity(htlcs_cloned.len() - commitment_data.stats.tx.htlcs().len()); - for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() { + for (idx, (htlc, mut source_opt)) in htlcs_cloned.into_iter().enumerate() { if let Some(_) = htlc.transaction_output_index { let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_data.stats.tx.feerate_per_kw(), funding.get_counterparty_selected_contest_delay().unwrap(), &htlc, funding.get_channel_type(), From 89919aa96f8f439a75949bb330c33869caafe7e1 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 6 Mar 2025 16:33:21 -0600 Subject: [PATCH 2/7] Add pending funding scopes to FundedChannel Once a channel is funded, it may be spliced to add or remove funds. 
The new funding transaction is pending until confirmed on chain and thus needs to be tracked. Additionally, it may be replaced by another transaction using RBF with a higher fee. Hence, there may be more than one pending FundingScope to track for a splice. This commit adds support for tracking pending funding scopes. The following commits will account for any pending scopes where applicable (e.g., when handling commitment_signed). --- lightning/src/chain/onchaintx.rs | 2 +- lightning/src/ln/chan_utils.rs | 19 ++++++---- lightning/src/ln/channel.rs | 60 ++++++++++++++++++++++++++++++-- lightning/src/sign/mod.rs | 6 ++-- 4 files changed, 75 insertions(+), 12 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index d0deef6ba77..5ec0713bae6 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -368,7 +368,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let prev_holder_commitment = Readable::read(reader)?; let _prev_holder_htlc_sigs: Option>> = Readable::read(reader)?; - let channel_parameters = ReadableArgs::::read(reader, channel_value_satoshis)?; + let channel_parameters = ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; // Read the serialized signer bytes, but don't deserialize them, as we'll obtain our signer // by re-deriving the private key material. 
diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index cb87616ab32..93fd78fbd32 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -1034,8 +1034,8 @@ impl Writeable for ChannelTransactionParameters { } } -impl ReadableArgs for ChannelTransactionParameters { - fn read(reader: &mut R, read_args: u64) -> Result { +impl ReadableArgs> for ChannelTransactionParameters { + fn read(reader: &mut R, read_args: Option) -> Result { let mut holder_pubkeys = RequiredWrapper(None); let mut holder_selected_contest_delay = RequiredWrapper(None); let mut is_outbound_from_holder = RequiredWrapper(None); @@ -1058,10 +1058,17 @@ impl ReadableArgs for ChannelTransactionParameters { (13, channel_value_satoshis, option), }); - let channel_value_satoshis = channel_value_satoshis.unwrap_or(read_args); - if channel_value_satoshis != read_args { - return Err(DecodeError::InvalidValue); - } + let channel_value_satoshis = match read_args { + None => channel_value_satoshis.ok_or(DecodeError::InvalidValue)?, + Some(expected_value) => { + let channel_value_satoshis = channel_value_satoshis.unwrap_or(expected_value); + if channel_value_satoshis == expected_value { + channel_value_satoshis + } else { + return Err(DecodeError::InvalidValue); + } + }, + }; let mut additional_features = ChannelTypeFeatures::empty(); additional_features.set_anchors_nonzero_fee_htlc_tx_required(); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 533e5c49be9..55230a0231b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -60,7 +60,7 @@ use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Reci use crate::events::{ClosureReason, Event}; use crate::events::bump_transaction::BASE_INPUT_WEIGHT; use crate::routing::gossip::NodeId; -use crate::util::ser::{Readable, ReadableArgs, TransactionU16LenLimited, Writeable, Writer}; +use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, 
TransactionU16LenLimited, Writeable, Writer}; use crate::util::logger::{Logger, Record, WithContext}; use crate::util::errors::APIError; use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure}; @@ -1519,6 +1519,7 @@ impl Channel where }; let mut funded_channel = FundedChannel { funding: chan.funding, + pending_funding: vec![], context: chan.context, interactive_tx_signing_session: chan.interactive_tx_signing_session, holder_commitment_point, @@ -1665,6 +1666,53 @@ pub(super) struct FundingScope { funding_transaction: Option, } +impl Writeable for FundingScope { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + write_tlv_fields!(writer, { + (1, self.value_to_self_msat, required), + (3, self.counterparty_selected_channel_reserve_satoshis, option), + (5, self.holder_selected_channel_reserve_satoshis, required), + (7, self.channel_transaction_parameters, (required: ReadableArgs, None)), + (9, self.funding_transaction, option), + }); + Ok(()) + } +} + +impl Readable for FundingScope { + fn read(reader: &mut R) -> Result { + let mut value_to_self_msat = RequiredWrapper(None); + let mut counterparty_selected_channel_reserve_satoshis = None; + let mut holder_selected_channel_reserve_satoshis = RequiredWrapper(None); + let mut channel_transaction_parameters = RequiredWrapper(None); + let mut funding_transaction = None; + + read_tlv_fields!(reader, { + (1, value_to_self_msat, required), + (3, counterparty_selected_channel_reserve_satoshis, option), + (5, holder_selected_channel_reserve_satoshis, required), + (7, channel_transaction_parameters, (required: ReadableArgs, None)), + (9, funding_transaction, option), + }); + + Ok(Self { + value_to_self_msat: value_to_self_msat.0.unwrap(), + counterparty_selected_channel_reserve_satoshis, + holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.0.unwrap(), + #[cfg(debug_assertions)] + 
holder_max_commitment_tx_output: Mutex::new((0, 0)), + #[cfg(debug_assertions)] + counterparty_max_commitment_tx_output: Mutex::new((0, 0)), + channel_transaction_parameters: channel_transaction_parameters.0.unwrap(), + funding_transaction, + #[cfg(any(test, fuzzing))] + next_local_commitment_tx_fee_info_cached: Mutex::new(None), + #[cfg(any(test, fuzzing))] + next_remote_commitment_tx_fee_info_cached: Mutex::new(None), + }) + } +} + impl FundingScope { pub fn get_value_satoshis(&self) -> u64 { self.channel_transaction_parameters.channel_value_satoshis @@ -4945,6 +4993,7 @@ pub(super) struct DualFundingChannelContext { // Counterparty designates channel data owned by the another channel participant entity. pub(super) struct FundedChannel where SP::Target: SignerProvider { pub funding: FundingScope, + pending_funding: Vec, pub context: ChannelContext, pub interactive_tx_signing_session: Option, holder_commitment_point: HolderCommitmentPoint, @@ -9548,6 +9597,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { let mut channel = FundedChannel { funding: self.funding, + pending_funding: vec![], context: self.context, interactive_tx_signing_session: None, is_v2_established: false, @@ -9824,6 +9874,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { // `ChannelMonitor`. 
let mut channel = FundedChannel { funding: self.funding, + pending_funding: vec![], context: self.context, interactive_tx_signing_session: None, is_v2_established: false, @@ -10633,6 +10684,7 @@ impl Writeable for FundedChannel where SP::Target: SignerProvider (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122 (51, is_manual_broadcast, option), // Added in 0.0.124 (53, funding_tx_broadcast_safe_event_emitted, option), // Added in 0.0.124 + (54, self.pending_funding, optional_vec), // Added in 0.2 (55, removed_htlc_failure_attribution_data, optional_vec), // Added in 0.2 (57, holding_cell_failure_attribution_data, optional_vec), // Added in 0.2 }); @@ -10862,7 +10914,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c Channel _ => return Err(DecodeError::InvalidValue), }; - let channel_parameters: ChannelTransactionParameters = ReadableArgs::::read(reader, channel_value_satoshis)?; + let channel_parameters: ChannelTransactionParameters = ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; let funding_transaction: Option = Readable::read(reader)?; let counterparty_cur_commitment_point = Readable::read(reader)?; @@ -10932,6 +10984,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c Channel let mut next_holder_commitment_point_opt: Option = None; let mut is_manual_broadcast = None; + let mut pending_funding = Some(Vec::new()); + read_tlv_fields!(reader, { (0, announcement_sigs, option), (1, minimum_depth, option), @@ -10967,6 +11021,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c Channel (49, local_initiated_shutdown, option), (51, is_manual_broadcast, option), (53, funding_tx_broadcast_safe_event_emitted, option), + (54, pending_funding, optional_vec), // Added in 0.2 (55, removed_htlc_failure_attribution_data, optional_vec), (57, holding_cell_failure_attribution_data, optional_vec), }); @@ -11142,6 +11197,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> 
ReadableArgs<(&'a ES, &'b SP, &'c Channel channel_transaction_parameters: channel_parameters, funding_transaction, }, + pending_funding: pending_funding.unwrap(), context: ChannelContext { user_id, diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 8e2d8697a47..df79df6bab8 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -124,7 +124,7 @@ impl_writeable_tlv_based!(DelayedPaymentOutputDescriptor, { (8, revocation_pubkey, required), (10, channel_keys_id, required), (12, channel_value_satoshis, required), - (13, channel_transaction_parameters, (option: ReadableArgs, channel_value_satoshis.0.unwrap())), + (13, channel_transaction_parameters, (option: ReadableArgs, Some(channel_value_satoshis.0.unwrap()))), }); pub(crate) const P2WPKH_WITNESS_WEIGHT: u64 = 1 /* num stack items */ + @@ -199,7 +199,7 @@ impl_writeable_tlv_based!(StaticPaymentOutputDescriptor, { (2, output, required), (4, channel_keys_id, required), (6, channel_value_satoshis, required), - (7, channel_transaction_parameters, (option: ReadableArgs, channel_value_satoshis.0.unwrap())), + (7, channel_transaction_parameters, (option: ReadableArgs, Some(channel_value_satoshis.0.unwrap()))), }); /// Describes the necessary information to spend a spendable output. @@ -559,7 +559,7 @@ pub struct ChannelDerivationParameters { impl_writeable_tlv_based!(ChannelDerivationParameters, { (0, value_satoshis, required), (2, keys_id, required), - (4, transaction_parameters, (required: ReadableArgs, value_satoshis.0.unwrap())), + (4, transaction_parameters, (required: ReadableArgs, Some(value_satoshis.0.unwrap()))), }); /// A descriptor used to sign for a commitment transaction's HTLC output. 
From 00bfb05d6fc1e21c9cb2606213c48facbc90600a Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 11 Mar 2025 23:09:21 -0500 Subject: [PATCH 3/7] Send commitment_signed batch for splicing A FundedChannel may have more than one pending FundingScope during splicing, one for the splice attempt and one or more for any RBF attempts. When this is the case, send a commitment_signed message for each FundingScope and include the necessary batch information (i.e., batch_size and funding_txid) to the counterparty. --- fuzz/src/chanmon_consistency.rs | 6 +- fuzz/src/full_stack.rs | 8 +- lightning/src/chain/chainmonitor.rs | 8 +- lightning/src/ln/async_signer_tests.rs | 16 +- lightning/src/ln/blinded_payment_tests.rs | 4 +- lightning/src/ln/chanmon_update_fail_tests.rs | 161 +++++++-------- lightning/src/ln/channel.rs | 111 ++++++---- lightning/src/ln/channelmanager.rs | 23 ++- lightning/src/ln/dual_funding_tests.rs | 2 +- lightning/src/ln/functional_test_utils.rs | 54 ++--- lightning/src/ln/functional_tests.rs | 192 +++++++++--------- lightning/src/ln/invoice_utils.rs | 2 +- lightning/src/ln/monitor_tests.rs | 4 +- lightning/src/ln/msgs.rs | 29 ++- lightning/src/ln/onion_route_tests.rs | 2 +- lightning/src/ln/payment_tests.rs | 40 ++-- lightning/src/ln/peer_handler.rs | 11 +- lightning/src/ln/priv_short_conf_tests.rs | 4 +- lightning/src/ln/quiescence_tests.rs | 16 +- lightning/src/ln/reload_tests.rs | 6 +- lightning/src/ln/reorg_tests.rs | 2 +- lightning/src/ln/shutdown_tests.rs | 6 +- 22 files changed, 398 insertions(+), 309 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 1869dfb0d1c..e0b038c30cd 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -1089,7 +1089,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { for event in &mut events_iter { had_events = true; match event { - MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, 
update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { @@ -1127,7 +1127,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); if $limit_events != ProcessMessages::AllMessages && processed_change { // If we only want to process some messages, don't deliver the CS until later. - extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { + extra_ev = Some(MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates: CommitmentUpdate { update_add_htlcs: Vec::new(), update_fail_htlcs: Vec::new(), update_fulfill_htlcs: Vec::new(), @@ -1138,7 +1138,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { break; } out.locked_write(format!("Delivering commitment_signed from node {} to node {}.\n", $node, idx).as_bytes()); - dest.handle_commitment_signed(nodes[$node].get_our_node_id(), &commitment_signed); + dest.handle_commitment_signed_batch_test(nodes[$node].get_our_node_id(), &commitment_signed); break; } } diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index a2f4ecac227..241f1bbb72a 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -1639,13 +1639,13 @@ mod tests { // 5 assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendRevokeAndACK event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&4)); // 6 - 
assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 0 fails, 1 commits for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7 - assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails, 1 commits for channel 3a00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 8 - assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 1 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 1 fulfills, 0 fails, 1 commits for channel 
3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 9 - assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); + assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000002 with 0 adds, 0 fulfills, 1 fails, 1 commits for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 10 assert_eq!(log_entries.get(&("lightning::chain::channelmonitor".to_string(), "Input spending counterparty commitment tx (0000000000000000000000000000000000000000000000000000000000000073:0) in 0000000000000000000000000000000000000000000000000000000000000067 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index b953b386ed6..c2b0a62e2d5 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -1008,20 +1008,20 @@ mod tests { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_first_raa, as_first_update) = 
get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); check_added_monitors!(nodes[1], 1); let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_update); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_update); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); expect_payment_path_successful!(nodes[0]); @@ -1030,7 +1030,7 @@ mod tests { nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_update); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update); check_added_monitors!(nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 9c0802bd921..9cd319c044c 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -262,7 +262,7 @@ fn 
do_test_async_commitment_signature_for_commitment_signed_revoke_and_ack(enabl dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::GetPerCommitmentPoint); dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::ReleaseCommitmentSecret); dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); - dst.node.handle_commitment_signed(src.node.get_our_node_id(), &payment_event.commitment_msg); + dst.node.handle_commitment_signed_batch_test(src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); let mut enabled_signer_ops = new_hash_set(); @@ -456,7 +456,7 @@ fn do_test_async_raa_peer_disconnect(test_case: UnblockSignerAcrossDisconnectCas // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, block_raa_signer_op); - dst.node.handle_commitment_signed(src.node.get_our_node_id(), &payment_event.commitment_msg); + dst.node.handle_commitment_signed_batch_test(src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); let events = dst.node.get_and_clear_pending_msg_events(); @@ -580,7 +580,7 @@ fn do_test_async_commitment_signature_peer_disconnect(test_case: UnblockSignerAc // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. 
dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); - dst.node.handle_commitment_signed(src.node.get_our_node_id(), &payment_event.commitment_msg); + dst.node.handle_commitment_signed_batch_test(src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); if test_case != UnblockSignerAcrossDisconnectCase::BeforeMonitorRestored { @@ -690,13 +690,13 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_fulfill_htlcs, ref commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_fulfill_htlcs, ref commitment_signed, .. } } => { nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); if monitor_update_failure { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); if monitor_update_failure { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } else { @@ -766,7 +766,7 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { // Now that everything is restored, get the CS + RAA and handle them. 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()); let (bs_revoke_and_ack, bs_second_commitment_signed) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 2); @@ -777,12 +777,12 @@ fn do_test_async_commitment_signature_ordering(monitor_update_failure: bool) { let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 17494b06098..7051a1c1a8a 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ 
b/lightning/src/ln/blinded_payment_tests.rs @@ -1009,7 +1009,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event_1_2.commitment_msg); check_added_monitors!(nodes[2], 1); nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); @@ -1051,7 +1051,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { assert_eq!(events.len(), 2); events.into_iter().find_map(|ev| { match ev { - MessageSendEvent:: UpdateHTLCs { node_id, updates } => { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()); return Some(updates) }, diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 10214488e3e..1192a14e2b9 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -96,7 +96,8 @@ fn test_monitor_and_persister_update_fail() { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2).as_funded_mut() { - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { + assert_eq!(updates.commitment_signed.len(), 1); + if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. 
if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(chan.2, &update) {} else { panic!("Expected monitor paused"); } @@ -273,7 +274,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -293,7 +294,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -393,7 +394,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed); let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's 
assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -413,7 +414,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(bs_resp == second_bs_resp); } - (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap()) + (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), channel_id, as_resp.2.unwrap()), as_resp.1.unwrap()) } else { let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 2); @@ -429,7 +430,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -523,12 +524,12 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } } - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed); + 
nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed); let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -613,7 +614,7 @@ fn test_monitor_update_fail_cs() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -634,7 +635,7 @@ fn test_monitor_update_fail_cs() { _ => panic!("Unexpected event"), } match responses[1] { - MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => { + MessageSendEvent::UpdateHTLCs { ref updates, ref node_id, channel_id: _ } => { assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -643,7 +644,7 @@ fn test_monitor_update_fail_cs() { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -760,14 +761,14 @@ fn 
test_monitor_update_raa_while_paused() { let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -785,11 +786,11 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_update_raa.1); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_raa.1); check_added_monitors!(nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); 
check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -893,7 +894,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) @@ -919,7 +920,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // ordering of the two events that both go to nodes[2] have to stay in the same order. let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3); let messages_a = match nodes_0_event { - MessageSendEvent::UpdateHTLCs { node_id, mut updates } => { + MessageSendEvent::UpdateHTLCs { node_id, mut updates, channel_id: _ } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -955,7 +956,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]); let as_cs; if test_ignore_second_cs { - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); 
nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa.unwrap()); @@ -971,10 +972,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[1], 1); } else { - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); @@ -992,14 +993,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); match bs_revoke_and_commit[1] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), @@ -1015,7 +1016,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); - 
nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); check_added_monitors!(nodes[2], 1); let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -1027,7 +1028,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed); check_added_monitors!(nodes[1], 1); let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); @@ -1206,7 +1207,7 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1216,7 +1217,7 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, 
nodes[1].node.get_our_node_id()); @@ -1226,7 +1227,7 @@ fn raa_no_response_awaiting_raa_state() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1258,12 +1259,12 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -1277,7 +1278,7 @@ fn raa_no_response_awaiting_raa_state() { nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), 
&bs_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -1348,7 +1349,7 @@ fn claim_while_disconnected_monitor_update_fail() { let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_updates.commitment_signed); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC @@ -1365,11 +1366,11 @@ fn claim_while_disconnected_monitor_update_fail() { assert_eq!(bs_msgs.len(), 2); match bs_msgs[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -1391,11 +1392,11 @@ fn claim_while_disconnected_monitor_update_fail() { let as_commitment = get_htlc_update_msgs!(nodes[0], 
nodes[1].node.get_our_node_id()); let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); @@ -1447,7 +1448,7 @@ fn monitor_failed_no_reestablish_response() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); @@ -1479,7 +1480,7 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], 
MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -1524,13 +1525,13 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -1560,7 +1561,7 @@ fn first_message_on_recv_ordering() { // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1575,7 +1576,7 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -2063,7 +2064,7 @@ fn test_pending_update_fee_ack_on_reconnect() { assert!(as_update_fee_msgs.update_fee.is_some()); nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // bs_first_raa is not delivered until it is re-generated after reconnect @@ -2095,7 +2096,7 @@ fn test_pending_update_fee_ack_on_reconnect() { get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), 
&bs_initial_send_msgs.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); check_added_monitors!(nodes[1], 1); @@ -2103,11 +2104,11 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed); check_added_monitors!(nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); check_added_monitors!(nodes[0], 1); @@ -2225,19 +2226,19 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { assert!(update_msgs.update_fee.is_some()); nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); if parallel_updates { - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed); + 
nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); check_added_monitors!(nodes[0], 1); let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); check_added_monitors!(nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2248,7 +2249,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -2324,7 +2325,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { 
check_added_monitors!(nodes[0], 1); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &send.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send.commitment_msg); check_added_monitors!(nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2404,11 +2405,11 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert_eq!(events.len(), 1); // Deliver the pending in-flight CS - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &cs); check_added_monitors!(nodes[0], 1); let commitment_msg = match events.pop().unwrap() { - MessageSendEvent::UpdateHTLCs { node_id, updates } => { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -2423,7 +2424,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { _ => panic!("Unexpected event type!"), }; - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_msg); check_added_monitors!(nodes[1], 1); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -2492,13 +2493,13 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - 
nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); check_added_monitors!(nodes[0], 1); as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); @@ -2687,7 +2688,7 @@ fn double_temp_error() { assert_eq!(msg_events.len(), 1); let (update_fulfill_1, commitment_signed_b1, node_id) = { match &msg_events[0] { - &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + &MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -2702,7 +2703,7 @@ fn double_temp_error() { nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_1); check_added_monitors!(nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed_b1); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed_b1); 
check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -2710,7 +2711,7 @@ fn double_temp_error() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_a1); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commitment_signed_a1); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed_a1); check_added_monitors!(nodes[1], 1); // Complete the second HTLC. @@ -2718,7 +2719,7 @@ fn double_temp_error() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); (match &events[0] { - MessageSendEvent::UpdateHTLCs { node_id, updates } => { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -2974,7 +2975,7 @@ fn test_blocked_chan_preimage_release() { // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. 
- nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); @@ -3051,13 +3052,13 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we // won't get the preimage when the nodes reconnect and we have to get it from the // ChannelMonitor. - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); if complete_bc_commitment_dance { let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors(&nodes[2], 1); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); check_added_monitors(&nodes[2], 1); let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -3215,7 +3216,7 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get // the preimage when the nodes reconnect, at which point we have to ensure we get it from the // ChannelMonitor. 
- nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); @@ -3368,13 +3369,13 @@ fn test_sync_async_persist_doesnt_hang() { let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors(&nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors(&nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_cs); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); check_added_monitors(&nodes[1], 1); let bs_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -3394,7 +3395,7 @@ fn test_sync_async_persist_doesnt_hang() { let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors(&nodes[0], 1); // At this point, 
we have completed an extra `ChannelMonitorUpdate` but the `ChannelManager` @@ -3418,7 +3419,7 @@ fn test_sync_async_persist_doesnt_hang() { // Finally, complete the claiming of the second payment nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors(&nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_cs); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); check_added_monitors(&nodes[1], 1); let bs_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -3465,13 +3466,13 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Now step the Commitment Signed Dance between B and C and check that after the final RAA B // doesn't let the preimage-removing monitor update fly. - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors(&nodes[2], 1); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); check_added_monitors(&nodes[2], 1); let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -3609,7 +3610,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { let mut c_update = msg_events.iter() .filter(|ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, .. 
} if *node_id == node_c_id)) .cloned().collect::>(); - let a_filtermap = |ev| if let MessageSendEvent::UpdateHTLCs { node_id, updates } = ev { + let a_filtermap = |ev| if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = ev { if node_id == node_a_id { Some(updates) } else { @@ -3686,14 +3687,14 @@ fn test_partial_claim_mon_update_compl_actions() { expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, false); let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[3].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[3].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[3].node.get_our_node_id()); nodes[3].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors(&nodes[3], 0); - nodes[3].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[3].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); check_added_monitors(&nodes[3], 0); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); @@ -3723,7 +3724,7 @@ fn test_partial_claim_mon_update_compl_actions() { expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); let _cs_updates_for_a = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); - nodes[2].node.handle_commitment_signed(nodes[3].node.get_our_node_id(), &updates.commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(nodes[3].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors(&nodes[2], 1); }, _ => panic!(), @@ -3734,7 +3735,7 @@ fn test_partial_claim_mon_update_compl_actions() { nodes[3].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_raa); check_added_monitors(&nodes[3], 1); - 
nodes[3].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &cs_cs); + nodes[3].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_cs); check_added_monitors(&nodes[3], 1); let ds_raa = get_event_msg!(nodes[3], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); @@ -3981,7 +3982,7 @@ fn test_single_channel_multiple_mpp() { nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[7], 1); expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false); - nodes[7].node.handle_commitment_signed(node_8_id, &first_updates.commitment_signed); + nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &first_updates.commitment_signed); check_added_monitors(&nodes[7], 1); let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); @@ -4022,7 +4023,7 @@ fn test_single_channel_multiple_mpp() { // Now drive everything to the end, at least as far as node 7 is concerned... 
*nodes[8].chain_monitor.write_blocker.lock().unwrap() = None; - nodes[8].node.handle_commitment_signed(node_7_id, &cs); + nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs); check_added_monitors(&nodes[8], 1); let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id()); @@ -4038,7 +4039,7 @@ fn test_single_channel_multiple_mpp() { next_source += 1; } - nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &updates.commitment_signed); nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); if updates.update_fulfill_htlcs.get(2).is_some() { check_added_monitors(&nodes[7], 5); @@ -4049,7 +4050,7 @@ fn test_single_channel_multiple_mpp() { let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); - nodes[8].node.handle_commitment_signed(node_7_id, &cs); + nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs); check_added_monitors(&nodes[8], 2); let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id); @@ -4065,7 +4066,7 @@ fn test_single_channel_multiple_mpp() { expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); } - nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &updates.commitment_signed); nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); if updates.update_fulfill_htlcs.get(2).is_some() { check_added_monitors(&nodes[7], 5); @@ -4075,7 +4076,7 @@ fn test_single_channel_multiple_mpp() { let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); - nodes[8].node.handle_commitment_signed(node_7_id, &cs); + nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs); check_added_monitors(&nodes[8], 2); let raa = get_event_msg!(nodes[8], 
MessageSendEvent::SendRevokeAndACK, node_7_id); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 55230a0231b..a624eee0015 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8848,11 +8848,32 @@ impl FundedChannel where } self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst; - let (mut htlcs_ref, counterparty_commitment_tx) = - self.build_commitment_no_state_update(logger); - let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid(); - let htlcs: Vec<(HTLCOutputInCommitment, Option>)> = - htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect(); + let mut updates = Vec::with_capacity(self.pending_funding.len() + 1); + for funding in core::iter::once(&self.funding).chain(self.pending_funding.iter()) { + let (mut htlcs_ref, counterparty_commitment_tx) = + self.build_commitment_no_state_update(funding, logger); + let htlc_outputs: Vec<(HTLCOutputInCommitment, Option>)> = + htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect(); + + if self.pending_funding.is_empty() { + // Soon, we will switch this to `LatestCounterpartyCommitmentTX`, + // and provide the full commit tx instead of the information needed to rebuild it. 
+ updates.push(ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { + commitment_txid: counterparty_commitment_tx.trust().txid(), + htlc_outputs, + commitment_number: self.context.cur_counterparty_commitment_transaction_number, + their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(), + feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()), + to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()), + to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()), + }); + } else { + updates.push(ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTX { + htlc_outputs, + commitment_tx: counterparty_commitment_tx, + }); + } + } if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent { self.context.announcement_sigs_state = AnnouncementSigsState::Committed; @@ -8861,37 +8882,30 @@ impl FundedChannel where self.context.latest_monitor_update_id += 1; let monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, - // Soon, we will switch this to `LatestCounterpartyCommitmentTX`, - // and provide the full commit tx instead of the information needed to rebuild it. 
- updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { - commitment_txid: counterparty_commitment_txid, - htlc_outputs: htlcs.clone(), - commitment_number: self.context.cur_counterparty_commitment_transaction_number, - their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(), - feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()), - to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()), - to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()), - }], + updates, channel_id: Some(self.context.channel_id()), }; self.context.channel_state.set_awaiting_remote_revoke(); monitor_update } - fn build_commitment_no_state_update(&self, logger: &L) - -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) - where L::Target: Logger + fn build_commitment_no_state_update( + &self, funding: &FundingScope, logger: &L, + ) -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) + where + L::Target: Logger, { - let commitment_data = self.context.build_commitment_transaction(&self.funding, - self.context.cur_counterparty_commitment_transaction_number, - &self.context.counterparty_cur_commitment_point.unwrap(), false, true, logger); + let commitment_data = self.context.build_commitment_transaction( + funding, self.context.cur_counterparty_commitment_transaction_number, + &self.context.counterparty_cur_commitment_point.unwrap(), false, true, logger, + ); let counterparty_commitment_tx = commitment_data.stats.tx; #[cfg(any(test, fuzzing))] { - if !self.funding.is_outbound() { - let projected_commit_tx_info = self.funding.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); - *self.funding.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; + if !funding.is_outbound() { + let projected_commit_tx_info = funding.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); + 
*funding.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len(); if info.total_pending_htlcs == total_pending_htlcs @@ -8910,14 +8924,32 @@ impl FundedChannel where /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed /// generation when we shouldn't change HTLC/channel state. - fn send_commitment_no_state_update(&self, logger: &L) -> Result where L::Target: Logger { + fn send_commitment_no_state_update( + &self, logger: &L, + ) -> Result, ChannelError> + where + L::Target: Logger, + { + core::iter::once(&self.funding) + .chain(self.pending_funding.iter()) + .map(|funding| self.send_commitment_no_state_update_for_funding(funding, logger)) + .collect::, ChannelError>>() + } + + fn send_commitment_no_state_update_for_funding( + &self, funding: &FundingScope, logger: &L, + ) -> Result + where + L::Target: Logger, + { // Get the fee tests from `build_commitment_no_state_update` #[cfg(any(test, fuzzing))] - self.build_commitment_no_state_update(logger); + self.build_commitment_no_state_update(funding, logger); - let commitment_data = self.context.build_commitment_transaction(&self.funding, - self.context.cur_counterparty_commitment_transaction_number, - &self.context.counterparty_cur_commitment_point.unwrap(), false, true, logger); + let commitment_data = self.context.build_commitment_transaction( + funding, self.context.cur_counterparty_commitment_transaction_number, + &self.context.counterparty_cur_commitment_point.unwrap(), false, true, logger, + ); let counterparty_commitment_tx = commitment_data.stats.tx; match &self.context.holder_signer { @@ -8926,7 +8958,7 @@ impl FundedChannel where { let res = ecdsa.sign_counterparty_commitment( - &self.funding.channel_transaction_parameters, + &funding.channel_transaction_parameters, &counterparty_commitment_tx, 
commitment_data.inbound_htlc_preimages, commitment_data.outbound_htlc_preimages, @@ -8938,25 +8970,34 @@ impl FundedChannel where let trusted_tx = counterparty_commitment_tx.trust(); log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", encode::serialize_hex(&trusted_tx.built_transaction().transaction), - &trusted_tx.txid(), encode::serialize_hex(&self.funding.get_funding_redeemscript()), + &trusted_tx.txid(), encode::serialize_hex(&funding.get_funding_redeemscript()), log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id()); let counterparty_keys = trusted_tx.keys(); debug_assert_eq!(htlc_signatures.len(), trusted_tx.htlcs().len()); for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(trusted_tx.htlcs()) { log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", - encode::serialize_hex(&chan_utils::build_htlc_transaction(&trusted_tx.txid(), trusted_tx.feerate_per_kw(), self.funding.get_holder_selected_contest_delay(), htlc, self.funding.get_channel_type(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), - encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.funding.get_channel_type(), &counterparty_keys)), + encode::serialize_hex(&chan_utils::build_htlc_transaction(&trusted_tx.txid(), trusted_tx.feerate_per_kw(), funding.get_holder_selected_contest_delay(), htlc, funding.get_channel_type(), &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), + encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, funding.get_channel_type(), &counterparty_keys)), log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()), log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id()); } } + let batch = if self.pending_funding.is_empty() { None } else { + Some(msgs::CommitmentSignedBatch { + batch_size: self.pending_funding.len() as 
u16 + 1, + funding_txid: funding + .get_funding_txo() + .expect("splices should have their funding transactions negotiated before exiting quiescence while un-negotiated splices are discarded on reload") + .txid, + }) + }; Ok(msgs::CommitmentSigned { channel_id: self.context.channel_id, signature, htlc_signatures, - batch: None, + batch, #[cfg(taproot)] partial_signature_with_nonce: None, }) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 53fc3e6dfde..597ce2cf0ca 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -7692,6 +7692,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(update) = commitment_update { pending_msg_events.push(MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id, + channel_id: channel.context.channel_id(), updates: update, }); } @@ -8523,8 +8524,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id, + channel_id: msg.channel_id, updates: CommitmentUpdate { - commitment_signed, + commitment_signed: vec![commitment_signed], update_add_htlcs: vec![], update_fulfill_htlcs: vec![], update_fail_htlcs: vec![], @@ -9732,6 +9734,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs { node_id, + channel_id: chan.context().channel_id(), updates, }); let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK { @@ -15095,17 +15098,17 @@ mod tests { let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); check_added_monitors!(nodes[1], 1); let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_cs); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, 
nodes[1].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); @@ -15113,7 +15116,7 @@ mod tests { check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); @@ -16475,10 +16478,10 @@ pub mod bench { Retry::Attempts(0)).unwrap(); let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap()); $node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]); - $node_b.handle_commitment_signed($node_a.get_our_node_id(), &payment_event.commitment_msg); + $node_b.handle_commitment_signed_batch_test($node_a.get_our_node_id(), &payment_event.commitment_msg); let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id()); $node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa); - $node_a.handle_commitment_signed($node_b.get_our_node_id(), &cs); + $node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &cs); $node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id())); expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b }); @@ -16487,17 +16490,17 @@ pub mod bench { expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000); match $node_b.get_and_clear_pending_msg_events().pop().unwrap() { - 
MessageSendEvent::UpdateHTLCs { node_id, updates } => { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { assert_eq!(node_id, $node_a.get_our_node_id()); $node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - $node_a.handle_commitment_signed($node_b.get_our_node_id(), &updates.commitment_signed); + $node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &updates.commitment_signed); }, _ => panic!("Failed to generate claim event"), } let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id()); $node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa); - $node_b.handle_commitment_signed($node_a.get_our_node_id(), &cs); + $node_b.handle_commitment_signed_batch_test($node_a.get_our_node_id(), &cs); $node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id())); expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage); diff --git a/lightning/src/ln/dual_funding_tests.rs b/lightning/src/ln/dual_funding_tests.rs index 07d23763c99..1a7bab39dc4 100644 --- a/lightning/src/ln/dual_funding_tests.rs +++ b/lightning/src/ln/dual_funding_tests.rs @@ -122,7 +122,7 @@ fn do_test_v2_channel_establishment(session: V2ChannelEstablishmentTestSession) let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let _msg_commitment_signed_from_1 = match msg_events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); updates.commitment_signed.clone() }, diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index f71258f358e..042736a9c1a 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ 
b/lightning/src/ln/functional_test_utils.rs @@ -756,7 +756,7 @@ pub fn create_chan_between_nodes_with_value<'a, 'b, 'c: 'd, 'd>(node_a: &'a Node } /// Gets an RAA and CS which were sent in response to a commitment update -pub fn get_revoke_commit_msgs>(node: &H, recipient: &PublicKey) -> (msgs::RevokeAndACK, msgs::CommitmentSigned) { +pub fn get_revoke_commit_msgs>(node: &H, recipient: &PublicKey) -> (msgs::RevokeAndACK, Vec) { let events = node.node().get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); (match events[0] { @@ -766,13 +766,14 @@ pub fn get_revoke_commit_msgs>(node: & }, _ => panic!("Unexpected event"), }, match events[1] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { assert_eq!(node_id, recipient); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); + assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); updates.commitment_signed.clone() }, _ => panic!("Unexpected event"), @@ -785,7 +786,7 @@ pub fn get_updates_and_revoke>(node: & let events = node.node().get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); (match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(node_id, recipient); (*updates).clone() }, @@ -871,7 +872,7 @@ pub fn get_htlc_update_msgs(node: &Node, recipient: &PublicKey) -> msgs::Commitm let events = node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(node_id, recipient); 
(*updates).clone() }, @@ -1925,21 +1926,22 @@ pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: pub struct SendEvent { pub node_id: PublicKey, + pub channel_id: ChannelId, pub msgs: Vec, - pub commitment_msg: msgs::CommitmentSigned, + pub commitment_msg: Vec, } impl SendEvent { - pub fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent { + pub fn from_commitment_update(node_id: PublicKey, channel_id: ChannelId, updates: msgs::CommitmentUpdate) -> SendEvent { assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - SendEvent { node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed } + SendEvent { node_id, channel_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed } } pub fn from_event(event: MessageSendEvent) -> SendEvent { match event { - MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates), + MessageSendEvent::UpdateHTLCs { node_id, channel_id, updates } => SendEvent::from_commitment_update(node_id, channel_id, updates), _ => panic!("Unexpected event type!"), } } @@ -2065,7 +2067,7 @@ macro_rules! 
commitment_signed_dance { { $crate::ln::functional_test_utils::check_added_monitors(&$node_a, 0); assert!($node_a.node.get_and_clear_pending_msg_events().is_empty()); - $node_a.node.handle_commitment_signed($node_b.node.get_our_node_id(), &$commitment_signed); + $node_a.node.handle_commitment_signed_batch_test($node_b.node.get_our_node_id(), &$commitment_signed); check_added_monitors(&$node_a, 1); let (extra_msg_option, bs_revoke_and_ack) = $crate::ln::functional_test_utils::do_main_commitment_signed_dance(&$node_a, &$node_b, $fail_backwards); assert!(extra_msg_option.is_none()); @@ -2108,7 +2110,7 @@ pub fn do_main_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node< node_b.node.handle_revoke_and_ack(node_a.node.get_our_node_id(), &as_revoke_and_ack); assert!(node_b.node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(node_b, 1); - node_b.node.handle_commitment_signed(node_a.node.get_our_node_id(), &as_commitment_signed); + node_b.node.handle_commitment_signed_batch_test(node_a.node.get_our_node_id(), &as_commitment_signed); let (bs_revoke_and_ack, extra_msg_option) = { let mut events = node_b.node.get_and_clear_pending_msg_events(); assert!(events.len() <= 2); @@ -2134,14 +2136,15 @@ pub fn do_main_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node< /// /// If `skip_last_step` is unset, also checks for the payment failure update for the previous hop /// on failure or that no new messages are left over on success. 
-pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &msgs::CommitmentSigned, fail_backwards: bool, skip_last_step: bool) { +pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &Vec, fail_backwards: bool, skip_last_step: bool) { check_added_monitors!(node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); - node_a.node.handle_commitment_signed(node_b.node.get_our_node_id(), commitment_signed); + node_a.node.handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), commitment_signed); check_added_monitors!(node_a, 1); // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. - let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id); + let channel_id = commitment_signed[0].channel_id; + let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), channel_id); if fail_backwards { assert!(!got_claim); } commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim); @@ -2149,7 +2152,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' if fail_backwards { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a, - vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); + vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id }]); check_added_monitors!(node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); @@ -2996,19 +2999,20 @@ pub fn pass_claimed_payment_along_route(args: ClaimAlongRouteArgs) -> u64 { macro_rules! 
msgs_from_ev { ($ev: expr) => { match $ev { - &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + &MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); + assert!(commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); ((update_fulfill_htlcs[0].clone(), commitment_signed.clone()), node_id.clone()) }, _ => panic!("Unexpected event"), } } } - let mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len()); + let mut per_path_msgs: Vec<((msgs::UpdateFulfillHTLC, Vec), PublicKey)> = Vec::with_capacity(expected_paths.len()); let mut events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_paths.len()); @@ -3180,17 +3184,18 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe let mut expected_paths: Vec<_> = expected_paths_slice.iter().collect(); check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); - let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len()); + let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, Vec), PublicKey)> = Vec::with_capacity(expected_paths.len()); let events = expected_paths[0].last().unwrap().node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), expected_paths.len()); for ev in events.iter() { let 
(update_fail, commitment_signed, node_id) = match ev { - &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + &MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); + assert!(commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); (update_fail_htlcs[0].clone(), commitment_signed.clone(), node_id.clone()) }, _ => panic!("Unexpected event"), @@ -3219,12 +3224,13 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe if update_next_node { assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); + assert!(commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); expected_next_node = node_id.clone(); next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone())); }, @@ -3722,8 +3728,9 @@ 
macro_rules! handle_chan_reestablish_msgs { idx += 1; RAACommitmentOrder::RevokeAndACKFirst }, - &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + &MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); + assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); commitment_update = Some(updates.clone()); idx += 1; RAACommitmentOrder::CommitmentFirst @@ -3742,9 +3749,10 @@ macro_rules! handle_chan_reestablish_msgs { revoke_and_ack = Some(msg.clone()); idx += 1; }, - &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + &MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { assert_eq!(*node_id, $dst_node.node.get_our_node_id()); assert!(commitment_update.is_none()); + assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); commitment_update = Some(updates.clone()); idx += 1; }, @@ -3911,7 +3919,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { if !pending_responding_commitment_signed.0 { commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false); } else { - node_a.node.handle_commitment_signed(node_b.node.get_our_node_id(), &commitment_update.commitment_signed); + node_a.node.handle_commitment_signed_batch_test(node_b.node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(node_a, 1); let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes @@ -3969,7 +3977,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) { if !pending_responding_commitment_signed.1 { commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false); } else { - node_b.node.handle_commitment_signed(node_a.node.get_our_node_id(), 
&commitment_update.commitment_signed); + node_b.node.handle_commitment_signed_batch_test(node_a.node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(node_b, 1); let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 639ad651f9f..4aaa1a3f31e 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -311,13 +311,13 @@ pub fn test_async_inbound_update_fee() { // ...now when the messages get delivered everyone should be happy nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); // deliver(1), generate (3): - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -340,12 +340,12 @@ pub fn test_async_inbound_update_fee() { assert!(as_update.update_fee.is_none()); // (5) check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), 
&bs_update.commitment_signed); // deliver (4) + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4) let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // only (6) so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5) + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5) let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -411,7 +411,7 @@ pub fn test_update_fee_unordered_raa() { // ...now when the messages get delivered everyone should be happy nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -470,7 +470,7 @@ pub fn test_multi_flight_update_fee() { // Deliver first update_fee/commitment_signed pair, generating (1) and (2): nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg_1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed_1); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed_1); let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], 
nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -509,7 +509,7 @@ pub fn test_multi_flight_update_fee() { assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); // Deliver (2) commitment_signed - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); // No commitment_signed so get_event_msg's assert(len == 1) passes @@ -519,7 +519,7 @@ pub fn test_multi_flight_update_fee() { check_added_monitors!(nodes[1], 1); // Delever (4) - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -527,7 +527,7 @@ pub fn test_multi_flight_update_fee() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_commitment); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment); let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -651,14 +651,14 @@ pub fn test_update_fee_vanilla() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - 
MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -666,7 +666,7 @@ pub fn test_update_fee_vanilla() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -895,13 +895,13 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + 
MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -923,7 +923,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -939,7 +939,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { assert_eq!(commitment_update.update_fee.is_none(), true); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(nodes[0], 1); let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -947,7 +947,7 @@ pub fn 
test_update_fee_with_fundee_update_add_htlc() { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[1], 1); let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes @@ -1010,7 +1010,7 @@ pub fn test_update_fee() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -1018,7 +1018,7 @@ pub fn test_update_fee() { nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); // Generate (2) and (3): - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -1037,21 +1037,21 @@ pub fn test_update_fee() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match 
events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); check_added_monitors!(nodes[1], 1); // ... creating (5) let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes // Handle (3), creating (6): - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed_0); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed_0); check_added_monitors!(nodes[0], 1); let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes @@ -1072,7 +1072,7 @@ pub fn test_update_fee() { check_added_monitors!(nodes[1], 1); // Deliver (7) - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); check_added_monitors!(nodes[0], 1); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, 
nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes @@ -1288,7 +1288,7 @@ pub fn holding_cell_htlc_counting() { // Now forward all the pending HTLCs and claim them back nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1296,14 +1296,14 @@ pub fn holding_cell_htlc_counting() { check_added_monitors!(nodes[1], 1); let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &bs_commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_commitment_signed); check_added_monitors!(nodes[1], 1); let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); for ref update in as_updates.update_add_htlcs.iter() { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update); } - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &as_updates.commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_updates.commitment_signed); check_added_monitors!(nodes[2], 1); nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[2], 1); @@ -1311,7 +1311,7 @@ pub fn holding_cell_htlc_counting() { nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - 
nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &bs_commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_commitment_signed); check_added_monitors!(nodes[1], 1); let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); @@ -1447,7 +1447,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(node_id, nodes[1].node.get_our_node_id()); assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. 
} } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -2080,7 +2080,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // flush the pending htlc - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); @@ -2089,7 +2089,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { check_added_monitors!(nodes[0], 1); let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &as_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -2237,13 +2237,13 @@ pub fn channel_reserve_in_flight_removes() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); 
nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_1.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &send_1.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_1.commitment_msg); check_added_monitors!(nodes[1], 1); // B is already AwaitingRAA, so cant generate a CS here let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2256,7 +2256,7 @@ pub fn channel_reserve_in_flight_removes() { check_added_monitors!(nodes[0], 1); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2266,7 +2266,7 @@ pub fn channel_reserve_in_flight_removes() { // can no longer broadcast a commitment transaction with it and B has the preimage so can go // on-chain as necessary). 
nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); @@ -2298,13 +2298,13 @@ pub fn channel_reserve_in_flight_removes() { }; nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_2.msgs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &send_2.commitment_msg); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_2.commitment_msg); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // Now just resolve all the outstanding messages/HTLCs for completeness... 
- nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2316,7 +2316,7 @@ pub fn channel_reserve_in_flight_removes() { expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2546,7 +2546,7 @@ pub fn channel_monitor_network_test() { let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. 
} } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(*node_id, $prev_node.node.get_our_node_id()); @@ -3152,13 +3152,13 @@ pub fn test_multiple_package_conflicts() { ); nodes[0] .node - .handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors(&nodes[0], 1); let (revoke_ack, commit_signed) = get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_ack); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commit_signed); check_added_monitors(&nodes[1], 4); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -3171,7 +3171,7 @@ pub fn test_multiple_package_conflicts() { expect_payment_sent!(nodes[0], preimage_1); let updates = match &events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, updates } => updates, + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates } => updates, _ => panic!("Unexpected event"), }; assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -3336,7 +3336,7 @@ pub fn test_htlc_on_chain_success() { } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. 
} } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -3480,7 +3480,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); @@ -3534,7 +3534,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); @@ -3600,7 +3600,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. 
} } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); @@ -3681,7 +3681,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); // Note that nodes[1] is in AwaitingRAA, so won't send a CS let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); @@ -3700,7 +3700,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); // At this point first_payment_hash has dropped out of the latest two commitment // transactions that nodes[1] is tracking... 
- nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); @@ -3766,7 +3766,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use if deliver_bs_raa { let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); match nodes_2_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { assert_eq!(nodes[2].node.get_our_node_id(), *node_id); assert_eq!(update_add_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); @@ -3788,7 +3788,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events); match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. 
} } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 3); assert!(update_fulfill_htlcs.is_empty()); @@ -4033,7 +4033,7 @@ pub fn test_force_close_fail_back() { check_added_monitors!(nodes[1], 1); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -4041,7 +4041,8 @@ pub fn test_force_close_fail_back() { // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). let error_message = "Channel force-closed"; - nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let channel_id = payment_event.commitment_msg[0].channel_id; + nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); @@ -4063,7 +4064,7 @@ pub fn test_force_close_fail_back() { // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
{ - get_monitor!(nodes[2], payment_event.commitment_msg.channel_id) + get_monitor!(nodes[2], channel_id) .provide_payment_preimage_unsafe_legacy( &our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger @@ -4276,7 +4277,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } else { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); if messages_delivered >= 3 { - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -4286,7 +4287,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken check_added_monitors!(nodes[0], 1); if messages_delivered >= 5 { - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -4393,7 +4394,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_3 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); let (update_fulfill_htlc, commitment_signed) = match events_3[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); 
assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -4419,7 +4420,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } if messages_delivered >= 2 { - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[0], 1); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -4429,7 +4430,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken check_added_monitors!(nodes[1], 1); if messages_delivered >= 4 { - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -4636,7 +4637,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -4655,7 
+4656,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -4696,7 +4697,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -4719,12 +4720,12 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_commitment_signed.update_fee.is_none()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - 
nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -5384,7 +5385,7 @@ pub fn test_onchain_to_onchain_claim() { } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -5812,7 +5813,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut a_done = false; for msg in cs_msgs { match msg { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { // Both under-dust HTLCs and the one above-dust HTLC that we had already failed // should be failed-backwards here. 
let target = if *node_id == nodes[0].node.get_our_node_id() { @@ -6108,7 +6109,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_updates.0); @@ -6176,12 +6177,12 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_updates.0); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_updates.1); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_updates.1); check_added_monitors!(nodes[1], 1); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -6377,7 +6378,7 @@ pub fn test_fail_holding_cell_htlc_upon_free() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); 
// Flush the pending fee update. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_revoke_and_ack); @@ -6464,11 +6465,11 @@ pub fn test_free_and_fail_holding_cell_htlcs() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); // Flush the pending fee update. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_and_ack); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[0], 2); // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, @@ -6598,11 +6599,11 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. 
- nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[2], 1); nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &raa); - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[1], 2); // A final RAA message is generated to finalize the fee update. @@ -6648,13 +6649,13 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { // Pass the failure messages back to nodes[0]. nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); // Complete the HTLC failure+removal process. 
let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed); check_added_monitors!(nodes[1], 2); let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(final_raa_event.len(), 1); @@ -6802,7 +6803,7 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] { + if let MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. 
}, } = events[0] { assert_eq!(htlcs[0].htlc_id, i); } else { assert!(false); @@ -7048,8 +7049,9 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { //Resend HTLC nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &updates.commitment_signed); + assert_eq!(updates.commitment_signed.len(), 1); + assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7179,7 +7181,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { assert_eq!(events.len(), 1); let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -7222,7 +7224,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { assert_eq!(events.len(), 1); let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. 
} } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -7274,7 +7276,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me let mut update_msg: msgs::UpdateFailMalformedHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -7339,9 +7341,9 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); - let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = { + let update_msg : (msgs::UpdateFailMalformedHTLC, Vec<msgs::CommitmentSigned>) = { match events_3[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); 
assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -7363,7 +7365,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ //Confirm that handlinge the update_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route match events_4[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -7495,7 +7497,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &remove.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &remove.commitment_signed); check_added_monitors!(nodes[0], 1); // Cache one local commitment tx as lastest @@ -7763,7 +7765,7 @@ pub fn test_check_htlc_underpaying() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_fail_htlc, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: 
msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -8965,7 +8967,7 @@ pub fn test_bad_secret_hash() { // We should fail the payment back let mut events = nodes[1].node.get_and_clear_pending_msg_events(); match events.pop().unwrap() { - MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); }, @@ -9056,7 +9058,8 @@ pub fn test_update_err_monitor_lockdown() { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { + assert_eq!(updates.commitment_signed.len(), 1); + if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { assert_eq!(watchtower.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } @@ -9157,7 +9160,8 @@ pub fn test_concurrent_monitor_claim() { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { - if let Ok(Some(update)) = 
channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { + assert_eq!(updates.commitment_signed.len(), 1); + if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update assert_eq!(watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); assert_eq!(watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); @@ -9376,7 +9380,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain _ => panic!("Unexpected event"), }; } - nodes[1].node.handle_commitment_signed(nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed); // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update // Carol<->Bob's updated commitment transaction info. 
check_added_monitors!(nodes[1], 2); @@ -9391,7 +9395,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain _ => panic!("Unexpected event"), }; let bob_updates = match events[1] { - MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, nodes[2].node.get_our_node_id()); (*updates).clone() }, @@ -9400,7 +9404,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bob_revocation); check_added_monitors!(nodes[2], 1); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -11263,14 +11267,14 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { check_added_monitors!(&nodes[0], 1); let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed); check_added_monitors!(&nodes[1], 1); // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`. 
let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bob_revoke_and_ack); check_added_monitors!(&nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bob_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bob_commitment_signed); check_added_monitors(&nodes[0], 1); // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We @@ -11572,7 +11576,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { let send_event = SendEvent::from_node(&nodes[1]); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &send_event.commitment_msg); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); if do_reload { diff --git a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 79220456bbd..a8eec347f1f 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -797,7 +797,7 @@ mod test { SendEvent::from_event(events.remove(0)) }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 8f7d72fb47e..e70e2cfa99b 100644 --- a/lightning/src/ln/monitor_tests.rs +++ 
b/lightning/src/ln/monitor_tests.rs @@ -579,13 +579,13 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // To build a previous commitment transaction, deliver one round of commitment messages. nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); let _htlc_updates = get_htlc_update_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_cs); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs); let _bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); } diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index b1f2b7a1c63..0638c4cc885 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -45,6 +45,8 @@ use crate::sign::{NodeSigner, Recipient}; #[allow(unused_imports)] use crate::prelude::*; +use alloc::collections::BTreeMap; + use core::fmt; use core::fmt::Debug; use core::ops::Deref; @@ -1515,8 +1517,8 @@ pub struct CommitmentUpdate { pub update_fail_malformed_htlcs: Vec, /// An `update_fee` message which should be sent pub update_fee: Option, - /// A `commitment_signed` message which should be sent - pub commitment_signed: CommitmentSigned, + /// `commitment_signed` messages which should be sent + pub commitment_signed: 
Vec<CommitmentSigned>, } /// An event generated by a [`BaseMessageHandler`] which indicates a message should be sent to a @@ -1685,6 +1687,8 @@ pub enum MessageSendEvent { UpdateHTLCs { /// The node_id of the node which should receive these message(s) node_id: PublicKey, + /// The channel_id associated with all the update messages. + channel_id: ChannelId, /// The update messages which should be sent. ALL messages in the struct should be sent! updates: CommitmentUpdate, }, @@ -1932,9 +1936,30 @@ pub trait ChannelMessageHandler : BaseMessageHandler { fn handle_update_fail_malformed_htlc(&self, their_node_id: PublicKey, msg: &UpdateFailMalformedHTLC); /// Handle an incoming `commitment_signed` message from the given peer. fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &CommitmentSigned); + /// Handle a batch of incoming `commitment_signed` message from the given peer. + fn handle_commitment_signed_batch( + &self, their_node_id: PublicKey, channel_id: ChannelId, + batch: BTreeMap<Txid, CommitmentSigned>, + ) {} /// Handle an incoming `revoke_and_ack` message from the given peer. fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &RevokeAndACK); + #[cfg(any(test, fuzzing, feature = "_test_utils"))] + fn handle_commitment_signed_batch_test(&self, their_node_id: PublicKey, batch: &Vec<CommitmentSigned>) { + assert!(!batch.is_empty()); + if batch.len() == 1 { + assert!(batch[0].batch.is_none()); + self.handle_commitment_signed(their_node_id, &batch[0]); + } else { + let channel_id = batch[0].channel_id; + let batch: BTreeMap<Txid, CommitmentSigned> = batch.iter().cloned().map(|mut cs| { + let funding_txid = cs.batch.take().unwrap().funding_txid; + (funding_txid, cs) + }).collect(); + self.handle_commitment_signed_batch(their_node_id, channel_id, batch); + } + } + + /// Handle an incoming `update_fee` message from the given peer. 
fn handle_update_fee(&self, their_node_id: PublicKey, msg: &UpdateFee); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 7dc6113c138..1558f4e6c79 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -1391,7 +1391,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_fail_htlc, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 84a90c50ecd..13570393288 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -1308,7 +1308,7 @@ fn failed_probe_yields_event() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000); let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42); @@ -1320,7 +1320,7 @@ fn failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); let updates = 
get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates); + let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), channel_id, updates); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); @@ -1368,7 +1368,7 @@ fn onchain_failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates); + let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), chan_id, updates); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); @@ -2545,7 +2545,7 @@ fn auto_retry_partial_failure() { let mut payment_event = SendEvent::from_event(msg_events.remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2553,7 +2553,7 @@ fn auto_retry_partial_failure() { check_added_monitors!(nodes[0], 1); let as_second_htlc_updates = SendEvent::from_node(&nodes[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs); + 
nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); check_added_monitors!(nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -2562,14 +2562,14 @@ fn auto_retry_partial_failure() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[0]); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[1]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -2586,7 +2586,7 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -2594,7 +2594,7 @@ fn 
auto_retry_partial_failure() { check_added_monitors!(nodes[1], 4); let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_third_cs); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_third_cs); check_added_monitors!(nodes[1], 1); let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2604,14 +2604,14 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_fourth_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_fourth_cs); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_fourth_cs); check_added_monitors!(nodes[1], 1); let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); @@ -2990,7 +2990,7 @@ fn no_extra_retries_on_back_to_back_fail() { assert_eq!(first_htlc_updates.msgs.len(), 1); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); + 
nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -3000,7 +3000,7 @@ fn no_extra_retries_on_back_to_back_fail() { let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); check_added_monitors!(nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -3008,13 +3008,13 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[1], 1); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -3192,7 +3192,7 @@ fn test_simple_partial_retry() { assert_eq!(first_htlc_updates.msgs.len(), 1); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), 
&first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -3202,7 +3202,7 @@ fn test_simple_partial_retry() { let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); check_added_monitors!(nodes[0], 1); let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -3221,7 +3221,7 @@ fn test_simple_partial_retry() { let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2); let mut handle_update_htlcs = |event: MessageSendEvent| { - if let MessageSendEvent::UpdateHTLCs { node_id, updates } = event { + if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = event { if node_id == nodes[0].node.get_our_node_id() { assert_eq!(updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); @@ -3360,7 +3360,7 @@ fn test_threaded_payment_retries() { let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_msg_events.len(), 2); send_msg_events.retain(|msg| - if let MessageSendEvent::UpdateHTLCs { node_id, .. } = msg { + if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, .. } = msg { // Drop the commitment update for nodes[2], we can just let that one sit pending // forever. 
*node_id == nodes[1].node.get_our_node_id() @@ -3481,7 +3481,7 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: if at_midpoint { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[0], 1); } else { let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index b4383210802..a48224ef612 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -2307,13 +2307,14 @@ impl { - log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(commitment_signed.channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}", + MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(*channel_id), None), "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails, {} commits for channel {}", log_pubkey!(node_id), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), - &commitment_signed.channel_id); + commitment_signed.len(), + channel_id); let mut peer = get_peer_for_forwarding!(node_id)?; for msg in update_add_htlcs { self.enqueue_message(&mut *peer, msg); @@ -2330,7 +2331,9 @@ impl { log_debug!(WithContext::from(&self.logger, Some(*node_id), 
Some(msg.channel_id), None), "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}", diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 045f4793343..ae2f7cf1b32 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -700,14 +700,14 @@ fn test_0conf_channel_with_async_monitor() { let as_send = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_send.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_send.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_send.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_raa, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); check_added_monitors!(nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index 15df7f3293e..d35fe5a33be 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -105,7 +105,9 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { // Attempt to send an HTLC, but don't fully commit it yet. 
let update_add = get_htlc_update_msgs!(local_node, remote_node_id); remote_node.node.handle_update_add_htlc(local_node_id, &update_add.update_add_htlcs[0]); - remote_node.node.handle_commitment_signed(local_node_id, &update_add.commitment_signed); + remote_node + .node + .handle_commitment_signed_batch_test(local_node_id, &update_add.commitment_signed); let (revoke_and_ack, commit_sig) = get_revoke_commit_msgs!(remote_node, local_node_id); local_node.node.handle_revoke_and_ack(remote_node_id, &revoke_and_ack); check_added_monitors(local_node, 1); @@ -132,7 +134,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { // Continue exchanging messages until the HTLC is irrevocably committed and eventually failed // back as we are shutting down. - local_node.node.handle_commitment_signed(remote_node_id, &commit_sig); + local_node.node.handle_commitment_signed_batch_test(remote_node_id, &commit_sig); check_added_monitors(local_node, 1); let last_revoke_and_ack = @@ -148,12 +150,14 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { let update_fail = get_htlc_update_msgs!(remote_node, local_node_id); local_node.node.handle_update_fail_htlc(remote_node_id, &update_fail.update_fail_htlcs[0]); - local_node.node.handle_commitment_signed(remote_node_id, &update_fail.commitment_signed); + local_node + .node + .handle_commitment_signed_batch_test(remote_node_id, &update_fail.commitment_signed); let (revoke_and_ack, commit_sig) = get_revoke_commit_msgs!(local_node, remote_node_id); remote_node.node.handle_revoke_and_ack(local_node_id, &revoke_and_ack); check_added_monitors(remote_node, 1); - remote_node.node.handle_commitment_signed(local_node_id, &commit_sig); + remote_node.node.handle_commitment_signed_batch_test(local_node_id, &commit_sig); check_added_monitors(remote_node, 1); let last_revoke_and_ack = @@ -196,7 +200,7 @@ fn test_quiescence_tracks_monitor_update_in_progress_and_waits_for_async_signer( let update = 
get_htlc_update_msgs!(&nodes[1], node_id_0); nodes[0].node.handle_update_fulfill_htlc(node_id_1, &update.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(node_id_1, &update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_id_1, &update.commitment_signed); check_added_monitors(&nodes[0], 1); // While settling back the payment, propose quiescence from nodes[1]. We won't see its `stfu` go @@ -210,7 +214,7 @@ fn test_quiescence_tracks_monitor_update_in_progress_and_waits_for_async_signer( let (revoke_and_ack, commit_sig) = get_revoke_commit_msgs!(&nodes[0], node_id_1); nodes[1].node.handle_revoke_and_ack(node_id_0, &revoke_and_ack); check_added_monitors(&nodes[1], 1); - nodes[1].node.handle_commitment_signed(node_id_0, &commit_sig); + nodes[1].node.handle_commitment_signed_batch_test(node_id_0, &commit_sig); check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index e5f4bc20018..a2d567dba96 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -543,7 +543,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, let update_add_commit = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_commit.msgs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &update_add_commit.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &update_add_commit.commitment_msg); check_added_monitors(&nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -551,7 +551,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool, check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); if !not_stale { - 
nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &cs); check_added_monitors(&nodes[0], 1); // A now revokes their original state, at which point reconnect should panic let raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -1043,7 +1043,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let payment_event = SendEvent::from_node(&nodes[1]); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); if claim_htlc { diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index c83f06026e5..7c6ac7dffa0 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -724,7 +724,7 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa check_added_monitors(&nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); - let (update_fee, commit_sig) = if let MessageSendEvent::UpdateHTLCs { node_id, mut updates } = msg_events.pop().unwrap() { + let (update_fee, commit_sig) = if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, mut updates } = msg_events.pop().unwrap() { assert_eq!(node_id, nodes[1].node.get_our_node_id()); (updates.update_fee.take().unwrap(), updates.commitment_signed) } else { diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 1fb26a270d4..b9fec7ce97a 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -461,7 +461,7 @@ fn 
do_htlc_fail_async_shutdown(blinded_recipient: bool) { let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &node_0_shutdown); commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false); @@ -1357,7 +1357,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { } else { nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()); } - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -1370,7 +1370,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); check_added_monitors(&nodes[0], 1); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); From eed614b45bf00220e9e0b0068c5c34c3738be4fe Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 2 Apr 2025 10:52:30 -0500 Subject: [PATCH 4/7] Add a LogicalMessage to use in PeerManager Splicing introduces a concept of batched commitment_signed 
messages for each pending splice transaction. These can be treated as one logical message, even though the protocol currently defines them as separate commitment_signed messages with a TLV for batch information. Add a LogicalMessage wrapper around wire::Message such that it can be used internally by PeerManager. A CommitmentSignedBatch variant will be added in the next commit. --- lightning/src/ln/peer_handler.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index a48224ef612..9edfaa34b18 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -858,6 +858,10 @@ pub struct PeerManager } +enum LogicalMessage { + FromWire(wire::Message), +} + enum MessageHandlingError { PeerHandleError(PeerHandleError), LightningError(LightningError), @@ -1645,10 +1649,11 @@ impl { + self.do_handle_message_without_peer_lock(peer_mutex, message, their_node_id, &logger) + }, + None => Ok(None), } } @@ -1662,7 +1667,7 @@ impl::Target as wire::CustomMessageReader>::CustomMessage>, their_node_id: PublicKey, logger: &WithContext<'a, L> - ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> + ) -> Result::Target as wire::CustomMessageReader>::CustomMessage>>, MessageHandlingError> { peer_lock.received_message_since_timer_tick = true; @@ -1774,7 +1779,7 @@ impl Date: Thu, 27 Mar 2025 16:15:14 -0500 Subject: [PATCH 5/7] Batch commitment_signed messages in PeerManager During splicing, commitment_signed messages need to be collected into a single batch before they are handled. Rather than including this as part of the channel state machine logic, batch when reading messages from the wire since they can be considered one logical message. 
--- lightning/src/ln/peer_handler.rs | 60 ++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 9edfaa34b18..401d9825c89 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -15,6 +15,7 @@ //! call into the provided message handlers (probably a ChannelManager and P2PGossipSync) with //! messages they should handle, and encoding/sending response messages. +use bitcoin::Txid; use bitcoin::constants::ChainHash; use bitcoin::secp256k1::{self, Secp256k1, SecretKey, PublicKey}; @@ -41,6 +42,8 @@ use crate::util::string::PrintableString; #[allow(unused_imports)] use crate::prelude::*; +use alloc::collections::{btree_map, BTreeMap}; + use crate::io; use crate::sync::{Mutex, MutexGuard, FairRwLock}; use core::sync::atomic::{AtomicBool, AtomicU32, AtomicI32, Ordering}; @@ -608,6 +611,8 @@ struct Peer { received_channel_announce_since_backlogged: bool, inbound_connection: bool, + + commitment_signed_batch: Option<(ChannelId, BTreeMap)>, } impl Peer { @@ -860,6 +865,7 @@ pub struct PeerManager { FromWire(wire::Message), + CommitmentSignedBatch(ChannelId, BTreeMap), } enum MessageHandlingError { @@ -1144,6 +1150,8 @@ impl { self.do_handle_message_without_peer_lock(peer_mutex, message, their_node_id, &logger) }, + Some(LogicalMessage::CommitmentSignedBatch(channel_id, batch)) => { + log_trace!(logger, "Received commitment_signed batch {:?} from {}", batch, log_pubkey!(their_node_id)); + self.message_handler.chan_handler.handle_commitment_signed_batch(their_node_id, channel_id, batch); + return Ok(None); + }, None => Ok(None), } } @@ -1747,6 +1762,51 @@ impl { entry.insert(msg); }, + btree_map::Entry::Occupied(_) => { + log_debug!(logger, "Peer {} sent batched commitment_signed for channel {} with duplicate funding_txid {}", log_pubkey!(their_node_id), channel_id, &batch.funding_txid); + return Err(PeerHandleError { }.into()); + } + } + + if
buffer.len() >= batch_size { + let (channel_id, batch) = peer_lock.commitment_signed_batch.take().expect("batch should have been inserted"); + return Ok(Some(LogicalMessage::CommitmentSignedBatch(channel_id, batch))); + } else { + return Ok(None); + } + } else if peer_lock.commitment_signed_batch.is_some() { + log_debug!(logger, "Peer {} sent non-batched commitment_signed for channel {} when expecting batched commitment_signed", log_pubkey!(their_node_id), &msg.channel_id); + return Err(PeerHandleError { }.into()); + } else { + return Ok(Some(LogicalMessage::FromWire(wire::Message::CommitmentSigned(msg)))); + } + } else if peer_lock.commitment_signed_batch.is_some() { + log_debug!(logger, "Peer {} sent non-commitment_signed message when expecting batched commitment_signed", log_pubkey!(their_node_id)); + return Err(PeerHandleError { }.into()); + } + if let wire::Message::GossipTimestampFilter(_msg) = message { // When supporting gossip messages, start initial gossip sync only after we receive // a GossipTimestampFilter From a1de713e01e77fbd6f2ac63e9d4bb7a99dc514d4 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 6 Mar 2025 16:50:07 -0600 Subject: [PATCH 6/7] Batch commitment_signed messages for splicing A FundedChannel may have more than one pending FundingScope during splicing, one for the splice attempt and one or more for any RBF attempts. The counterparty will send a commitment_signed message for each pending splice transaction and the current funding transaction. Defer handling these commitment_signed messages until the entire batch has arrived. Then validate them individually, also checking if all the pending splice transactions and the current funding transaction have a corresponding commitment_signed in the batch. 
--- lightning-net-tokio/src/lib.rs | 9 +++- lightning/src/ln/channel.rs | 77 ++++++++++++++++++++++++++---- lightning/src/ln/channelmanager.rs | 37 ++++++++++++++ lightning/src/ln/msgs.rs | 2 +- lightning/src/ln/peer_handler.rs | 6 +++ lightning/src/util/test_utils.rs | 8 ++++ 6 files changed, 127 insertions(+), 12 deletions(-) diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index a0daa8235b5..95b83b105ac 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -622,15 +622,17 @@ impl Hash for SocketDescriptor { mod tests { use bitcoin::constants::ChainHash; use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; - use bitcoin::Network; + use bitcoin::{Network, Txid}; use lightning::ln::msgs::*; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, PeerManager}; + use lightning::ln::types::ChannelId; use lightning::routing::gossip::NodeId; use lightning::types::features::*; use lightning::util::test_utils::TestNodeSigner; use tokio::sync::mpsc; + use std::collections::BTreeMap; use std::mem; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; @@ -723,6 +725,11 @@ mod tests { ) { } fn handle_commitment_signed(&self, _their_node_id: PublicKey, _msg: &CommitmentSigned) {} + fn handle_commitment_signed_batch( + &self, _their_node_id: PublicKey, _channel_id: ChannelId, + _batch: BTreeMap, + ) { + } fn handle_revoke_and_ack(&self, _their_node_id: PublicKey, _msg: &RevokeAndACK) {} fn handle_update_fee(&self, _their_node_id: PublicKey, _msg: &UpdateFee) {} fn handle_announcement_signatures( diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a624eee0015..db3261ee4b8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -66,6 +66,8 @@ use crate::util::errors::APIError; use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure}; use 
crate::util::scid_utils::scid_from_parts; +use alloc::collections::BTreeMap; + use crate::io; use crate::prelude::*; use core::time::Duration; @@ -5777,6 +5779,11 @@ impl FundedChannel where ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, ))); } + + if msg.batch.is_some() { + return Err(ChannelError::close("Peer sent initial commitment_signed with a batch".to_owned())); + } + let holder_commitment_point = &mut self.holder_commitment_point.clone(); self.context.assert_no_commitment_advancement(holder_commitment_point.transaction_number(), "initial commitment_signed"); @@ -5804,6 +5811,49 @@ impl FundedChannel where pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result, ChannelError> where L::Target: Logger { + self.commitment_signed_check_state()?; + + let updates = self + .context + .validate_commitment_signed(&self.funding, &self.holder_commitment_point, msg, logger) + .map(|LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, nondust_htlc_sources }| + vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { + commitment_tx, htlc_outputs, claimed_htlcs: vec![], nondust_htlc_sources, + }] + )?; + + self.commitment_signed_update_monitor(updates, logger) + } + + pub fn commitment_signed_batch(&mut self, batch: &BTreeMap, logger: &L) -> Result, ChannelError> + where L::Target: Logger + { + self.commitment_signed_check_state()?; + + // Any commitment_signed not associated with a FundingScope is ignored below if a + // pending splice transaction has confirmed since receiving the batch. 
+ let updates = core::iter::once(&self.funding) + .chain(self.pending_funding.iter()) + .map(|funding| { + let funding_txid = funding.get_funding_txo().unwrap().txid; + let msg = batch + .get(&funding_txid) + .ok_or_else(|| ChannelError::close(format!("Peer did not send a commitment_signed for pending splice transaction: {}", funding_txid)))?; + self.context + .validate_commitment_signed(funding, &self.holder_commitment_point, msg, logger) + .map(|LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, nondust_htlc_sources }| + ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { + commitment_tx, htlc_outputs, claimed_htlcs: vec![], nondust_htlc_sources, + } + ) + } + ) + .collect::, ChannelError>>()?; + + self.commitment_signed_update_monitor(updates, logger) + } + + fn commitment_signed_check_state(&self) -> Result<(), ChannelError> { if self.context.channel_state.is_quiescent() { return Err(ChannelError::WarnAndDisconnect("Got commitment_signed message while quiescent".to_owned())); } @@ -5817,8 +5867,12 @@ impl FundedChannel where return Err(ChannelError::close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())); } - let commitment_tx_info = self.context.validate_commitment_signed(&self.funding, &self.holder_commitment_point, msg, logger)?; + Ok(()) + } + fn commitment_signed_update_monitor(&mut self, mut updates: Vec, logger: &L) -> Result, ChannelError> + where L::Target: Logger + { if self.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { // We only fail to advance our commitment point/number if we're currently // waiting for our signer to unblock and provide a commitment point. 
@@ -5872,18 +5926,21 @@ impl FundedChannel where } } - let LatestHolderCommitmentTXInfo { - commitment_tx, htlc_outputs, nondust_htlc_sources, - } = commitment_tx_info; + for mut update in updates.iter_mut() { + if let ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { + claimed_htlcs: ref mut update_claimed_htlcs, .. + } = &mut update { + debug_assert!(update_claimed_htlcs.is_empty()); + *update_claimed_htlcs = claimed_htlcs.clone(); + } else { + debug_assert!(false); + } + } + self.context.latest_monitor_update_id += 1; let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id, - updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { - commitment_tx, - htlc_outputs, - claimed_htlcs, - nondust_htlc_sources, - }], + updates, channel_id: Some(self.context.channel_id()), }; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 597ce2cf0ca..41d252e12f4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -9020,6 +9020,38 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } + fn internal_commitment_signed_batch(&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, batch: &BTreeMap) -> Result<(), MsgHandleErrInternal> { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), channel_id) + })?; + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; + match peer_state.channel_by_id.entry(channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + let chan = chan_entry.get_mut(); + let logger = WithChannelContext::from(&self.logger, &chan.context(), None); + let funding_txo = chan.funding().get_funding_txo(); + if let Some(chan) = chan.as_funded_mut() { + let monitor_update_opt = try_channel_entry!( + self, peer_state, chan.commitment_signed_batch(batch, &&logger), chan_entry + ); + + if let Some(monitor_update) = monitor_update_opt { + handle_new_monitor_update!( + self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, + per_peer_state, chan + ); + } + } + Ok(()) + }, + hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), channel_id)) + } + } + fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec)) { let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty(); let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap(); @@ -12130,6 +12162,11 @@ where let _ = handle_error!(self, self.internal_commitment_signed(&counterparty_node_id, msg), counterparty_node_id); } + fn handle_commitment_signed_batch(&self, counterparty_node_id: PublicKey, channel_id: ChannelId, batch: BTreeMap) { + let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let _ = handle_error!(self, self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, &batch), counterparty_node_id); + } + fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let _ = handle_error!(self, self.internal_revoke_and_ack(&counterparty_node_id, msg), counterparty_node_id); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 0638c4cc885..c27db4a55b4 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -1940,7 +1940,7 @@ pub trait ChannelMessageHandler : BaseMessageHandler { fn handle_commitment_signed_batch( &self, their_node_id: PublicKey, channel_id: ChannelId, batch: BTreeMap, - ) {} + ); /// Handle an incoming `revoke_and_ack` message from the given peer. 
fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &RevokeAndACK); diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 401d9825c89..07e63cf576c 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -334,6 +334,12 @@ impl ChannelMessageHandler for ErroringMessageHandler { fn handle_commitment_signed(&self, their_node_id: PublicKey, msg: &msgs::CommitmentSigned) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } + fn handle_commitment_signed_batch( + &self, their_node_id: PublicKey, channel_id: ChannelId, + _batch: BTreeMap, + ) { + ErroringMessageHandler::push_error(self, their_node_id, channel_id); + } fn handle_revoke_and_ack(&self, their_node_id: PublicKey, msg: &msgs::RevokeAndACK) { ErroringMessageHandler::push_error(self, their_node_id, msg.channel_id); } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 2d7d6fb5876..f90bfb97ef7 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -79,6 +79,8 @@ use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey}; use lightning_invoice::RawBolt11Invoice; +use alloc::collections::BTreeMap; + use crate::io; use crate::prelude::*; use crate::sign::{EntropySource, NodeSigner, RandomBytes, Recipient, SignerProvider}; @@ -1053,6 +1055,12 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler { fn handle_commitment_signed(&self, _their_node_id: PublicKey, msg: &msgs::CommitmentSigned) { self.received_msg(wire::Message::CommitmentSigned(msg.clone())); } + fn handle_commitment_signed_batch( + &self, _their_node_id: PublicKey, _channel_id: ChannelId, + _batch: BTreeMap, + ) { + unreachable!() + } fn handle_revoke_and_ack(&self, _their_node_id: PublicKey, msg: &msgs::RevokeAndACK) { self.received_msg(wire::Message::RevokeAndACK(msg.clone())); } From 80e3235c216db4dca4e2681c8f8d4a7aee7157a0 Mon Sep 17 00:00:00 2001 From: Jeffrey 
Czyz Date: Fri, 7 Mar 2025 16:12:46 -0600 Subject: [PATCH 7/7] Consider funding scopes in get_available_balances A FundedChannel may have more than one pending FundingScope during splicing, one for the splice attempt and one or more for any RBF attempts. When calling get_available_balances, consider all funding scopes and take the minimum by next_outbound_htlc_limit_msat. This is used both informationally and to determine which channel to use to forward an HTLC. The choice of next_outbound_htlc_limit_msat is somewhat arbitrary but matches the field used when determining which channel to use to forward an HTLC. Any field should do since each field should be adjusted by the same amount relative to another FundingScope given the nature of the fields (i.e., inbound/outbound capacity, min/max HTLC limit). Using the minimum was chosen since, in order for an HTLC to be sent over the channel, it must be possible for each funding scope -- both the confirmed one and any pending scopes, one of which may eventually confirm. --- lightning/src/ln/channel.rs | 47 ++++++++++++++++++++++++++---- lightning/src/ln/channel_state.rs | 12 ++++---- lightning/src/ln/channelmanager.rs | 32 +++++++++++--------- 3 files changed, 67 insertions(+), 24 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index db3261ee4b8..005ec566f0e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1552,6 +1552,25 @@ impl Channel where } } } + + /// Get the available balances, see [`AvailableBalances`]'s fields for more info. + /// Doesn't bother handling the + /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC + /// corner case properly. 
+ pub fn get_available_balances( + &self, fee_estimator: &LowerBoundedFeeEstimator, + ) -> AvailableBalances + where + F::Target: FeeEstimator, + { + match &self.phase { + ChannelPhase::Undefined => unreachable!(), + ChannelPhase::Funded(chan) => chan.get_available_balances(fee_estimator), + ChannelPhase::UnfundedOutboundV1(chan) => chan.context.get_available_balances_for_scope(&chan.funding, fee_estimator), + ChannelPhase::UnfundedInboundV1(chan) => chan.context.get_available_balances_for_scope(&chan.funding, fee_estimator), + ChannelPhase::UnfundedV2(chan) => chan.context.get_available_balances_for_scope(&chan.funding, fee_estimator), + } + } } impl From> for Channel @@ -4195,11 +4214,7 @@ impl ChannelContext where SP::Target: SignerProvider { outbound_details } - /// Get the available balances, see [`AvailableBalances`]'s fields for more info. - /// Doesn't bother handling the - /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC - /// corner case properly. 
- pub fn get_available_balances( + fn get_available_balances_for_scope( &self, funding: &FundingScope, fee_estimator: &LowerBoundedFeeEstimator, ) -> AvailableBalances where @@ -8792,7 +8807,7 @@ impl FundedChannel where return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned())); } - let available_balances = self.context.get_available_balances(&self.funding, fee_estimator); + let available_balances = self.get_available_balances(fee_estimator); if amount_msat < available_balances.next_outbound_htlc_minimum_msat { return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat", available_balances.next_outbound_htlc_minimum_msat))); @@ -8872,6 +8887,26 @@ impl FundedChannel where Ok(Some(res)) } + pub(super) fn get_available_balances( + &self, fee_estimator: &LowerBoundedFeeEstimator, + ) -> AvailableBalances + where + F::Target: FeeEstimator, + { + core::iter::once(&self.funding) + .chain(self.pending_funding.iter()) + .map(|funding| self.context.get_available_balances_for_scope(funding, fee_estimator)) + .reduce(|acc, e| { + AvailableBalances { + inbound_capacity_msat: acc.inbound_capacity_msat.min(e.inbound_capacity_msat), + outbound_capacity_msat: acc.outbound_capacity_msat.min(e.outbound_capacity_msat), + next_outbound_htlc_limit_msat: acc.next_outbound_htlc_limit_msat.min(e.next_outbound_htlc_limit_msat), + next_outbound_htlc_minimum_msat: acc.next_outbound_htlc_minimum_msat.max(e.next_outbound_htlc_minimum_msat), + } + }) + .expect("At least one FundingScope is always provided") + } + fn build_commitment_no_status_check(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger { log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we diff --git a/lightning/src/ln/channel_state.rs b/lightning/src/ln/channel_state.rs index 2da4eae97f2..c941e0eb9d0 100644 --- a/lightning/src/ln/channel_state.rs 
+++ b/lightning/src/ln/channel_state.rs @@ -15,7 +15,7 @@ use bitcoin::secp256k1::PublicKey; use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::transaction::OutPoint; -use crate::ln::channel::{ChannelContext, FundingScope}; +use crate::ln::channel::Channel; use crate::ln::types::ChannelId; use crate::sign::SignerProvider; use crate::types::features::{ChannelTypeFeatures, InitFeatures}; @@ -475,15 +475,17 @@ impl ChannelDetails { self.short_channel_id.or(self.outbound_scid_alias) } - pub(super) fn from_channel_context( - context: &ChannelContext, funding: &FundingScope, best_block_height: u32, - latest_features: InitFeatures, fee_estimator: &LowerBoundedFeeEstimator, + pub(super) fn from_channel( + channel: &Channel, best_block_height: u32, latest_features: InitFeatures, + fee_estimator: &LowerBoundedFeeEstimator, ) -> Self where SP::Target: SignerProvider, F::Target: FeeEstimator, { - let balance = context.get_available_balances(funding, fee_estimator); + let context = channel.context(); + let funding = channel.funding(); + let balance = channel.get_available_balances(fee_estimator); let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = funding.get_holder_counterparty_selected_channel_reserve_satoshis(); #[allow(deprecated)] // TODO: Remove once balance_msat is removed. diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 41d252e12f4..d0a73e89992 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3730,7 +3730,7 @@ where Ok(temporary_channel_id) } - fn list_funded_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { + fn list_funded_channels_with_filter)) -> bool + Copy>(&self, f: Fn) -> Vec { // Allocate our best estimate of the number of channels we have in the `res` // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without // a scid or a scid alias. 
Therefore reallocations may still occur, but is @@ -3745,11 +3745,13 @@ where let peer_state = &mut *peer_state_lock; res.extend(peer_state.channel_by_id.iter() // Only `Channels` in the `Channel::Funded` phase can be considered funded. - .filter_map(|(chan_id, chan)| chan.as_funded().map(|chan| (chan_id, chan))) + .filter(|(_, chan)| chan.is_funded()) .filter(f) .map(|(_channel_id, channel)| { - ChannelDetails::from_channel_context(&channel.context, &channel.funding, best_block_height, - peer_state.latest_features.clone(), &self.fee_estimator) + ChannelDetails::from_channel( + channel, best_block_height, peer_state.latest_features.clone(), + &self.fee_estimator, + ) }) ); } @@ -3772,9 +3774,11 @@ where for (_cp_id, peer_state_mutex) in per_peer_state.iter() { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - for (context, funding) in peer_state.channel_by_id.iter().map(|(_, chan)| (chan.context(), chan.funding())) { - let details = ChannelDetails::from_channel_context(context, funding, best_block_height, - peer_state.latest_features.clone(), &self.fee_estimator); + for (_, channel) in peer_state.channel_by_id.iter() { + let details = ChannelDetails::from_channel( + channel, best_block_height, peer_state.latest_features.clone(), + &self.fee_estimator, + ); res.push(details); } } @@ -3792,7 +3796,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live()) + self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context().is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. 
@@ -3804,13 +3808,15 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let features = &peer_state.latest_features; - let context_to_details = |(context, funding)| { - ChannelDetails::from_channel_context(context, funding, best_block_height, features.clone(), &self.fee_estimator) + let channel_to_details = |channel| { + ChannelDetails::from_channel( + channel, best_block_height, features.clone(), &self.fee_estimator, + ) }; return peer_state.channel_by_id .iter() - .map(|(_, chan)| (chan.context(), chan.funding())) - .map(context_to_details) + .map(|(_, chan)| (chan)) + .map(channel_to_details) .collect(); } vec![] @@ -6066,7 +6072,7 @@ where let maybe_optimal_channel = peer_state.channel_by_id.values_mut() .filter_map(Channel::as_funded_mut) .filter_map(|chan| { - let balances = chan.context.get_available_balances(&chan.funding, &self.fee_estimator); + let balances = chan.get_available_balances(&self.fee_estimator); if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat && chan.context.is_usable() {