From f63066ef389b70ac1a536335ee9d9e88c5c6bf64 Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Tue, 18 Nov 2025 10:22:41 +0100
Subject: [PATCH 1/3] Inline error variable

---
 lightning/src/ln/channelmanager.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d84d46e8995..8486cd45be9 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -6740,16 +6740,13 @@ where
                 }
             },
             None => {
-                let error = format!(
-                    "Channel not found for the passed counterparty node_id {next_node_id}"
-                );
                 let logger = WithContext::from(
                     &self.logger,
                     Some(next_node_id),
                     Some(*next_hop_channel_id),
                     None,
                 );
-                log_error!(logger, "{error} when attempting to forward intercepted HTLC");
+                log_error!(logger, "Channel not found for the passed counterparty node_id {next_node_id} when attempting to forward intercepted HTLC");
                 return Err(APIError::ChannelUnavailable {
                     err: format!(
                         "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}"

From 2f6b1976fe7c768947f08a27bc77ba175c389eac Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Tue, 18 Nov 2025 11:36:44 +0100
Subject: [PATCH 2/3] Deduplicate node id and payment hash logging

Now that the default Display implementation of Record also outputs
structured log fields, that data can be removed from the log messages.
This declutters the log output and also reduces the vertical space the
log statements take up in the code.

Note that deduplication is only carried out for log statements that are
always called with a logging context set up. For some statements this
isn't the case because not every caller sets up the context; those are
left unchanged.
---
 lightning/src/ln/channelmanager.rs |  76 +++++++++----------
 lightning/src/ln/peer_handler.rs   | 113 +++++++++++------------------
 lightning/src/util/logger.rs       |  11 ++-
 3 files changed, 86 insertions(+), 114 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 8486cd45be9..6b28ab7885e 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -5243,7 +5243,7 @@ where
             .map_err(|e| {
                 let first_hop_key = Some(path.hops.first().unwrap().pubkey);
                 let logger = WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash));
-                log_error!(logger, "Failed to build an onion for path for payment hash {payment_hash}");
+                log_error!(logger, "Failed to build an onion for path");
                 e
             })?;

@@ -5272,7 +5272,7 @@ where
         );
         log_trace!(
             logger,
-            "Attempting to send payment with payment hash {payment_hash} along path with next hop {first_chan_scid}"
+            "Attempting to send payment along path with next hop {first_chan_scid}"
         );

         let per_peer_state = self.per_peer_state.read().unwrap();
@@ -6746,7 +6746,10 @@ where
                     Some(*next_hop_channel_id),
                     None,
                 );
-                log_error!(logger, "Channel not found for the passed counterparty node_id {next_node_id} when attempting to forward intercepted HTLC");
+                log_error!(
+                    logger,
+                    "Channel not found when attempting to forward intercepted HTLC"
+                );
                 return Err(APIError::ChannelUnavailable {
                     err: format!(
                         "Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}"
@@ -7485,8 +7488,13 @@ where
                             } else {
                                 "alternate"
                             };
-                            log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} with corresponding peer {}",
-                                prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, &counterparty_node_id);
+                            log_trace!(
+                                logger,
"Forwarding HTLC from SCID {} with next hop SCID {} over {}", + prev_outbound_scid_alias, + short_chan_id, + channel_description + ); if let Err((reason, msg)) = optimal_channel.queue_add_htlc( *outgoing_amt_msat, *payment_hash, @@ -7498,13 +7506,7 @@ where &self.fee_estimator, &&logger, ) { - log_trace!( - logger, - "Failed to forward HTLC with payment_hash {} to peer {}: {}", - &payment_hash, - &counterparty_node_id, - msg - ); + log_trace!(logger, "Failed to forward HTLC: {}", msg); if let Some(chan) = peer_state .channel_by_id @@ -8188,8 +8190,7 @@ where if peer_state.is_connected { if funded_chan.should_disconnect_peer_awaiting_response() { let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None); - log_debug!(logger, "Disconnecting peer {} due to not making any progress", - counterparty_node_id); + log_debug!(logger, "Disconnecting peer due to not making any progress"); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::DisconnectPeerWithWarning { @@ -8560,9 +8561,8 @@ where }) => { log_trace!( WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)), - "Failing {}HTLC with payment_hash {} backwards from us: {:?}", + "Failing {}HTLC backwards from us: {:?}", if blinded_failure.is_some() { "blinded " } else { "" }, - &payment_hash, onion_error ); // In case of trampoline + phantom we prioritize the trampoline failure over the phantom failure. @@ -9848,9 +9848,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { - let err_str = format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"); - log_error!(logger, "{}", err_str); + log_error!(logger, "Can't find peer matching the passed counterparty node_id"); + let err_str = format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"); APIError::ChannelUnavailable { err: err_str } })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -12013,11 +12013,7 @@ This indicates a bug inside LDK. 
                             None,
                         );
                     } else {
-                        log_trace!(
-                            logger,
-                            "Failing HTLC with hash {} from our monitor",
-                            &htlc_update.payment_hash
-                        );
+                        log_trace!(logger, "Failing HTLC from our monitor");
                         let failure_reason = LocalHTLCFailureReason::OnChainTimeout;
                         let receiver = HTLCHandlingFailureType::Forward {
                             node_id: Some(counterparty_node_id),
@@ -13684,8 +13680,7 @@ where
         let remove_peer = {
             log_debug!(
                 WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
-                "Marking channels with {} disconnected and generating channel_updates.",
-                log_pubkey!(counterparty_node_id)
+                "Marking channels disconnected and generating channel_updates.",
             );
             if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -13870,7 +13865,7 @@ where
             }
         }

-        log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+        log_debug!(logger, "Generating channel_reestablish events");

         let per_peer_state = self.per_peer_state.read().unwrap();
         if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
@@ -16868,8 +16863,7 @@ where
                             Some(*payment_hash),
                         );
                         log_info!(logger,
-                            "Failing HTLC with hash {} as it is missing in the ChannelMonitor but was present in the (stale) ChannelManager",
-                            &payment_hash);
+                            "Failing HTLC as it is missing in the ChannelMonitor but was present in the (stale) ChannelManager");
                         failed_htlcs.push((
                             channel_htlc_source.clone(),
                             *payment_hash,
@@ -17523,18 +17517,20 @@ where
                         // still have an entry for this HTLC in `forward_htlcs` or
                         // `pending_intercepted_htlcs`, we were apparently not persisted after
                         // the monitor was when forwarding the payment.
-                        decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| {
-                            update_add_htlcs.retain(|update_add_htlc| {
-                                let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias &&
-                                    update_add_htlc.htlc_id == prev_hop_data.htlc_id;
-                                if matches {
-                                    log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel",
-                                        &htlc.payment_hash);
-                                }
-                                !matches
-                            });
-                            !update_add_htlcs.is_empty()
-                        });
+                        decode_update_add_htlcs.retain(
+                            |src_outb_alias, update_add_htlcs| {
+                                update_add_htlcs.retain(|update_add_htlc| {
+                                    let matches = *src_outb_alias
+                                        == prev_hop_data.prev_outbound_scid_alias
+                                        && update_add_htlc.htlc_id == prev_hop_data.htlc_id;
+                                    if matches {
+                                        log_info!(logger, "Removing pending to-decode HTLC as it was forwarded to the closed channel");
+                                    }
+                                    !matches
+                                });
+                                !update_add_htlcs.is_empty()
+                            },
+                        );
                         forward_htlcs.retain(|_, forwards| {
                             forwards.retain(|forward| {
                                 if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {

diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 83e2a570e05..c3b490ef31a 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1729,12 +1729,12 @@ where
     /// Append a message to a peer's pending outbound/write buffer
     fn enqueue_message(&self, peer: &mut Peer, message: &M) {
         let their_node_id = peer.their_node_id.map(|p| p.0);
-        if let Some(node_id) = their_node_id {
+        if their_node_id.is_some() {
             let logger = WithContext::from(&self.logger, their_node_id, None, None);
             if is_gossip_msg(message.type_id()) {
-                log_gossip!(logger, "Enqueueing message {:?} to {}", message, node_id);
+                log_gossip!(logger, "Enqueueing message {:?}", message);
             } else {
-                log_trace!(logger, "Enqueueing message {:?} to {}", message, node_id);
log_trace!(logger, "Enqueueing message {:?}", message); } } else { debug_assert!(false, "node_id should be set by the time we send a message"); @@ -1842,7 +1842,7 @@ where let logger = WithContext::from(&self.logger, Some(their_node_id), None, None); match self.node_id_to_descriptor.lock().unwrap().entry(their_node_id) { hash_map::Entry::Occupied(e) => { - log_trace!(logger, "Got second connection with {}, closing", their_node_id); + log_trace!(logger, "Got second connection, closing"); // Unset `their_node_id` so that we don't generate a peer_disconnected event peer.their_node_id = None; // Check that the peers map is consistent with the @@ -1852,7 +1852,7 @@ where return Err(PeerHandleError { }) }, hash_map::Entry::Vacant(entry) => { - log_debug!(logger, "Finished noise handshake for connection with {}", their_node_id); + log_debug!(logger, "Finished noise handshake for connection"); entry.insert(peer_descriptor.clone()) }, }; @@ -2141,8 +2141,7 @@ where if msg.features.requires_unknown_bits_from(&our_features) { log_debug!( logger, - "Peer {} requires features unknown to us: {:?}", - their_node_id, + "Peer requires features unknown to us: {:?}", msg.features.required_unknown_bits_from(&our_features) ); return Err(PeerHandleError {}.into()); @@ -2162,12 +2161,7 @@ where return Err(PeerHandleError {}.into()); } - log_info!( - logger, - "Received peer Init message from {}: {}", - their_node_id, - msg.features - ); + log_info!(logger, "Received Init message: {}", msg.features); // For peers not supporting gossip queries start sync now, otherwise wait until we receive a filter. if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() { @@ -2177,20 +2171,12 @@ where let inbound = peer_lock.inbound_connection; let route_handler = &self.message_handler.route_handler; if let Err(()) = route_handler.peer_connected(their_node_id, &msg, inbound) { - log_debug!( - logger, - "Route Handler decided we couldn't communicate with peer {}", - their_node_id, - ); + log_debug!(logger, "Route Handler decided we couldn't communicate with peer"); return Err(PeerHandleError {}.into()); } let chan_handler = &self.message_handler.chan_handler; if let Err(()) = chan_handler.peer_connected(their_node_id, &msg, inbound) { - log_debug!( - logger, - "Channel Handler decided we couldn't communicate with peer {}", - their_node_id, - ); + log_debug!(logger, "Channel Handler decided we couldn't communicate with peer"); self.message_handler.route_handler.peer_disconnected(their_node_id); return Err(PeerHandleError {}.into()); } @@ -2209,8 +2195,7 @@ where if let Err(()) = custom_handler.peer_connected(their_node_id, &msg, inbound) { log_debug!( logger, - "Custom Message Handler decided we couldn't communicate with peer {}", - their_node_id, + "Custom Message Handler decided we couldn't communicate with peer" ); self.message_handler.route_handler.peer_disconnected(their_node_id); self.message_handler.chan_handler.peer_disconnected(their_node_id); @@ -2221,8 +2206,7 @@ where if let Err(()) = sends_handler.peer_connected(their_node_id, &msg, inbound) { log_debug!( logger, - "Sending-Only Message Handler decided we couldn't communicate with peer {}", - their_node_id, + "Sending-Only Message Handler decided we couldn't communicate with peer" ); self.message_handler.route_handler.peer_disconnected(their_node_id); self.message_handler.chan_handler.peer_disconnected(their_node_id); @@ -2235,7 +2219,7 @@ where peer_lock.their_features = Some(msg.features); return Ok(None); } else if 
        } else if peer_lock.their_features.is_none() {
-            log_debug!(logger, "Peer {} sent non-Init first message", their_node_id);
+            log_debug!(logger, "Peer sent non-Init first message");
            return Err(PeerHandleError {}.into());
        }
@@ -2407,9 +2391,9 @@ where
        their_node_id: PublicKey, logger: &WithContext<'a, L>,
    ) -> Result, MessageHandlingError> {
        if is_gossip_msg(message.type_id()) {
-            log_gossip!(logger, "Received message {:?} from {}", message, their_node_id);
+            log_gossip!(logger, "Received message {:?}", message);
        } else {
-            log_trace!(logger, "Received message {:?} from {}", message, their_node_id);
+            log_trace!(logger, "Received message {:?}", message);
        }

        let mut should_forward = None;
@@ -2435,12 +2419,7 @@ where
            }
        },
        wire::Message::Warning(msg) => {
-            log_debug!(
-                logger,
-                "Got warning message from {}: {}",
-                their_node_id,
-                PrintableString(&msg.data)
-            );
+            log_debug!(logger, "Got warning message: {}", PrintableString(&msg.data));
        },

        wire::Message::Ping(msg) => {
@@ -3128,8 +3107,7 @@ where
                        Some(msg.channel_id),
                        None
                    ),
-                    "Handling Shutdown event in peer_handler for node {}",
-                    node_id
+                    "Handling Shutdown event in peer_handler",
                );
                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
            },
@@ -3239,8 +3217,11 @@ where
            },
            MessageSendEvent::SendChannelUpdate { ref node_id, ref msg } => {
-                log_trace!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelUpdate event in peer_handler for node {} for channel {}",
-                    node_id, msg.contents.short_channel_id);
+                log_trace!(
+                    WithContext::from(&self.logger, Some(*node_id), None, None),
+                    "Handling SendChannelUpdate event in peer_handler for channel {}",
+                    msg.contents.short_channel_id
+                );
                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
            },
            MessageSendEvent::HandleError { node_id, action } => {
                let logger = WithContext::from(&self.logger, Some(node_id), None, None);
                match action {
                    msgs::ErrorAction::DisconnectPeer { msg } => {
                        if let Some(msg) = msg.as_ref() {
-                            log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
-                                node_id, msg.data);
+                            log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler with message {}",
+                                msg.data);
                        } else {
-                            log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {}",
-                                node_id);
+                            log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler",
+                            );
                        }
                        // We do not have the peers write lock, so we just store that we're
                        // about to disconnect the peer and do it after we finish
                        // processing most messages.
                        peers_to_disconnect.insert(node_id, msg);
                    },
                    msgs::ErrorAction::DisconnectPeerWithWarning { msg } => {
-                        log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
-                            node_id, msg.data);
+                        log_trace!(logger, "Handling DisconnectPeer HandleError event in peer_handler with message {}",
+                            msg.data);
                        // We do not have the peers write lock, so we just store that we're
                        // about to disconnect the peer and do it after we finish
                        // processing most messages.
@@ -3275,21 +3256,19 @@ where
                        log_given_level!(
                            logger,
                            level,
-                            "Received a HandleError event to be ignored for node {}",
-                            node_id,
+                            "Received a HandleError event to be ignored",
                        );
                    },
                    msgs::ErrorAction::IgnoreDuplicateGossip => {},
                    msgs::ErrorAction::IgnoreError => {
                        log_debug!(
                            logger,
-                            "Received a HandleError event to be ignored for node {}",
-                            node_id,
+                            "Received a HandleError event to be ignored",
                        );
                    },
                    msgs::ErrorAction::SendErrorMessage { ref msg } => {
-                        log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
-                            node_id,
+                        log_trace!(logger, "Handling SendErrorMessage HandleError event in peer_handler with message {}",
+                            msg.data);
                        self.enqueue_message(
                            &mut *get_peer_for_forwarding!(&node_id)?,
@@ -3300,8 +3279,8 @@ where
                        ref msg,
                        ref log_level,
                    } => {
-                        log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler for node {} with message {}",
-                            node_id,
+                        log_given_level!(logger, *log_level, "Handling SendWarningMessage HandleError event in peer_handler with message {}",
+                            msg.data);
                        self.enqueue_message(
                            &mut *get_peer_for_forwarding!(&node_id)?,
@@ -3311,21 +3290,21 @@ where
                }
            },
            MessageSendEvent::SendChannelRangeQuery { ref node_id, ref msg } => {
-                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelRangeQuery event in peer_handler for node {} with first_blocknum={}, number_of_blocks={}",
-                    node_id,
+                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendChannelRangeQuery event in peer_handler with first_blocknum={}, number_of_blocks={}",
                    msg.first_blocknum,
                    msg.number_of_blocks);
                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
            },
            MessageSendEvent::SendShortIdsQuery { ref node_id, ref msg } => {
-                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendShortIdsQuery event in peer_handler for node {} with num_scids={}",
-                    node_id,
+                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendShortIdsQuery event in peer_handler with num_scids={}",
                    msg.short_channel_ids.len());
                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
            },
            MessageSendEvent::SendReplyChannelRange { ref node_id, ref msg } => {
-                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler for node {} with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
-                    node_id,
+                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendReplyChannelRange event in peer_handler with num_scids={} first_blocknum={} number_of_blocks={}, sync_complete={}",
                    msg.short_channel_ids.len(),
                    msg.first_blocknum,
                    msg.number_of_blocks,
                    msg.sync_complete);
                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
            },
            MessageSendEvent::SendGossipTimestampFilter { ref node_id, ref msg } => {
-                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendGossipTimestampFilter event in peer_handler for node {} with first_timestamp={}, timestamp_range={}",
-                    node_id,
+                log_gossip!(WithContext::from(&self.logger, Some(*node_id), None, None), "Handling SendGossipTimestampFilter event in peer_handler with first_timestamp={}, timestamp_range={}",
                    msg.first_timestamp,
                    msg.timestamp_range);
                self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
@@ -3445,8 +3424,7 @@ where
        if let Some((node_id, _)) = peer.their_node_id {
            log_trace!(
                WithContext::from(&self.logger, Some(node_id), None, None),
-                "Disconnecting peer with id {} due to {}",
-                node_id,
+                "Disconnecting peer due to {}",
                reason
            );
            self.message_handler.route_handler.peer_disconnected(node_id);
@@ -3471,12 +3449,7 @@ where
            let peer = peer_lock.lock().unwrap();
            if let Some((node_id, _)) = peer.their_node_id {
                let logger = WithContext::from(&self.logger, Some(node_id), None, None);
-                log_trace!(
-                    logger,
-                    "Handling disconnection of peer {} because {}",
-                    node_id,
-                    reason
-                );
+                log_trace!(logger, "Handling disconnection of peer because {}", reason);
                let removed = self.node_id_to_descriptor.lock().unwrap().remove(&node_id);
                debug_assert!(removed.is_some(), "descriptor maps should be consistent");
                if !peer.handshake_complete() {

diff --git a/lightning/src/util/logger.rs b/lightning/src/util/logger.rs
index cb525a75ade..b49cd32c131 100644
--- a/lightning/src/util/logger.rs
+++ b/lightning/src/util/logger.rs
@@ -101,7 +101,8 @@ macro_rules! impl_record {
 pub struct Record<$($args)?> {
     /// The verbosity level of the message.
     pub level: Level,
-    /// The node id of the peer pertaining to the logged record.
+    /// The node id of the peer pertaining to the logged record. Since the peer id is no longer repeated in the
+    /// message body, log implementations should include it in their output to keep entries clear.
     ///
     /// Note that in some cases a [`Self::channel_id`] may be filled in but this may still be
     /// `None`, depending on if the peer information is readily available in LDK when the log is
@@ -123,7 +124,8 @@ pub struct Record<$($args)?> {
     pub file: &'static str,
     /// The line containing the message.
     pub line: u32,
-    /// The payment hash.
+    /// The payment hash. Since the payment hash is no longer repeated in the message body, log implementations
+    /// should include it in their output to keep entries clear.
     ///
     /// Note that this is only filled in for logs pertaining to a specific payment, and will be
     /// `None` for logs which are not directly related to a payment.
@@ -279,8 +281,9 @@ impl<'fmt: 'r, 'r> Write for SubstringFormatter<'fmt, 'r> {
 /// A trait encapsulating the operations required of a logger. Keep in mind that log messages might not be entirely
 /// self-explanatory and may need accompanying context fields to be fully understood.
 pub trait Logger {
-    /// Logs the [`Record`]. Since the record's [`Record::channel_id`] is not embedded in the message body, log
-    /// implementations should print it alongside the message to keep entries clear.
+    /// Logs the [`Record`]. Since [`Record::channel_id`], [`Record::peer_id`] and [`Record::payment_hash`] are not
+    /// embedded in the message body, log implementations should print those alongside the message to keep entries
+    /// clear.
     fn log(&self, record: Record);
 }

From a6812c8e272179bfb75077753574eedcece738d3 Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Tue, 18 Nov 2025 11:40:40 +0100
Subject: [PATCH 3/3] Rustfmt modified fns

---
 lightning/src/ln/channelmanager.rs | 194 ++++++++++++++++++-----------
 1 file changed, 123 insertions(+), 71 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 6b28ab7885e..1ee4fc957dd 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -9827,12 +9827,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
    }

    /// TODO(dual_funding): Allow contributions, pass intended amount and inputs
-    #[rustfmt::skip]
    fn do_accept_inbound_channel(
-        &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
-        user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>
+        &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+        accept_0conf: bool, user_channel_id: u128,
+        config_overrides: Option<ChannelConfigOverrides>,
    ) -> Result<(), APIError> {
-
        let mut config = self.config.read().unwrap().clone();

        // Apply configuration overrides.
@@ -9840,17 +9839,23 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
            config.apply(&overrides);
        };

-        let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
+        let logger = WithContext::from(
+            &self.logger,
+            Some(*counterparty_node_id),
+            Some(*temporary_channel_id),
+            None,
+        );

        let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
        let peers_without_funded_channels =
-            self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
+            self.peers_without_funded_channels(|peer| peer.total_channel_count() > 0);
        let per_peer_state = self.per_peer_state.read().unwrap();
-        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-            .ok_or_else(|| {
+        let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
            log_error!(logger, "Can't find peer matching the passed counterparty node_id");
-            let err_str = format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}");
+            let err_str = format!(
+                "Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"
+            );
            APIError::ChannelUnavailable { err: err_str }
        })?;
        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -9865,42 +9870,63 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                    Some(unaccepted_channel) => {
                        let best_block_height = self.best_block.read().unwrap().height;
                        match unaccepted_channel.open_channel_msg {
-                            OpenChannelMessage::V1(open_channel_msg) => {
-                                InboundV1Channel::new(
-                                    &self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
-                                    &self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
-                                    user_channel_id, &config, best_block_height, &self.logger, accept_0conf
-                                ).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
-                                ).map(|mut channel| {
-                                    let logger = WithChannelContext::from(&self.logger, &channel.context, None);
-                                    let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
+                            OpenChannelMessage::V1(open_channel_msg) => InboundV1Channel::new(
+                                &self.fee_estimator,
+                                &self.entropy_source,
+                                &self.signer_provider,
+                                *counterparty_node_id,
+                                &self.channel_type_features(),
+                                &peer_state.latest_features,
+                                &open_channel_msg,
+                                user_channel_id,
+                                &config,
+                                best_block_height,
+                                &self.logger,
+                                accept_0conf,
+                            )
+                            .map_err(|err| {
+                                MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
+                            })
+                            .map(|mut channel| {
+                                let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+                                let message_send_event =
+                                    channel.accept_inbound_channel(&&logger).map(|msg| {
                                        MessageSendEvent::SendAcceptChannel {
                                            node_id: *counterparty_node_id,
                                            msg,
                                        }
                                    });
-                                    (*temporary_channel_id, Channel::from(channel), message_send_event)
-                                })
-                            },
-                            OpenChannelMessage::V2(open_channel_msg) => {
-                                PendingV2Channel::new_inbound(
-                                    &self.fee_estimator, &self.entropy_source, &self.signer_provider,
-                                    self.get_our_node_id(), *counterparty_node_id,
-                                    &self.channel_type_features(), &peer_state.latest_features,
-                                    &open_channel_msg,
-                                    user_channel_id, &config, best_block_height,
-                                    &self.logger,
-                                ).map_err(|e| {
-                                    let channel_id = open_channel_msg.common_fields.temporary_channel_id;
-                                    MsgHandleErrInternal::from_chan_no_close(e, channel_id)
-                                }).map(|channel| {
-                                    let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
-                                        node_id: channel.context.get_counterparty_node_id(),
-                                        msg: channel.accept_inbound_dual_funded_channel()
-                                    };
-                                    (channel.context.channel_id(), Channel::from(channel), Some(message_send_event))
-                                })
-                            },
+                                (*temporary_channel_id, Channel::from(channel), message_send_event)
+                            }),
+                            OpenChannelMessage::V2(open_channel_msg) => PendingV2Channel::new_inbound(
+                                &self.fee_estimator,
+                                &self.entropy_source,
+                                &self.signer_provider,
+                                self.get_our_node_id(),
+                                *counterparty_node_id,
+                                &self.channel_type_features(),
+                                &peer_state.latest_features,
+                                &open_channel_msg,
+                                user_channel_id,
+                                &config,
+                                best_block_height,
+                                &self.logger,
+                            )
+                            .map_err(|e| {
+                                let channel_id = open_channel_msg.common_fields.temporary_channel_id;
+                                MsgHandleErrInternal::from_chan_no_close(e, channel_id)
+                            })
+                            .map(|channel| {
+                                let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
+                                    node_id: channel.context.get_counterparty_node_id(),
+                                    msg: channel.accept_inbound_dual_funded_channel(),
+                                };
+                                (
+                                    channel.context.channel_id(),
+                                    Channel::from(channel),
+                                    Some(message_send_event),
+                                )
+                            }),
                        }
                    },
                    None => {
@@ -9908,7 +9934,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                        log_error!(logger, "{}", err_str);

                        return Err(APIError::APIMisuseError { err: err_str });
-                    }
+                    },
                };

        // We have to match below instead of map_err on the above as in the map_err closure the borrow checker
@@ -9919,13 +9945,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                mem::drop(peer_state_lock);
                mem::drop(per_peer_state);
                // TODO(dunxen): Find/make less icky way to do this.
-                match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
-                    Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+                match handle_error!(
+                    self,
+                    Result::<(), MsgHandleErrInternal>::Err(err),
+                    *counterparty_node_id
+                ) {
+                    Ok(_) => {
+                        unreachable!("`handle_error` only returns Err as we've passed in an Err")
+                    },
                    Err(e) => {
                        return Err(APIError::ChannelUnavailable { err: e.err });
                    },
                }
-            }
+            },
        };

        if accept_0conf {
@@ -9934,9 +9966,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
        } else if channel.funding().get_channel_type().requires_zero_conf() {
            let send_msg_err_event = MessageSendEvent::HandleError {
                node_id: channel.context().get_counterparty_node_id(),
-                action: msgs::ErrorAction::SendErrorMessage{
-                    msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
-                }
+                action: msgs::ErrorAction::SendErrorMessage {
+                    msg: msgs::ErrorMessage {
+                        channel_id: *temporary_channel_id,
+                        data: "No zero confirmation channels accepted".to_owned(),
+                    },
+                },
            };
            debug_assert!(peer_state.is_connected);
            peer_state.pending_msg_events.push(send_msg_err_event);
@@ -9951,13 +9986,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
        if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
            let send_msg_err_event = MessageSendEvent::HandleError {
                node_id: channel.context().get_counterparty_node_id(),
-                action: msgs::ErrorAction::SendErrorMessage{
-                    msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
-                }
+                action: msgs::ErrorAction::SendErrorMessage {
+                    msg: msgs::ErrorMessage {
+                        channel_id: *temporary_channel_id,
+                        data:
+                            "Have too many peers with unfunded channels, not accepting new ones"
+                                .to_owned(),
+                    },
+                },
            };
            debug_assert!(peer_state.is_connected);
            peer_state.pending_msg_events.push(send_msg_err_event);
-            let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+            let err_str =
+                "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
            log_error!(logger, "{}", err_str);

            return Err(APIError::APIMisuseError { err: err_str });
@@ -13671,7 +13712,6 @@ where
        provided_init_features(&self.config.read().unwrap())
    }

-    #[rustfmt::skip]
    fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
        let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
            let mut splice_failed_events = Vec::new();
@@ -13779,7 +13819,10 @@ where
                    debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
                    peer_state.is_connected = false;
                    peer_state.ok_to_remove(true)
-                } else { debug_assert!(false, "Unconnected peer disconnected"); true }
+                } else {
+                    debug_assert!(false, "Unconnected peer disconnected");
+                    true
+                }
            };
            if remove_peer {
                per_peer_state.remove(&counterparty_node_id);
@@ -13804,11 +13847,16 @@ where
        });
    }

-    #[rustfmt::skip]
-    fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
+    fn peer_connected(
+        &self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool,
+    ) -> Result<(), ()> {
        let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
        if !init_msg.features.supports_static_remote_key() {
-            log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
+            log_debug!(
+                logger,
+                "Peer {} does not support static remote key, disconnecting",
+                log_pubkey!(counterparty_node_id)
+            );
            return Err(());
        }

@@ -13819,8 +13867,10 @@ where
        // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
        // unfunded channels taking up space in memory for disconnected peers, we still let new
        // peers connect, but we'll reject new channels from them.
-        let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
-        let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+        let connected_peers_without_funded_channels =
+            self.peers_without_funded_channels(|node| node.is_connected);
+        let inbound_peer_limited =
+            inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;

        {
            let mut peer_state_lock = self.per_peer_state.write().unwrap();
@@ -13848,9 +13898,9 @@ where
                    peer_state.latest_features = init_msg.features.clone();
                    let best_block_height = self.best_block.read().unwrap().height;
-                    if inbound_peer_limited &&
-                        Self::unfunded_channel_count(&*peer_state, best_block_height) ==
-                            peer_state.channel_by_id.len()
+                    if inbound_peer_limited
+                        && Self::unfunded_channel_count(&*peer_state, best_block_height)
+                            == peer_state.channel_by_id.len()
                    {
                        res = Err(());
                        return NotifyOption::SkipPersistNoEvents;
@@ -13859,7 +13909,10 @@ where

                    debug_assert!(peer_state.pending_msg_events.is_empty());
                    peer_state.pending_msg_events.clear();
-                    debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+                    debug_assert!(
+                        !peer_state.is_connected,
+                        "A peer shouldn't be connected twice"
+                    );
                    peer_state.is_connected = true;
                },
            }
@@ -13876,27 +13929,26 @@ where
            if !peer_state.peer_storage.is_empty() {
                pending_msg_events.push(MessageSendEvent::SendPeerStorageRetrieval {
                    node_id: counterparty_node_id.clone(),
-                    msg: msgs::PeerStorageRetrieval {
-                        data: peer_state.peer_storage.clone()
-                    },
+                    msg: msgs::PeerStorageRetrieval { data: peer_state.peer_storage.clone() },
                });
            }

            for (_, chan) in peer_state.channel_by_id.iter_mut() {
                let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
                match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
-                    ReconnectionMsg::Reestablish(msg) =>
+                    ReconnectionMsg::Reestablish(msg) => {
                        pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
                            node_id: chan.context().get_counterparty_node_id(),
                            msg,
-                        }),
+                        })
+                    },
-                    ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) =>
-                        pending_msg_events.push(MessageSendEvent::SendOpenChannel {
+                    ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) => pending_msg_events
+                        .push(MessageSendEvent::SendOpenChannel {
                            node_id: chan.context().get_counterparty_node_id(),
                            msg,
                        }),
-                    ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) =>
-                        pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 {
+                    ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) => pending_msg_events
+                        .push(MessageSendEvent::SendOpenChannelV2 {
                            node_id: chan.context().get_counterparty_node_id(),
                            msg,
                        }),
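
To illustrate what the updated `Logger` documentation in patch 2 asks of implementations, here is a minimal sketch of a logger that prints `Record::peer_id`, `Record::channel_id` and `Record::payment_hash` alongside the message, now that patch 2 stops repeating them in the message bodies. The `PrintLogger` name and the output layout are illustrative assumptions rather than part of the patches; only the `fn log(&self, record: Record)` signature and the field names are taken from `lightning/src/util/logger.rs` as shown above.

use lightning::util::logger::{Logger, Record};

/// Example logger that surfaces the structured context fields which are no
/// longer embedded in the message body. Name and format are assumptions.
struct PrintLogger;

impl Logger for PrintLogger {
    fn log(&self, record: Record) {
        // Each context field is optional; render it only when it is present.
        let peer = record.peer_id.map(|p| format!(" peer={p}")).unwrap_or_default();
        let chan = record.channel_id.map(|c| format!(" chan={c}")).unwrap_or_default();
        let hash = record.payment_hash.map(|h| format!(" payment_hash={h}")).unwrap_or_default();
        // `file`/`line` locate the log call site; `args` is the message itself.
        println!(
            "{} {}:{}{}{}{} {}",
            record.level, record.file, record.line, peer, chan, hash, record.args
        );
    }
}

With a logger along these lines, an entry that previously read "Forwarding HTLC ... with payment_hash ..." still carries the hash, but as a structured field appended by the logger rather than as part of the message text.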