@@ -717,6 +717,8 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 type PerSourcePendingForward =
     (u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);

+type RAAMonitorUpdateBlockingActionMap = BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>;
+
 mod fuzzy_channelmanager {
     use super::*;

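The new alias mainly shortens signatures that would otherwise spell out the nested BTreeMap type; the later hunk touching raa_monitor_updates_held shows it used exactly that way. If it helps to see the pattern in isolation, here is a minimal, self-contained sketch with hypothetical stand-in types (not the real LDK ones):

use std::collections::BTreeMap;

// Hypothetical stand-ins for LDK's ChannelId and RAAMonitorUpdateBlockingAction.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct ChannelId([u8; 32]);
#[derive(Debug)]
struct BlockingAction(u64);

// The alias keeps later signatures short, in the same way the new
// RAAMonitorUpdateBlockingActionMap alias does for the real types.
type BlockingActionMap = BTreeMap<ChannelId, Vec<BlockingAction>>;

// Without the alias this parameter would read `&BTreeMap<ChannelId, Vec<BlockingAction>>`.
fn any_actions_held(map: &BlockingActionMap, id: &ChannelId) -> bool {
    map.get(id).map(|v| !v.is_empty()).unwrap_or(false)
}

fn main() {
    let mut map: BlockingActionMap = BTreeMap::new();
    let id = ChannelId([0; 32]);
    assert!(!any_actions_held(&map, &id));
    map.entry(id).or_default().push(BlockingAction(1));
    assert!(any_actions_held(&map, &id));
}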
@@ -1517,10 +1519,10 @@ where
                 return false;
             }
         }
-        !self
-            .channel_by_id
-            .iter()
-            .any(|(_, channel)| channel.is_funded() || channel.funding().is_outbound())
+        let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
+            channel.is_funded() || channel.funding().is_outbound()
+        };
+        !self.channel_by_id.iter().any(chan_is_funded_or_outbound)
             && self.monitor_update_blocked_actions.is_empty()
             && self.closed_channel_monitor_update_ids.is_empty()
     }
@@ -3313,17 +3315,14 @@ macro_rules! emit_funding_tx_broadcast_safe_event {
 macro_rules! emit_channel_pending_event {
     ($locked_events: expr, $channel: expr) => {
         if $channel.context.should_emit_channel_pending_event() {
+            let funding_txo = $channel.funding.get_funding_txo().unwrap();
             $locked_events.push_back((
                 events::Event::ChannelPending {
                     channel_id: $channel.context.channel_id(),
                     former_temporary_channel_id: $channel.context.temporary_channel_id(),
                     counterparty_node_id: $channel.context.get_counterparty_node_id(),
                     user_channel_id: $channel.context.get_user_id(),
-                    funding_txo: $channel
-                        .funding
-                        .get_funding_txo()
-                        .unwrap()
-                        .into_bitcoin_outpoint(),
+                    funding_txo: funding_txo.into_bitcoin_outpoint(),
                     channel_type: Some($channel.funding.get_channel_type().clone()),
                 },
                 None,
@@ -3798,8 +3797,8 @@ where
         let mut outbound_scid_alias = 0;
         let mut i = 0;
         loop {
+            // fuzzing chacha20 doesn't use the key at all so we always get the same alias
             if cfg!(fuzzing) {
-                // fuzzing chacha20 doesn't use the key at all so we always get the same alias
                 outbound_scid_alias += 1;
             } else {
                 outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
@@ -3931,22 +3930,17 @@ where
             for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                 let peer_state = &mut *peer_state_lock;
-                res.extend(
-                    peer_state
-                        .channel_by_id
-                        .iter()
-                        // Only `Channels` in the `Channel::Funded` phase can be considered funded.
-                        .filter(|(_, chan)| chan.is_funded())
-                        .filter(f)
-                        .map(|(_channel_id, channel)| {
-                            ChannelDetails::from_channel(
-                                channel,
-                                best_block_height,
-                                peer_state.latest_features.clone(),
-                                &self.fee_estimator,
-                            )
-                        }),
-                );
+                // Only `Channels` in the `Channel::Funded` phase can be considered funded.
+                let filtered_chan_by_id =
+                    peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f);
+                res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| {
+                    ChannelDetails::from_channel(
+                        channel,
+                        best_block_height,
+                        peer_state.latest_features.clone(),
+                        &self.fee_estimator,
+                    )
+                }));
             }
         }
         res
@@ -4013,12 +4007,8 @@ where
                     &self.fee_estimator,
                 )
             };
-            return peer_state
-                .channel_by_id
-                .iter()
-                .map(|(_, chan)| (chan))
-                .map(channel_to_details)
-                .collect();
+            let chan_by_id = peer_state.channel_by_id.iter();
+            return chan_by_id.map(|(_, chan)| (chan)).map(channel_to_details).collect();
         }
         vec![]
     }
@@ -8919,9 +8909,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
     ) -> Result<(), MsgHandleErrInternal> {
         self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
             match channel.as_unfunded_v2_mut() {
-                Some(unfunded_channel) => Ok(unfunded_channel
-                    .tx_add_output(msg)
-                    .into_msg_send_event(counterparty_node_id)),
+                Some(unfunded_channel) => {
+                    let msg_send_event = unfunded_channel
+                        .tx_add_output(msg)
+                        .into_msg_send_event(counterparty_node_id);
+                    Ok(msg_send_event)
+                },
                 None => Err("tx_add_output"),
             }
         })
@@ -8932,9 +8925,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
     ) -> Result<(), MsgHandleErrInternal> {
         self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
             match channel.as_unfunded_v2_mut() {
-                Some(unfunded_channel) => Ok(unfunded_channel
-                    .tx_remove_input(msg)
-                    .into_msg_send_event(counterparty_node_id)),
+                Some(unfunded_channel) => {
+                    let msg_send_event = unfunded_channel
+                        .tx_remove_input(msg)
+                        .into_msg_send_event(counterparty_node_id);
+                    Ok(msg_send_event)
+                },
                 None => Err("tx_remove_input"),
             }
         })
@@ -8945,9 +8941,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
     ) -> Result<(), MsgHandleErrInternal> {
         self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
             match channel.as_unfunded_v2_mut() {
-                Some(unfunded_channel) => Ok(unfunded_channel
-                    .tx_remove_output(msg)
-                    .into_msg_send_event(counterparty_node_id)),
+                Some(unfunded_channel) => {
+                    let msg_send_event = unfunded_channel
+                        .tx_remove_output(msg)
+                        .into_msg_send_event(counterparty_node_id);
+                    Ok(msg_send_event)
+                },
                 None => Err("tx_remove_output"),
             }
         })
@@ -9657,13 +9656,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
         let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
         let num_forward_events = pending_events
             .iter()
-            .filter(|(ev, _)| {
-                if let events::Event::PendingHTLCsForwardable { .. } = ev {
-                    true
-                } else {
-                    false
-                }
-            })
+            .filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
             .count();
         // We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
         // events is done in batches and they are not removed until we're done processing each
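`matches!` expands to a match that yields true or false, so the replacement is behaviour-preserving; it simply removes the removed if-let/else boilerplate. A small, self-contained sketch of the same counting pattern with a toy enum (not LDK's events::Event):

// Toy enum standing in for events::Event; only the shape of the pattern matters here.
enum Event {
    PendingHTLCsForwardable { time_forwardable_ms: u64 },
    Other,
}

fn count_forwardable(pending: &[(Event, Option<u32>)]) -> usize {
    // matches! expands to a match returning true/false, so this is equivalent to the
    // old `if let ... { true } else { false }` closure, just shorter.
    pending
        .iter()
        .filter(|(ev, _)| matches!(ev, Event::PendingHTLCsForwardable { .. }))
        .count()
}

fn main() {
    let pending = vec![
        (Event::PendingHTLCsForwardable { time_forwardable_ms: 100 }, None),
        (Event::Other, Some(1)),
    ];
    assert_eq!(count_forwardable(&pending), 1);
}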
@@ -9686,25 +9679,24 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
     /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
     /// the [`ChannelMonitorUpdate`] in question.
     fn raa_monitor_updates_held(
-        &self,
-        actions_blocking_raa_monitor_updates: &BTreeMap<
-            ChannelId,
-            Vec<RAAMonitorUpdateBlockingAction>,
-        >,
+        &self, actions_blocking_raa_monitor_updates: &RAAMonitorUpdateBlockingActionMap,
         channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey,
     ) -> bool {
-        actions_blocking_raa_monitor_updates
+        let update_blocking_action_present = actions_blocking_raa_monitor_updates
             .get(&channel_id)
             .map(|v| !v.is_empty())
-            .unwrap_or(false)
-            || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
-                action
-                    == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                        channel_funding_outpoint,
-                        channel_id,
-                        counterparty_node_id,
-                    })
-            })
+            .unwrap_or(false);
+        if update_blocking_action_present {
+            return true;
+        }
+        return self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+            action
+                == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+                    channel_funding_outpoint,
+                    channel_id,
+                    counterparty_node_id,
+                })
+        });
     }

     #[cfg(any(test, feature = "_test_utils"))]
@@ -10960,30 +10952,32 @@ where
         payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
         route_params_config: RouteParametersConfig,
     ) -> Result<(), Bolt12SemanticError> {
+        let create_pending_payment_fn = |invoice_request: &InvoiceRequest, nonce| {
+            let expiration = StaleExpiration::TimerTicks(1);
+            let retryable_invoice_request = RetryableInvoiceRequest {
+                invoice_request: invoice_request.clone(),
+                nonce,
+                needs_retry: true,
+            };
+            self.pending_outbound_payments
+                .add_new_awaiting_invoice(
+                    payment_id,
+                    expiration,
+                    retry_strategy,
+                    route_params_config,
+                    Some(retryable_invoice_request),
+                )
+                .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
+        };
+
         self.pay_for_offer_intern(
             offer,
             quantity,
             amount_msats,
             payer_note,
             payment_id,
             None,
-            |invoice_request, nonce| {
-                let expiration = StaleExpiration::TimerTicks(1);
-                let retryable_invoice_request = RetryableInvoiceRequest {
-                    invoice_request: invoice_request.clone(),
-                    nonce,
-                    needs_retry: true,
-                };
-                self.pending_outbound_payments
-                    .add_new_awaiting_invoice(
-                        payment_id,
-                        expiration,
-                        retry_strategy,
-                        route_params_config,
-                        Some(retryable_invoice_request),
-                    )
-                    .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
-            },
+            create_pending_payment_fn,
         )
     }

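Hoisting the closure into a named local before the pay_for_offer_intern call does not change how it is invoked; it only flattens the call site. A short, self-contained sketch of the pattern with hypothetical names (nothing here is LDK API):

#[derive(Debug)]
enum PayError {
    DuplicatePaymentId,
}

// Stand-in for pay_for_offer_intern: it just invokes the callback it is given.
fn pay_intern<F>(amount_msats: u64, create_pending_payment: F) -> Result<(), PayError>
where
    F: FnOnce(u64) -> Result<(), PayError>,
{
    create_pending_payment(amount_msats)
}

fn main() {
    let payment_id = 42u64;
    // Binding the closure to a named local, as the diff does with
    // create_pending_payment_fn, keeps the call site short without changing behaviour.
    let create_pending_payment_fn = |amount: u64| {
        if amount == 0 {
            return Err(PayError::DuplicatePaymentId);
        }
        println!("registering pending payment {payment_id} for {amount} msats");
        Ok(())
    };
    pay_intern(1_000, create_pending_payment_fn).unwrap();
}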
@@ -11287,9 +11281,8 @@ where
     }

     fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
-        self.per_peer_state
-            .read()
-            .unwrap()
+        let per_peer_state = self.per_peer_state.read().unwrap();
+        per_peer_state
             .iter()
             .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
             .filter(|(_, peer)| peer.is_connected)
@@ -12109,13 +12102,10 @@ where
         self.do_chain_event(None, |channel| {
             if let Some(funding_txo) = channel.funding.get_funding_txo() {
                 if funding_txo.txid == *txid {
-                    channel
-                        .funding_transaction_unconfirmed(&&WithChannelContext::from(
-                            &self.logger,
-                            &channel.context,
-                            None,
-                        ))
-                        .map(|()| (None, Vec::new(), None))
+                    let chan_context =
+                        WithChannelContext::from(&self.logger, &channel.context, None);
+                    let res = channel.funding_transaction_unconfirmed(&&chan_context);
+                    res.map(|()| (None, Vec::new(), None))
                 } else {
                     Ok((None, Vec::new(), None))
                 }
@@ -12442,13 +12432,13 @@ where
     MR::Target: MessageRouter,
     L::Target: Logger,
 {
-    fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
+    fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
         // Note that we never need to persist the updated ChannelManager for an inbound
         // open_channel message - pre-funded channels are never written so there should be no
         // change to the contents.
         let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
-            let res =
-                self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
+            let msg = OpenChannelMessageRef::V1(message);
+            let res = self.internal_open_channel(&counterparty_node_id, msg);
             let persist = match &res {
                 Err(e) if e.closes_channel() => {
                     debug_assert!(false, "We shouldn't close a new channel");
@@ -12957,16 +12947,10 @@ where
         {
             let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;

-            if self
-                .flow
-                .enqueue_invoice_request(
-                    invoice_request,
-                    payment_id,
-                    nonce,
-                    self.get_peers_for_blinded_path(),
-                )
-                .is_err()
-            {
+            let peers = self.get_peers_for_blinded_path();
+            let enqueue_invreq_res =
+                self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
+            if enqueue_invreq_res.is_err() {
                 log_warn!(
                     self.logger,
                     "Retry failed for invoice request with payment_id {}",
@@ -14075,11 +14059,9 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
     fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
         let len: u64 = Readable::read(reader)?;
         const MAX_ALLOC_SIZE: u64 = 1024 * 16;
-        let mut events: Self = VecDeque::with_capacity(cmp::min(
-            MAX_ALLOC_SIZE
-                / mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
-            len,
-        ) as usize);
+        let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
+        let mut events: Self =
+            VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
         for _ in 0..len {
             let ev_opt = MaybeReadable::read(reader)?;
             let action = Readable::read(reader)?;
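The capacity calculation here guards against a hostile or corrupt length prefix: at most MAX_ALLOC_SIZE bytes are reserved up front, and the buffer grows normally as elements are actually read. A minimal sketch of the same guard with a made-up element type:

use std::cmp;
use std::collections::VecDeque;
use std::mem;

// Hypothetical element type; only its size matters for the calculation.
struct Entry {
    value: u64,
    tag: Option<u32>,
}

fn preallocate(claimed_len: u64) -> VecDeque<Entry> {
    const MAX_ALLOC_SIZE: u64 = 1024 * 16;
    // Never trust `claimed_len` from the wire: cap the up-front allocation at
    // MAX_ALLOC_SIZE bytes worth of entries, then let the buffer grow while reading.
    let event_size = mem::size_of::<Entry>();
    VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, claimed_len) as usize)
}

fn main() {
    // A bogus length of u64::MAX still only preallocates a small, bounded buffer.
    let events = preallocate(u64::MAX);
    println!("preallocated capacity: {}", events.capacity());
    assert!(events.capacity() < 100_000);
}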