Skip to content

Commit 2c85a0d

Browse files
committed
Move receiving HTLCs to a receive_htlcs field
Previously, we'd store receiving HTLCs side-by-side with HTLC forwards in the `forward_htlcs` field under SCID 0. Here, we opt to split out a separate `receive_htlcs` field, also omitting the 0 magic value.
1 parent 0d1705d commit 2c85a0d

File tree

2 files changed

+89
-25
lines changed

2 files changed

+89
-25
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 85 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -2478,6 +2478,8 @@ where
24782478
// | |
24792479
// | |__`pending_intercepted_htlcs`
24802480
// |
2481+
// |__`receive_htlcs`
2482+
// |
24812483
// |__`decode_update_add_htlcs`
24822484
// |
24832485
// |__`per_peer_state`
@@ -2553,7 +2555,7 @@ pub struct ChannelManager<
25532555
/// See `ChannelManager` struct-level documentation for lock order requirements.
25542556
pending_outbound_payments: OutboundPayments,
25552557

2556-
/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
2558+
/// SCID/SCID Alias -> forward infos.
25572559
///
25582560
/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
25592561
/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
@@ -2562,6 +2564,9 @@ pub struct ChannelManager<
25622564
/// Note that no consistency guarantees are made about the existence of a channel with the
25632565
/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
25642566
///
2567+
/// This will also hold any [`FailHTLC`]s arising from handling [`Self::pending_intercepted_htlcs`] or
2568+
/// [`Self::receive_htlcs`].
2569+
///
25652570
/// See `ChannelManager` struct-level documentation for lock order requirements.
25662571
#[cfg(test)]
25672572
pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
@@ -2570,9 +2575,21 @@ pub struct ChannelManager<
25702575
/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
25712576
/// until the user tells us what we should do with them.
25722577
///
2578+
/// Note that any failures that may arise from handling these will be pushed to
2579+
/// [`Self::forward_htlcs`] with the previous hop's SCID.
2580+
///
25732581
/// See `ChannelManager` struct-level documentation for lock order requirements.
25742582
pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
2575-
2583+
/// Storage for HTLCs that are meant for us.
2584+
///
2585+
/// Note that any failures that may arise from handling these will be pushed to
2586+
/// [`Self::forward_htlcs`] with the previous hop's SCID.
2587+
///
2588+
/// See `ChannelManager` struct-level documentation for lock order requirements.
2589+
#[cfg(test)]
2590+
pub(super) receive_htlcs: Mutex<Vec<HTLCForwardInfo>>,
2591+
#[cfg(not(test))]
2592+
receive_htlcs: Mutex<Vec<HTLCForwardInfo>>,
25762593
/// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
25772594
///
25782595
/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
@@ -3755,6 +3772,7 @@ where
37553772
outbound_scid_aliases: Mutex::new(new_hash_set()),
37563773
pending_outbound_payments: OutboundPayments::new(new_hash_map()),
37573774
forward_htlcs: Mutex::new(new_hash_map()),
3775+
receive_htlcs: Mutex::new(Vec::new()),
37583776
decode_update_add_htlcs: Mutex::new(new_hash_map()),
37593777
claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
37603778
pending_intercepted_htlcs: Mutex::new(new_hash_map()),
@@ -6494,6 +6512,9 @@ where
64946512
if !self.forward_htlcs.lock().unwrap().is_empty() {
64956513
return true;
64966514
}
6515+
if !self.receive_htlcs.lock().unwrap().is_empty() {
6516+
return true;
6517+
}
64976518
if !self.decode_update_add_htlcs.lock().unwrap().is_empty() {
64986519
return true;
64996520
}
@@ -6541,20 +6562,19 @@ where
65416562

65426563
for (short_chan_id, mut pending_forwards) in forward_htlcs {
65436564
should_persist = NotifyOption::DoPersist;
6544-
if short_chan_id != 0 {
6545-
self.process_forward_htlcs(
6546-
short_chan_id,
6547-
&mut pending_forwards,
6548-
&mut failed_forwards,
6549-
&mut phantom_receives,
6550-
);
6551-
} else {
6552-
self.process_receive_htlcs(
6553-
&mut pending_forwards,
6554-
&mut new_events,
6555-
&mut failed_forwards,
6556-
);
6557-
}
6565+
self.process_forward_htlcs(
6566+
short_chan_id,
6567+
&mut pending_forwards,
6568+
&mut failed_forwards,
6569+
&mut phantom_receives,
6570+
);
6571+
}
6572+
6573+
let mut receive_htlcs = Vec::new();
6574+
mem::swap(&mut receive_htlcs, &mut self.receive_htlcs.lock().unwrap());
6575+
if !receive_htlcs.is_empty() {
6576+
self.process_receive_htlcs(receive_htlcs, &mut new_events, &mut failed_forwards);
6577+
should_persist = NotifyOption::DoPersist;
65586578
}
65596579

65606580
let best_block_height = self.best_block.read().unwrap().height;
@@ -7068,11 +7088,11 @@ where
70687088
}
70697089

70707090
fn process_receive_htlcs(
7071-
&self, pending_forwards: &mut Vec<HTLCForwardInfo>,
7091+
&self, receive_htlcs: Vec<HTLCForwardInfo>,
70727092
new_events: &mut VecDeque<(Event, Option<EventCompletionAction>)>,
70737093
failed_forwards: &mut Vec<FailedHTLCForward>,
70747094
) {
7075-
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
7095+
'next_forwardable_htlc: for forward_info in receive_htlcs.into_iter() {
70767096
match forward_info {
70777097
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
70787098
prev_short_channel_id,
@@ -10613,8 +10633,21 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1061310633
let scid = match forward_info.routing {
1061410634
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
1061510635
PendingHTLCRouting::TrampolineForward { .. } => 0,
10616-
PendingHTLCRouting::Receive { .. } => 0,
10617-
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
10636+
PendingHTLCRouting::Receive { .. }
10637+
| PendingHTLCRouting::ReceiveKeysend { .. } => {
10638+
self.receive_htlcs.lock().unwrap().push(HTLCForwardInfo::AddHTLC(
10639+
PendingAddHTLCInfo {
10640+
prev_short_channel_id,
10641+
prev_counterparty_node_id,
10642+
prev_funding_outpoint,
10643+
prev_channel_id,
10644+
prev_htlc_id,
10645+
prev_user_channel_id,
10646+
forward_info,
10647+
},
10648+
));
10649+
continue;
10650+
},
1061810651
};
1061910652
// Pull this now to avoid introducing a lock order with `forward_htlcs`.
1062010653
let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
@@ -15279,6 +15312,8 @@ where
1527915312
}
1528015313
}
1528115314

15315+
let receive_htlcs = self.receive_htlcs.lock().unwrap();
15316+
1528215317
let mut decode_update_add_htlcs_opt = None;
1528315318
let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
1528415319
if !decode_update_add_htlcs.is_empty() {
@@ -15446,6 +15481,7 @@ where
1544615481
(17, in_flight_monitor_updates, option),
1544715482
(19, peer_storage_dir, optional_vec),
1544815483
(21, WithoutLength(&self.flow.writeable_async_receive_offer_cache()), required),
15484+
(23, *receive_htlcs, required_vec),
1544915485
});
1545015486

1545115487
Ok(())
@@ -16006,6 +16042,7 @@ where
1600616042
const MAX_ALLOC_SIZE: usize = 1024 * 64;
1600716043
let forward_htlcs_count: u64 = Readable::read(reader)?;
1600816044
let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
16045+
let mut legacy_receive_htlcs: Vec<HTLCForwardInfo> = Vec::new();
1600916046
for _ in 0..forward_htlcs_count {
1601016047
let short_channel_id = Readable::read(reader)?;
1601116048
let pending_forwards_count: u64 = Readable::read(reader)?;
@@ -16014,7 +16051,26 @@ where
1601416051
MAX_ALLOC_SIZE / mem::size_of::<HTLCForwardInfo>(),
1601516052
));
1601616053
for _ in 0..pending_forwards_count {
16017-
pending_forwards.push(Readable::read(reader)?);
16054+
let pending_htlc = Readable::read(reader)?;
16055+
// Prior to LDK 0.2, Receive HTLCs used to be stored in `forward_htlcs` under SCID == 0. Here we migrate
16056+
// the old data if necessary.
16057+
if short_channel_id == 0 {
16058+
match pending_htlc {
16059+
HTLCForwardInfo::AddHTLC(ref htlc_info) => {
16060+
if matches!(
16061+
htlc_info.forward_info.routing,
16062+
PendingHTLCRouting::Receive { .. }
16063+
| PendingHTLCRouting::ReceiveKeysend { .. }
16064+
) {
16065+
legacy_receive_htlcs.push(pending_htlc);
16066+
continue;
16067+
}
16068+
},
16069+
_ => {},
16070+
}
16071+
}
16072+
16073+
pending_forwards.push(pending_htlc);
1601816074
}
1601916075
forward_htlcs.insert(short_channel_id, pending_forwards);
1602016076
}
@@ -16131,6 +16187,7 @@ where
1613116187
let mut inbound_payment_id_secret = None;
1613216188
let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
1613316189
let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new();
16190+
let mut receive_htlcs = None;
1613416191
read_tlv_fields!(reader, {
1613516192
(1, pending_outbound_payments_no_retry, option),
1613616193
(2, pending_intercepted_htlcs, option),
@@ -16149,8 +16206,14 @@ where
1614916206
(17, in_flight_monitor_updates, option),
1615016207
(19, peer_storage_dir, optional_vec),
1615116208
(21, async_receive_offer_cache, (default_value, async_receive_offer_cache)),
16209+
(23, receive_htlcs, optional_vec),
1615216210
});
1615316211
let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
16212+
debug_assert!(
16213+
receive_htlcs.as_ref().map_or(true, |r| r.is_empty())
16214+
|| legacy_receive_htlcs.is_empty()
16215+
);
16216+
let receive_htlcs = receive_htlcs.unwrap_or_else(|| legacy_receive_htlcs);
1615416217
let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
1615516218
if fake_scid_rand_bytes.is_none() {
1615616219
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
@@ -16981,6 +17044,7 @@ where
1698117044
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
1698217045

1698317046
forward_htlcs: Mutex::new(forward_htlcs),
17047+
receive_htlcs: Mutex::new(receive_htlcs),
1698417048
decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
1698517049
claimable_payments: Mutex::new(ClaimablePayments {
1698617050
claimable_payments,

lightning/src/ln/payment_tests.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -636,8 +636,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
636636
nodes[3].node.process_pending_update_add_htlcs();
637637

638638
assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
639-
assert_eq!(nodes[3].node.forward_htlcs.lock().unwrap().len(), 1);
640-
match nodes[3].node.forward_htlcs.lock().unwrap().get_mut(&0).unwrap().get_mut(0).unwrap() {
639+
assert_eq!(nodes[3].node.receive_htlcs.lock().unwrap().len(), 1);
640+
match nodes[3].node.receive_htlcs.lock().unwrap().get_mut(0).unwrap() {
641641
&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => {
642642
match forward_info.routing {
643643
PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => {
@@ -683,8 +683,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() {
683683
assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
684684
assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
685685
nodes[3].node.process_pending_update_add_htlcs();
686-
assert_eq!(nodes[3].node.forward_htlcs.lock().unwrap().len(), 1);
687-
match nodes[3].node.forward_htlcs.lock().unwrap().get_mut(&0).unwrap().get_mut(0).unwrap() {
686+
assert_eq!(nodes[3].node.receive_htlcs.lock().unwrap().len(), 1);
687+
match nodes[3].node.receive_htlcs.lock().unwrap().get_mut(0).unwrap() {
688688
&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => {
689689
match forward_info.routing {
690690
PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => {

0 commit comments

Comments
 (0)