
Commit 6f4b099

Include release_held_htlc blinded paths in RAA
As part of supporting payments from an often-offline sender, the sender needs to send held_htlc_available onion messages whose reply paths terminate at their always-online channel counterparty, i.e. the node holding the HTLC. That way, when the recipient responds with release_held_htlc, the sender's counterparty will receive that message.

Here the counterparty starts including those reply paths in the revoke_and_ack message destined for the sender, so the sender can use them in subsequent held_htlc_available messages. We put the paths in the RAA to guarantee the sender actually receives them: failure to deliver the paths means the HTLC will time out and fail.
1 parent 5eed8d9 commit 6f4b099
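For orientation, here is a minimal sketch of the message shape this commit populates. The stand-in types below are inferred from the diff (the closure type `Fn(u64) -> BlindedMessagePath` and the `(htlc.htlc_id, path)` tuples pushed in `get_last_revoke_and_ack`); they are not the real definitions, which live in `lightning/src/ln/msgs.rs` and `lightning/src/blinded_path/message.rs`:

```rust
// Simplified stand-ins, not the actual LDK definitions.
pub struct BlindedMessagePath(pub Vec<u8>); // opaque here

pub struct RevokeAndACK {
    pub channel_id: [u8; 32],
    pub per_commitment_secret: [u8; 32],
    pub next_per_commitment_point: [u8; 33],
    /// Previously always empty; populated by this commit. One blinded reply
    /// path per held inbound HTLC, keyed by HTLC id, for the often-offline
    /// sender to use in subsequent held_htlc_available onion messages.
    pub release_htlc_message_paths: Vec<(u64, BlindedMessagePath)>,
}
```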

File tree: 2 files changed, +67 −24 lines

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs

lightning/src/ln/channel.rs

Lines changed: 53 additions & 16 deletions
```diff
@@ -28,6 +28,7 @@ use bitcoin::{secp256k1, sighash, TxIn};
 #[cfg(splicing)]
 use bitcoin::{FeeRate, Sequence};
 
+use crate::blinded_path::message::BlindedMessagePath;
 use crate::chain::chaininterface::{
     fee_for_weight, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator,
 };
```
```diff
@@ -283,6 +284,24 @@ impl InboundHTLCState {
             _ => None,
         }
     }
+
+    /// Whether we need to hold onto this HTLC until receipt of a corresponding [`ReleaseHeldHtlc`]
+    /// onion message.
+    ///
+    /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc
+    fn should_hold_htlc(&self) -> bool {
+        match self {
+            InboundHTLCState::RemoteAnnounced(res)
+            | InboundHTLCState::AwaitingRemoteRevokeToAnnounce(res)
+            | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(res) => match res {
+                InboundHTLCResolution::Pending { update_add_htlc } => {
+                    update_add_htlc.hold_htlc.is_some()
+                },
+                InboundHTLCResolution::Resolved { .. } => false,
+            },
+            InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => false,
+        }
+    }
 }
 
 struct InboundHTLCOutput {
```
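A note on the filter above: only HTLCs still progressing toward commitment — the `RemoteAnnounced`, `AwaitingRemoteRevokeToAnnounce`, and `AwaitingAnnouncedRemoteRevoke` states with a still-`Pending` resolution whose `update_add_htlc` carried the `hold_htlc` TLV — report `true`. `Committed` and `LocalRemoved` states, and HTLCs whose resolution is already `Resolved`, are excluded.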
```diff
@@ -1606,12 +1625,12 @@ where
     }
 
     #[rustfmt::skip]
-    pub fn signer_maybe_unblocked<L: Deref>(
-        &mut self, chain_hash: ChainHash, logger: &L,
-    ) -> Option<SignerResumeUpdates> where L::Target: Logger {
+    pub fn signer_maybe_unblocked<L: Deref, CBP>(
+        &mut self, chain_hash: ChainHash, logger: &L, path_for_release_htlc: CBP
+    ) -> Option<SignerResumeUpdates> where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath {
         match &mut self.phase {
             ChannelPhase::Undefined => unreachable!(),
-            ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger)),
+            ChannelPhase::Funded(chan) => Some(chan.signer_maybe_unblocked(logger, path_for_release_htlc)),
             ChannelPhase::UnfundedOutboundV1(chan) => {
                 let (open_channel, funding_created) = chan.signer_maybe_unblocked(chain_hash, logger);
                 Some(SignerResumeUpdates {
```
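Rather than having `channel.rs` build blinded paths itself (which would require router and node state it does not hold), every entry point that can emit an RAA now threads through a path builder `CBP: Fn(u64) -> BlindedMessagePath` mapping an HTLC id to a fresh reply path. A hypothetical caller-side sketch — the helper below is a placeholder, loosely mirroring the `channelmanager.rs` call sites further down:

```rust
// Hypothetical sketch of satisfying the new `CBP` bound; `build_release_path`
// is a made-up placeholder, not a real LDK function.
struct BlindedMessagePath; // opaque stand-in

fn build_release_path(_htlc_id: u64, _channel_id: &[u8; 32]) -> BlindedMessagePath {
    BlindedMessagePath // real code would construct a blinded route here
}

fn example(channel_id: [u8; 32]) {
    // This closure is what gets passed as `path_for_release_htlc: CBP`.
    let path_for_release_htlc = |htlc_id: u64| build_release_path(htlc_id, &channel_id);
    let _path: BlindedMessagePath = path_for_release_htlc(0);
}
```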
```diff
@@ -8712,13 +8731,14 @@ where
     /// successfully and we should restore normal operation. Returns messages which should be sent
     /// to the remote side.
     #[rustfmt::skip]
-    pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+    pub fn monitor_updating_restored<L: Deref, NS: Deref, CBP>(
         &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
-        user_config: &UserConfig, best_block_height: u32
+        user_config: &UserConfig, best_block_height: u32, path_for_release_htlc: CBP
     ) -> MonitorRestoreUpdates
     where
         L::Target: Logger,
-        NS::Target: NodeSigner
+        NS::Target: NodeSigner,
+        CBP: Fn(u64) -> BlindedMessagePath
     {
         assert!(self.context.channel_state.is_monitor_update_in_progress());
         self.context.channel_state.clear_monitor_update_in_progress();
@@ -8787,7 +8807,7 @@ where
         }
 
         let mut raa = if self.context.monitor_pending_revoke_and_ack {
-            self.get_last_revoke_and_ack(logger)
+            self.get_last_revoke_and_ack(path_for_release_htlc, logger)
         } else { None };
         let mut commitment_update = if self.context.monitor_pending_commitment_signed {
             self.get_last_commitment_update_for_send(logger).ok()
```
```diff
@@ -8877,7 +8897,9 @@ where
     /// Indicates that the signer may have some signatures for us, so we should retry if we're
     /// blocked.
     #[rustfmt::skip]
-    pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
+    pub fn signer_maybe_unblocked<L: Deref, CBP>(
+        &mut self, logger: &L, path_for_release_htlc: CBP
+    ) -> SignerResumeUpdates where L::Target: Logger, CBP: Fn(u64) -> BlindedMessagePath {
         if !self.holder_commitment_point.can_advance() {
             log_trace!(logger, "Attempting to update holder per-commitment point...");
             self.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
@@ -8905,7 +8927,7 @@ where
         } else { None };
         let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack {
             log_trace!(logger, "Attempting to generate pending revoke and ack...");
-            self.get_last_revoke_and_ack(logger)
+            self.get_last_revoke_and_ack(path_for_release_htlc, logger)
         } else { None };
 
         if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
```
```diff
@@ -8976,9 +8998,12 @@ where
         }
     }
 
-    fn get_last_revoke_and_ack<L: Deref>(&mut self, logger: &L) -> Option<msgs::RevokeAndACK>
+    fn get_last_revoke_and_ack<CBP, L: Deref>(
+        &mut self, path_for_release_htlc: CBP, logger: &L,
+    ) -> Option<msgs::RevokeAndACK>
     where
         L::Target: Logger,
+        CBP: Fn(u64) -> BlindedMessagePath,
     {
         debug_assert!(
             self.holder_commitment_point.next_transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2
```
```diff
@@ -8991,14 +9016,22 @@ where
             .ok();
         if let Some(per_commitment_secret) = per_commitment_secret {
             if self.holder_commitment_point.can_advance() {
+                let mut release_htlc_message_paths = Vec::new();
+                for htlc in &self.context.pending_inbound_htlcs {
+                    if htlc.state.should_hold_htlc() {
+                        let path = path_for_release_htlc(htlc.htlc_id);
+                        release_htlc_message_paths.push((htlc.htlc_id, path));
+                    }
+                }
+
                 self.context.signer_pending_revoke_and_ack = false;
                 return Some(msgs::RevokeAndACK {
                     channel_id: self.context.channel_id,
                     per_commitment_secret,
                     next_per_commitment_point: self.holder_commitment_point.next_point(),
                     #[cfg(taproot)]
                     next_local_nonce: None,
-                    release_htlc_message_paths: Vec::new(),
+                    release_htlc_message_paths,
                 });
             }
         }
```
```diff
@@ -9146,13 +9179,15 @@ where
     /// May panic if some calls other than message-handling calls (which will all Err immediately)
     /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
     #[rustfmt::skip]
-    pub fn channel_reestablish<L: Deref, NS: Deref>(
+    pub fn channel_reestablish<L: Deref, NS: Deref, CBP>(
         &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
-        chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
+        chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock,
+        path_for_release_htlc: CBP,
     ) -> Result<ReestablishResponses, ChannelError>
     where
         L::Target: Logger,
-        NS::Target: NodeSigner
+        NS::Target: NodeSigner,
+        CBP: Fn(u64) -> BlindedMessagePath
     {
         if !self.context.channel_state.is_peer_disconnected() {
             // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
@@ -9371,7 +9406,7 @@ where
                 self.context.monitor_pending_revoke_and_ack = true;
                 None
             } else {
-                self.get_last_revoke_and_ack(logger)
+                self.get_last_revoke_and_ack(path_for_release_htlc, logger)
             }
         } else {
             debug_assert!(false, "All values should have been handled in the four cases above");
```
```diff
@@ -16635,6 +16670,7 @@ mod tests {
             chain_hash,
             &config,
             0,
+            |_| unreachable!()
         );
 
         // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
@@ -16649,6 +16685,7 @@ mod tests {
             chain_hash,
             &config,
             0,
+            |_| unreachable!()
         );
         // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
         // as the funding transaction depends on all channels in the batch becoming ready.
```
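Both test call sites pass `|_| unreachable!()` as the path builder: these tests drive channels that are still at the funding stage, so no inbound HTLC (held or otherwise) can exist yet and the closure can never run — the panic doubles as an assertion of that.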

lightning/src/ln/channelmanager.rs

Lines changed: 14 additions & 8 deletions
```diff
@@ -377,7 +377,7 @@ impl PendingHTLCRouting {
 
     /// Whether this HTLC should be held by our node until we receive a corresponding
     /// [`ReleaseHeldHtlc`] onion message.
-    fn should_hold_htlc(&self) -> bool {
+    pub(super) fn should_hold_htlc(&self) -> bool {
         match self {
             Self::Forward { hold_htlc: Some(()), .. } => true,
             _ => false,
```
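Widening `should_hold_htlc` from private to `pub(super)` presumably makes the check reusable from sibling modules under `ln`; the diff itself does not show the new caller.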
```diff
@@ -3443,18 +3443,20 @@ macro_rules! emit_initial_channel_ready_event {
 /// set for this channel is empty!
 macro_rules! handle_monitor_update_completion {
     ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+        let channel_id = $chan.context.channel_id();
+        let counterparty_node_id = $chan.context.get_counterparty_node_id();
         #[cfg(debug_assertions)]
         {
             let in_flight_updates =
-                $peer_state.in_flight_monitor_updates.get(&$chan.context.channel_id());
+                $peer_state.in_flight_monitor_updates.get(&channel_id);
             assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
             assert_eq!($chan.blocked_monitor_updates_pending(), 0);
         }
         let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
         let mut updates = $chan.monitor_updating_restored(&&logger,
             &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(),
-            $self.best_block.read().unwrap().height);
-        let counterparty_node_id = $chan.context.get_counterparty_node_id();
+            $self.best_block.read().unwrap().height,
+            |htlc_id| $self.path_for_release_held_htlc(htlc_id, &channel_id, &counterparty_node_id));
         let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
             // We only send a channel_update in the case where we are just now sending a
             // channel_ready and the channel is in a usable state. We may re-send a
@@ -3470,7 +3472,7 @@ macro_rules! handle_monitor_update_completion {
         } else { None };
 
         let update_actions = $peer_state.monitor_update_blocked_actions
-            .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
+            .remove(&channel_id).unwrap_or(Vec::new());
 
         let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
             &mut $peer_state.pending_msg_events, $chan, updates.raa,
@@ -3482,7 +3484,6 @@ macro_rules! handle_monitor_update_completion {
             $peer_state.pending_msg_events.push(upd);
         }
 
-        let channel_id = $chan.context.channel_id();
         let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
         core::mem::drop($peer_state_lock);
         core::mem::drop($per_peer_state_lock);
```
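Note the small refactor folded in here: `channel_id` and `counterparty_node_id` are now read out of `$chan` at the top of the macro, apparently because the new path-builder closure must not borrow `$chan`, which is already mutably borrowed by `monitor_updating_restored`. A minimal illustration of that borrow-checker constraint, under that assumption:

```rust
// Why the ids are hoisted: the closure passed into a &mut self method cannot
// itself borrow the same object, so we copy the id out first.
struct Chan { id: u64 }
impl Chan {
    fn restore(&mut self, f: impl Fn(u64) -> u64) -> u64 { f(self.id) }
}
fn demo(chan: &mut Chan) -> u64 {
    let id = chan.id; // hoisted copy, like `channel_id` in the macro
    chan.restore(|htlc_id| htlc_id ^ id) // closure captures the copy, not `chan`
}
```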
```diff
@@ -11177,6 +11178,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
                     self.chain_hash,
                     &self.config.read().unwrap(),
                     &*self.best_block.read().unwrap(),
+                    |htlc_id| self.path_for_release_held_htlc(htlc_id, &msg.channel_id, counterparty_node_id)
                 );
                 let responses = try_channel_entry!(self, peer_state, res, chan_entry);
                 let mut channel_update = None;
```
```diff
@@ -11652,9 +11654,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
         // Returns whether we should remove this channel as it's just been closed.
         let unblock_chan = |chan: &mut Channel<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
+            let channel_id = chan.context().channel_id();
             let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
             let node_id = chan.context().get_counterparty_node_id();
-            if let Some(msgs) = chan.signer_maybe_unblocked(self.chain_hash, &&logger) {
+            if let Some(msgs) = chan.signer_maybe_unblocked(
+                self.chain_hash, &&logger,
+                |htlc_id| self.path_for_release_held_htlc(htlc_id, &channel_id, &node_id)
+            ) {
                 if let Some(msg) = msgs.open_channel {
                     pending_msg_events.push(MessageSendEvent::SendOpenChannel {
                         node_id,
@@ -11675,7 +11681,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
             }
             let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs {
                 node_id,
-                channel_id: chan.context().channel_id(),
+                channel_id,
                 updates,
             });
             let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK {
```
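`path_for_release_held_htlc` itself is not part of this diff; the sketch below is only a guess at its shape from the call sites above. Presumably it builds a blinded message path terminating at our node, embedding enough reply context to map an incoming `release_held_htlc` onion message back to the held HTLC:

```rust
// Hypothetical shape of the path builder referenced above; types are opaque
// stand-ins, and the body is a placeholder, not LDK's implementation.
struct BlindedMessagePath;

fn path_for_release_held_htlc(
    htlc_id: u64,
    channel_id: &[u8; 32],
    _counterparty_node_id: &[u8; 33],
) -> BlindedMessagePath {
    // A real implementation would construct a blinded route ending at this
    // node, with (channel_id, htlc_id) recoverable from the reply context.
    let _reply_context = (*channel_id, htlc_id);
    BlindedMessagePath
}
```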
