Commit d034929
Fix rustfmt'd short ChannelManager methods
In the previous commit we formatted a bunch of short methods. Here we clean up the default formatting that rustfmt applied by extracting code into variables.
1 parent 66c11ff commit d034929
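
The pattern throughout the diff is the same: once an expression overflows the line width, rustfmt wraps it one method or argument per line, and hoisting a sub-expression into a named local shortens the expression enough to fit on one line again. A minimal standalone sketch of the idea (the data and names here are illustrative, not from this codebase):

```rust
fn main() {
	let values = vec![1u32, 2, 3, 4, 5];

	// With the predicate written inline, a long chain overflows the line
	// width and rustfmt wraps it one method per line. Naming the predicate
	// first keeps the chain short and on a single line:
	let is_large_even = |v: &u32| *v % 2 == 0 && *v > 3;
	let any_large_even = values.iter().any(is_large_even);
	println!("any large even value: {any_large_even}");
}
```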

File tree

1 file changed: +90 −108 lines


lightning/src/ln/channelmanager.rs

Lines changed: 90 additions & 108 deletions
@@ -717,6 +717,8 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 type PerSourcePendingForward =
 	(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
 
+type RAAMonitorUpdateBlockingActionMap = BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>;
+
 mod fuzzy_channelmanager {
 	use super::*;
 
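The new alias pays off later in this diff, where a three-line generic parameter in `raa_monitor_updates_held` collapses to a single identifier. A self-contained sketch of the same trick, using hypothetical stand-in types rather than LDK's:

```rust
use std::collections::BTreeMap;

// Hypothetical stand-ins for LDK's ChannelId and RAAMonitorUpdateBlockingAction.
type ChannelId = [u8; 32];
type BlockingAction = u64;

// Without the alias, this map type forces rustfmt to split any signature
// using it across several lines; with it, the signature stays on one line.
type BlockingActionMap = BTreeMap<ChannelId, Vec<BlockingAction>>;

fn updates_held(actions: &BlockingActionMap, channel_id: &ChannelId) -> bool {
	actions.get(channel_id).map(|v| !v.is_empty()).unwrap_or(false)
}

fn main() {
	let mut actions: BlockingActionMap = BTreeMap::new();
	actions.insert([0u8; 32], vec![1]);
	assert!(updates_held(&actions, &[0u8; 32]));
}
```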
@@ -1517,10 +1519,10 @@ where
 				return false;
 			}
 		}
-		!self
-			.channel_by_id
-			.iter()
-			.any(|(_, channel)| channel.is_funded() || channel.funding().is_outbound())
+		let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
+			channel.is_funded() || channel.funding().is_outbound()
+		};
+		!self.channel_by_id.iter().any(chan_is_funded_or_outbound)
 			&& self.monitor_update_blocked_actions.is_empty()
 			&& self.closed_channel_monitor_update_ids.is_empty()
 	}
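Note the partial type annotation `(_, &Channel<SP>)`: an inline closure gets its argument types inferred from the call site, but a closure bound to a `let` can lose that context and need a hint. A minimal illustration with generic names (not LDK's types):

```rust
fn main() {
	let pairs = vec![(1u64, "a"), (2, "b")];

	// Inline, the closure's argument type is inferred from `any`'s item type.
	let _ = pairs.iter().any(|(id, _)| *id > 1);

	// Bound to a `let`, the closure needs an annotation; partial forms
	// with `_` (as in the hunk above) are also accepted here.
	let has_big_id = |(id, _): &(u64, &str)| *id > 1;
	let _ = pairs.iter().any(has_big_id);
}
```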
@@ -3313,17 +3315,14 @@ macro_rules! emit_funding_tx_broadcast_safe_event {
 macro_rules! emit_channel_pending_event {
 	($locked_events: expr, $channel: expr) => {
 		if $channel.context.should_emit_channel_pending_event() {
+			let funding_txo = $channel.funding.get_funding_txo().unwrap();
 			$locked_events.push_back((
 				events::Event::ChannelPending {
 					channel_id: $channel.context.channel_id(),
 					former_temporary_channel_id: $channel.context.temporary_channel_id(),
 					counterparty_node_id: $channel.context.get_counterparty_node_id(),
 					user_channel_id: $channel.context.get_user_id(),
-					funding_txo: $channel
-						.funding
-						.get_funding_txo()
-						.unwrap()
-						.into_bitcoin_outpoint(),
+					funding_txo: funding_txo.into_bitcoin_outpoint(),
 					channel_type: Some($channel.funding.get_channel_type().clone()),
 				},
 				None,
@@ -3798,8 +3797,8 @@ where
 		let mut outbound_scid_alias = 0;
 		let mut i = 0;
 		loop {
+			// fuzzing chacha20 doesn't use the key at all so we always get the same alias
 			if cfg!(fuzzing) {
-				// fuzzing chacha20 doesn't use the key at all so we always get the same alias
 				outbound_scid_alias += 1;
 			} else {
 				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
@@ -3931,22 +3930,17 @@ where
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
-				res.extend(
-					peer_state
-						.channel_by_id
-						.iter()
-						// Only `Channels` in the `Channel::Funded` phase can be considered funded.
-						.filter(|(_, chan)| chan.is_funded())
-						.filter(f)
-						.map(|(_channel_id, channel)| {
-							ChannelDetails::from_channel(
-								channel,
-								best_block_height,
-								peer_state.latest_features.clone(),
-								&self.fee_estimator,
-							)
-						}),
-				);
+				// Only `Channels` in the `Channel::Funded` phase can be considered funded.
+				let filtered_chan_by_id =
+					peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f);
+				res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| {
+					ChannelDetails::from_channel(
+						channel,
+						best_block_height,
+						peer_state.latest_features.clone(),
+						&self.fee_estimator,
+					)
+				}));
			}
		}
		res
@@ -4013,12 +4007,8 @@ where
					&self.fee_estimator,
				)
			};
-			return peer_state
-				.channel_by_id
-				.iter()
-				.map(|(_, chan)| (chan))
-				.map(channel_to_details)
-				.collect();
+			let chan_by_id = peer_state.channel_by_id.iter();
+			return chan_by_id.map(|(_, chan)| (chan)).map(channel_to_details).collect();
		}
		vec![]
	}
@@ -8919,9 +8909,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
	) -> Result<(), MsgHandleErrInternal> {
		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_add_output(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_add_output(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
				None => Err("tx_add_output"),
			}
		})
@@ -8932,9 +8925,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
	) -> Result<(), MsgHandleErrInternal> {
		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_remove_input(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_remove_input(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
				None => Err("tx_remove_input"),
			}
		})
@@ -8945,9 +8941,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
	) -> Result<(), MsgHandleErrInternal> {
		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_remove_output(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_remove_output(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
				None => Err("tx_remove_output"),
			}
		})
@@ -9657,13 +9656,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
		let num_forward_events = pending_events
			.iter()
-			.filter(|(ev, _)| {
-				if let events::Event::PendingHTLCsForwardable { .. } = ev {
-					true
-				} else {
-					false
-				}
-			})
+			.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
			.count();
		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
		// events is done in batches and they are not removed until we're done processing each
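`matches!(expr, pattern)` expands to a `match` that returns `true` on the pattern and `false` otherwise, so the replacement above is behavior-preserving. A small demonstration with a stand-in enum (not LDK's `events::Event`):

```rust
// Stand-in event type, for illustration only.
enum Event {
	PendingHtlcsForwardable { time_forwardable: u64 },
	PaymentSent,
}

fn main() {
	let events =
		vec![Event::PendingHtlcsForwardable { time_forwardable: 1 }, Event::PaymentSent];

	// matches! is sugar for `if let <pattern> = <expr> { true } else { false }`.
	let num_forwardable = events
		.iter()
		.filter(|ev| matches!(ev, Event::PendingHtlcsForwardable { .. }))
		.count();
	assert_eq!(num_forwardable, 1);
}
```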
@@ -9686,25 +9679,24 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
	/// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
	/// the [`ChannelMonitorUpdate`] in question.
	fn raa_monitor_updates_held(
-		&self,
-		actions_blocking_raa_monitor_updates: &BTreeMap<
-			ChannelId,
-			Vec<RAAMonitorUpdateBlockingAction>,
-		>,
+		&self, actions_blocking_raa_monitor_updates: &RAAMonitorUpdateBlockingActionMap,
		channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey,
	) -> bool {
-		actions_blocking_raa_monitor_updates
+		let update_blocking_action_present = actions_blocking_raa_monitor_updates
			.get(&channel_id)
			.map(|v| !v.is_empty())
-			.unwrap_or(false)
-			|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
-				action
-					== &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-						channel_funding_outpoint,
-						channel_id,
-						counterparty_node_id,
-					})
-			})
+			.unwrap_or(false);
+		if update_blocking_action_present {
+			return true;
+		}
+		return self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+			action
+				== &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+					channel_funding_outpoint,
+					channel_id,
+					counterparty_node_id,
+				})
+		});
	}

	#[cfg(any(test, feature = "_test_utils"))]
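Splitting the `||` into an early return is equally behavior-preserving, since `a || b` short-circuits exactly like `if a { return true; } b`. A tiny sketch with hypothetical inputs:

```rust
// The two forms are equivalent; the second avoids a long
// rustfmt-wrapped `||` chain by returning early.
fn held_chained(blocked: bool, pending: bool) -> bool {
	blocked || pending
}

fn held_early_return(blocked: bool, pending: bool) -> bool {
	if blocked {
		return true;
	}
	pending
}

fn main() {
	for b in [false, true] {
		for p in [false, true] {
			assert_eq!(held_chained(b, p), held_early_return(b, p));
		}
	}
}
```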
@@ -10960,30 +10952,32 @@ where
		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
		route_params_config: RouteParametersConfig,
	) -> Result<(), Bolt12SemanticError> {
+		let create_pending_payment_fn = |invoice_request: &InvoiceRequest, nonce| {
+			let expiration = StaleExpiration::TimerTicks(1);
+			let retryable_invoice_request = RetryableInvoiceRequest {
+				invoice_request: invoice_request.clone(),
+				nonce,
+				needs_retry: true,
+			};
+			self.pending_outbound_payments
+				.add_new_awaiting_invoice(
+					payment_id,
+					expiration,
+					retry_strategy,
+					route_params_config,
+					Some(retryable_invoice_request),
+				)
+				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
+		};
+
		self.pay_for_offer_intern(
			offer,
			quantity,
			amount_msats,
			payer_note,
			payment_id,
			None,
-			|invoice_request, nonce| {
-				let expiration = StaleExpiration::TimerTicks(1);
-				let retryable_invoice_request = RetryableInvoiceRequest {
-					invoice_request: invoice_request.clone(),
-					nonce,
-					needs_retry: true,
-				};
-				self.pending_outbound_payments
-					.add_new_awaiting_invoice(
-						payment_id,
-						expiration,
-						retry_strategy,
-						route_params_config,
-						Some(retryable_invoice_request),
-					)
-					.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
-			},
+			create_pending_payment_fn,
		)
	}

@@ -11287,9 +11281,8 @@ where
	}

	fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
-		self.per_peer_state
-			.read()
-			.unwrap()
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		per_peer_state
			.iter()
			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
			.filter(|(_, peer)| peer.is_connected)
@@ -12109,13 +12102,10 @@ where
		self.do_chain_event(None, |channel| {
			if let Some(funding_txo) = channel.funding.get_funding_txo() {
				if funding_txo.txid == *txid {
-					channel
-						.funding_transaction_unconfirmed(&&WithChannelContext::from(
-							&self.logger,
-							&channel.context,
-							None,
-						))
-						.map(|()| (None, Vec::new(), None))
+					let chan_context =
+						WithChannelContext::from(&self.logger, &channel.context, None);
+					let res = channel.funding_transaction_unconfirmed(&&chan_context);
+					res.map(|()| (None, Vec::new(), None))
				} else {
					Ok((None, Vec::new(), None))
				}
@@ -12442,13 +12432,13 @@ where
	MR::Target: MessageRouter,
	L::Target: Logger,
{
-	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
+	fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
		// Note that we never need to persist the updated ChannelManager for an inbound
		// open_channel message - pre-funded channels are never written so there should be no
		// change to the contents.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
-			let res =
-				self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
+			let msg = OpenChannelMessageRef::V1(message);
+			let res = self.internal_open_channel(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => {
					debug_assert!(false, "We shouldn't close a new channel");
@@ -12957,16 +12947,10 @@ where
	{
		let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;

-		if self
-			.flow
-			.enqueue_invoice_request(
-				invoice_request,
-				payment_id,
-				nonce,
-				self.get_peers_for_blinded_path(),
-			)
-			.is_err()
-		{
+		let peers = self.get_peers_for_blinded_path();
+		let enqueue_invreq_res =
+			self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
+		if enqueue_invreq_res.is_err() {
			log_warn!(
				self.logger,
				"Retry failed for invoice request with payment_id {}",
@@ -14075,11 +14059,9 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let len: u64 = Readable::read(reader)?;
		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
-		let mut events: Self = VecDeque::with_capacity(cmp::min(
-			MAX_ALLOC_SIZE
-				/ mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
-			len,
-		) as usize);
+		let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
+		let mut events: Self =
+			VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
		for _ in 0..len {
			let ev_opt = MaybeReadable::read(reader)?;
			let action = Readable::read(reader)?;
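
Aside from the hoisted `size_of` call, the pattern in this hunk is worth noting on its own: capping `with_capacity` at `MAX_ALLOC_SIZE / size_of::<T>()` rather than trusting a length prefix read from the wire. A generic sketch of that guard (names are illustrative, not LDK's API):

```rust
use std::cmp;
use std::collections::VecDeque;
use std::mem;

// Never pre-allocate more than 16 KiB up front, no matter what the
// (untrusted) length prefix claims.
const MAX_ALLOC_SIZE: u64 = 1024 * 16;

// Assumes T is not zero-sized, so the division is well-defined.
fn bounded_capacity<T>(claimed_len: u64) -> usize {
	let item_size = mem::size_of::<T>();
	cmp::min(MAX_ALLOC_SIZE / item_size as u64, claimed_len) as usize
}

fn main() {
	// A hostile length prefix of u64::MAX still yields a small allocation;
	// the deque simply grows later if that many items really do arrive.
	let events: VecDeque<(u64, Option<u32>)> =
		VecDeque::with_capacity(bounded_capacity::<(u64, Option<u32>)>(u64::MAX));
	assert!(events.capacity() >= 1024);
}
```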
