@@ -3860,3 +3860,224 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
 	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
 	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
 }
+
+#[test]
+#[cfg(all(feature = "std", not(target_os = "windows")))]
+fn test_single_channel_multiple_mpp() {
+	use std::sync::atomic::{AtomicBool, Ordering};
+
+	// Test what happens when we attempt to claim an MPP payment with many parts that came to us
+	// through the same channel, using a synchronous persistence interface which has very high
+	// latency.
+	//
+	// Previously, if a `revoke_and_ack` came in while we were still running in
+	// `ChannelManager::claim_funds` we'd end up hanging waiting to apply a
+	// `ChannelMonitorUpdate` until after it completed. See the commit which introduced this test
+	// for more info.
+	let chanmon_cfgs = create_chanmon_cfgs(9);
+	let node_cfgs = create_node_cfgs(9, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(9, &node_cfgs, &[const { None }; 9]);
+	let mut nodes = create_network(9, &node_cfgs, &node_chanmgrs);
+
+	let node_7_id = nodes[7].node.get_our_node_id();
+	let node_8_id = nodes[8].node.get_our_node_id();
+
+	// Send an MPP payment in six parts along the path shown from top to bottom
+	//        0
+	//  1 2 3 4 5 6
+	//        7
+	//        8
+	//
+	// We can in theory reproduce this issue with fewer channels/HTLCs, but getting this test
+	// robust is rather challenging. We rely on having the main test thread wait on locks held by
+	// the background `claim_funds` thread, which get released when that thread completes a
+	// single `ChannelMonitorUpdate`.
+	// This thread calls `get_and_clear_pending_msg_events()` and `handle_revoke_and_ack()`, both
+	// of which require `ChannelManager` locks, but we have to make sure this thread gets a chance
+	// to be blocked on the mutexes before we let the background thread wake `claim_funds`, so
+	// that the mutexes can be handed over to this main thread.
+	// This relies on our locks being fair, but also on our threads getting runtime during the
+	// test run, which can be pretty competitive. Thus we do a dumb dance to be as conservative as
+	// possible - we have a background thread which completes a `ChannelMonitorUpdate` (by sending
+	// into the `write_blocker` mpsc) but it doesn't run until an mpsc channel sends from this
+	// main thread to the background thread, and then we let it sleep a while before we send the
+	// `ChannelMonitorUpdate` unblocker.
+	// Further, we give ourselves two chances each time, needing 4 HTLCs just to unlock our two
+	// `ChannelManager` calls. We then need a few remaining HTLCs to actually trigger the bug, so
+	// we use 6 HTLCs.
+	// Finally, we do not run this test on Windows, where thread scheduling has proven unfair
+	// enough in practice that the timing assumptions above do not reliably hold.
+	const MAX_THREAD_INIT_TIME: std::time::Duration = std::time::Duration::from_secs(1);
+
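+	// Schematically, each gated step below follows this pattern (names are the ones defined in
+	// this test; the real code extends `gate`'s lifetime via `transmute` so both the closure and
+	// this thread can use it):
+	//     let gate = AtomicBool::new(true);
+	//     let thrd = std::thread::spawn(move || {
+	//         while gate.load(Ordering::Acquire) { std::thread::yield_now(); }
+	//         std::thread::sleep(MAX_THREAD_INIT_TIME);
+	//         do_a_write.send(()).unwrap(); // completes one pending `ChannelMonitorUpdate`
+	//     });
+	//     gate.store(false, Ordering::Release); // released just before the blocking call
+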
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 3, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 4, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 5, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 6, 100_000, 0);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 3, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 4, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 5, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 6, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 7, 8, 1_000_000, 0);
+
+	let (mut route, payment_hash, payment_preimage, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000);
+
+	send_along_route_with_secret(
+		&nodes[0],
+		route,
+		&[
+			&[&nodes[1], &nodes[7], &nodes[8]],
+			&[&nodes[2], &nodes[7], &nodes[8]],
+			&[&nodes[3], &nodes[7], &nodes[8]],
+			&[&nodes[4], &nodes[7], &nodes[8]],
+			&[&nodes[5], &nodes[7], &nodes[8]],
+			&[&nodes[6], &nodes[7], &nodes[8]],
+		],
+		50_000_000,
+		payment_hash,
+		payment_secret,
+	);
+
+	let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0);
+	*nodes[8].chain_monitor.write_blocker.lock().unwrap() = Some(blocker);
+
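+	// Editorial note: `write_blocker` is assumed (per the `TestChainMonitor` used by this test
+	// framework) to make each `ChannelMonitor` persist on node 8 block on `blocker.recv()`.
+	// Each `do_a_write.send(())` below therefore releases exactly one pending
+	// `ChannelMonitorUpdate`; we send seven in total, matching the
+	// `check_added_monitors(&nodes[8], 7)` assertion further down.
+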
+	// Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+	// We do this by casting a pointer to a `TestChannelManager` to a pointer to a
+	// `TestChannelManager` with a different (in this case 'static) lifetime.
+	// This is even suggested in the second example at
+	// https://doc.rust-lang.org/std/mem/fn.transmute.html#examples
+	let claim_node: &'static TestChannelManager<'static, 'static> =
+		unsafe { std::mem::transmute(nodes[8].node as &TestChannelManager) };
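+	// Safety (editorial): this lifetime extension is sound only because `thrd` is `join()`ed
+	// below, before `nodes` (and the `TestChannelManager` it borrows) is dropped.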
+	let thrd = std::thread::spawn(move || {
+		// Initiate the claim in a background thread as it will immediately block waiting on the
+		// `write_blocker` we set above.
+		claim_node.claim_funds(payment_preimage);
+	});
+
+	// First unlock one monitor so that we have a pending
+	// `update_fulfill_htlc`/`commitment_signed` pair to pass to our counterparty.
+	do_a_write.send(()).unwrap();
+
+	// Then fetch the `update_fulfill_htlc`/`commitment_signed` pair. Note that
+	// `get_and_clear_pending_msg_events` will immediately hang trying to take a peer lock which
+	// `claim_funds` is holding. Thus, we release a second write after a small sleep in the
+	// background to give `claim_funds` a chance to step forward, unblocking
+	// `get_and_clear_pending_msg_events`.
+	let do_a_write_background = do_a_write.clone();
+	let block_thrd2 = AtomicBool::new(true);
+	let block_thrd2_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd2) };
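+	// The same lifetime-extension trick as above, this time for a stack-allocated `AtomicBool`;
+	// sound because `thrd2` is joined before `block_thrd2` goes out of scope.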
+	let thrd2 = std::thread::spawn(move || {
+		while block_thrd2_read.load(Ordering::Acquire) {
+			std::thread::yield_now();
+		}
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+	});
+	block_thrd2.store(false, Ordering::Release);
+	let first_updates = get_htlc_update_msgs(&nodes[8], &node_7_id);
+	thrd2.join().unwrap();
+
+	// Disconnect node 7 from all its peers so it doesn't bother to fail the HTLCs back
+	nodes[7].node.peer_disconnected(nodes[1].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[2].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[3].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[4].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[5].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[6].node.get_our_node_id());
+
+	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]);
+	check_added_monitors(&nodes[7], 1);
+	expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false);
+	nodes[7].node.handle_commitment_signed(node_8_id, &first_updates.commitment_signed);
+	check_added_monitors(&nodes[7], 1);
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+
+	// Now, handle the `revoke_and_ack` from node 7. Note that `claim_funds` is still blocked on
+	// our peer lock, so we have to release a write to let it process.
+	// After this call completes, the channel previously would be locked up and should not be able
+	// to make further progress.
+	let do_a_write_background = do_a_write.clone();
+	let block_thrd3 = AtomicBool::new(true);
+	let block_thrd3_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd3) };
+	let thrd3 = std::thread::spawn(move || {
+		while block_thrd3_read.load(Ordering::Acquire) {
+			std::thread::yield_now();
+		}
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+	});
+	block_thrd3.store(false, Ordering::Release);
+	nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
+	thrd3.join().unwrap();
+	assert!(!thrd.is_finished());
+
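+	// `claim_funds` must still be alive, blocked on its two remaining monitor writes. Before the
+	// fix this is the point at which the channel would wedge; releasing the final writes should
+	// now let the claim complete.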
+	let thrd4 = std::thread::spawn(move || {
+		do_a_write.send(()).unwrap();
+		do_a_write.send(()).unwrap();
+	});
+
+	thrd4.join().unwrap();
+	thrd.join().unwrap();
+
+	expect_payment_claimed!(nodes[8], payment_hash, 50_000_000);
+
+	// At the end, we should have 7 `ChannelMonitorUpdate`s - 6 for HTLC claims, and one for the
+	// above `revoke_and_ack`.
+	check_added_monitors(&nodes[8], 7);
+
+	// Now drive everything to the end, at least as far as node 7 is concerned...
+	*nodes[8].chain_monitor.write_blocker.lock().unwrap() = None;
+	nodes[8].node.handle_commitment_signed(node_7_id, &cs);
+	check_added_monitors(&nodes[8], 1);
+
+	let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id);
+
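+	// How many fulfills node 8 batches into each commitment depends on thread timing: the five
+	// remaining claims may arrive split 2+3 or 3+2 across the next two commitment updates, hence
+	// the `update_fulfill_htlcs.get(2)` checks below and the `next_source` counter tracking which
+	// upstream node each forwarded claim corresponds to.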
+	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false);
+	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]);
+	expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false);
+	let mut next_source = 4;
+	if let Some(update) = updates.update_fulfill_htlcs.get(2) {
+		nodes[7].node.handle_update_fulfill_htlc(node_8_id, update);
+		expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false);
+		next_source += 1;
+	}
+
+	nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed);
+	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	if updates.update_fulfill_htlcs.get(2).is_some() {
+		check_added_monitors(&nodes[7], 5);
+	} else {
+		check_added_monitors(&nodes[7], 4);
+	}
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+
+	nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
+	nodes[8].node.handle_commitment_signed(node_7_id, &cs);
+	check_added_monitors(&nodes[8], 2);
+
+	let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id);
+
+	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false);
+	next_source += 1;
+	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]);
+	expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false);
+	next_source += 1;
+	if let Some(update) = updates.update_fulfill_htlcs.get(2) {
+		nodes[7].node.handle_update_fulfill_htlc(node_8_id, update);
+		expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false);
+	}
+
+	nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed);
+	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	if updates.update_fulfill_htlcs.get(2).is_some() {
+		check_added_monitors(&nodes[7], 5);
+	} else {
+		check_added_monitors(&nodes[7], 4);
+	}
+
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+	nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
+	nodes[8].node.handle_commitment_signed(node_7_id, &cs);
+	check_added_monitors(&nodes[8], 2);
+
+	let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_7_id);
+	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	check_added_monitors(&nodes[7], 1);
+}