Merge pull request #1503 from valentinewallace/2022-05-onion-msgs

diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index a86676422ed8df71faabd0b4ae7295cd1e4ed8c6..785edecebbe3d0b7eddb0f0f6de2011f895fdde1 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
 use chain::{ChannelMonitorUpdateErr, Confirm, Listen, Watch};
 use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use chain::transaction::OutPoint;
-use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use chain::keysinterface::KeysInterface;
+use ln::channel::EXPIRE_PREV_CONFIG_TICKS;
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
 use ln::features::{InitFeatures, InvoiceFeatures};
 use ln::msgs;
 use ln::msgs::ChannelMessageHandler;
 use routing::router::{PaymentParameters, get_route};
-use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use util::test_utils;
 use util::errors::APIError;
 use util::enforcing_trait_impls::EnforcingSigner;
@@ -27,6 +29,7 @@ use util::ser::{ReadableArgs, Writeable};
 use io;
 
 use bitcoin::{Block, BlockHeader, BlockHash};
+use bitcoin::network::constants::Network;
 
 use prelude::*;
 
@@ -40,7 +43,7 @@ fn retry_single_path_payment() {
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-       let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
        // Rebalance to find a route
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
 
@@ -59,7 +62,7 @@ fn retry_single_path_payment() {
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(&nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -68,7 +71,7 @@ fn retry_single_path_payment() {
        check_added_monitors!(nodes[1], 1);
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
-       expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Rebalance the channel so the retry succeeds.
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
@@ -117,10 +120,10 @@ fn mpp_retry() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let chan_4_id = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
+       let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known());
        // Rebalance
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
 
@@ -128,11 +131,11 @@ fn mpp_retry() {
        let path = route.paths[0].clone();
        route.paths.push(path);
        route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
-       route.paths[0][0].short_channel_id = chan_1_id;
-       route.paths[0][1].short_channel_id = chan_3_id;
+       route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
+       route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
        route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
-       route.paths[1][0].short_channel_id = chan_2_id;
-       route.paths[1][1].short_channel_id = chan_4_id;
+       route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
+       route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
 
        // Initiate the MPP payment.
        let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
@@ -162,7 +165,7 @@ fn mpp_retry() {
 
        // Attempt to forward the payment and complete the 2nd path's failure.
        expect_pending_htlcs_forwardable!(&nodes[2]);
-       expect_pending_htlcs_forwardable!(&nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -171,7 +174,7 @@ fn mpp_retry() {
        check_added_monitors!(nodes[2], 1);
        nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
-       expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Rebalance the channel so the second half of the payment can succeed.
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
@@ -197,6 +200,78 @@ fn mpp_retry() {
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
 }
 
+fn do_mpp_receive_timeout(send_partial_mpp: bool) {
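+       // If `send_partial_mpp` is set, deliver only one part of the MPP payment and let
+       // MPP_TIMEOUT_TICKS timer ticks elapse so the receiver fails the lone HTLC back. Otherwise,
+       // deliver both parts and confirm the timer never times out a complete payment.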
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
+       let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
+       let path = route.paths[0].clone();
+       route.paths.push(path);
+       route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+       route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
+       route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
+       route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+       route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
+       route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
+
+       // Initiate the MPP payment.
+       let _ = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+       check_added_monitors!(nodes[0], 2); // one monitor per path
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+
+       // Pass half of the payment along the first path.
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+
+       if send_partial_mpp {
+               // Time out the partial MPP
+               for _ in 0..MPP_TIMEOUT_TICKS {
+                       nodes[3].node.timer_tick_occurred();
+               }
+
+               // Failed HTLC from node 3 -> 1
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
+               let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
+               assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
+               nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
+               check_added_monitors!(nodes[3], 1);
+               commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
+
+               // Failed HTLC from node 1 -> 0
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
+               let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
+               check_added_monitors!(nodes[1], 1);
+               commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
+
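+               // Failure code 23 is the BOLT 4 mpp_timeout error, which carries no failure data.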
+               expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
+       } else {
+               // Pass half of the payment along the second path.
+               pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+
+               // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
+               for _ in 0..MPP_TIMEOUT_TICKS {
+                       nodes[3].node.timer_tick_occurred();
+               }
+
+               claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+       }
+}
+
+#[test]
+fn mpp_receive_timeout() {
+       do_mpp_receive_timeout(true);
+       do_mpp_receive_timeout(false);
+}
+
 #[test]
 fn retry_expired_payment() {
        let chanmon_cfgs = create_chanmon_cfgs(3);
@@ -205,7 +280,7 @@ fn retry_expired_payment() {
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-       let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known());
        // Rebalance to find a route
        send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
 
@@ -224,7 +299,7 @@ fn retry_expired_payment() {
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(&nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -293,7 +368,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
        let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
 
        // Serialize the ChannelManager prior to sending payments
@@ -302,7 +377,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
        // out and retry.
        let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
-       let (payment_preimage_1, _, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
        let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
        check_added_monitors!(nodes[0], 1);
 
@@ -375,12 +450,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known()});
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
        let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -401,6 +476,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // we close in a moment.
        nodes[2].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
+
        let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
        check_added_monitors!(nodes[1], 1);
@@ -408,7 +485,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 
        if confirm_before_reload {
                let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
-               nodes[0].node.best_block_updated(&best_block.0, best_block.1);
+               nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
        }
 
        // Create a new channel on which to retry the payment before we fail the payment via the
@@ -421,24 +498,31 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(bs_htlc_claim_txn.len(), 1);
        check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
-       expect_payment_forwarded!(nodes[1], None, false);
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);
 
-       mine_transaction(&nodes[0], &as_commitment_tx);
+       if !confirm_before_reload {
+               mine_transaction(&nodes[0], &as_commitment_tx);
+       }
        mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
        expect_payment_sent!(nodes[0], payment_preimage_1);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
        let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       check_spends!(as_htlc_timeout_txn[2], funding_tx);
-       check_spends!(as_htlc_timeout_txn[0], as_commitment_tx);
-       check_spends!(as_htlc_timeout_txn[1], as_commitment_tx);
        assert_eq!(as_htlc_timeout_txn.len(), 3);
-       if as_htlc_timeout_txn[0].input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
-               confirm_transaction(&nodes[0], &as_htlc_timeout_txn[1]);
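+       // The broadcast set holds the two HTLC-timeout transactions plus a rebroadcast of the
+       // commitment transaction itself; figure out which entries are the HTLC timeouts.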
+       let (first_htlc_timeout_tx, second_htlc_timeout_tx) = if as_htlc_timeout_txn[0] == as_commitment_tx {
+               (&as_htlc_timeout_txn[1], &as_htlc_timeout_txn[2])
        } else {
-               confirm_transaction(&nodes[0], &as_htlc_timeout_txn[0]);
+               assert_eq!(as_htlc_timeout_txn[2], as_commitment_tx);
+               (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1])
+       };
+       check_spends!(first_htlc_timeout_tx, as_commitment_tx);
+       check_spends!(second_htlc_timeout_tx, as_commitment_tx);
+       if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
+               confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
+       } else {
+               confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
        }
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-       expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
        // reloaded) via a route over the new channel, which work without issue and eventually be
@@ -448,8 +532,19 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
        // and not the original fee. We also update node[1]'s relevant config as
        // do_claim_payment_along_route expects us to never overpay.
-       nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_id_2).unwrap().config.forwarding_fee_base_msat += 100_000;
-       new_route.paths[0][0].fee_msat += 100_000;
+       {
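+               // Updating via update_config() keeps the previous config around for a grace period;
+               // the timer ticks below force it to expire so the new fee actually applies.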
+               let mut channel_state = nodes[1].node.channel_state.lock().unwrap();
+               let mut channel = channel_state.by_id.get_mut(&chan_id_2).unwrap();
+               let mut new_config = channel.config();
+               new_config.forwarding_fee_base_msat += 100_000;
+               channel.update_config(&new_config);
+               new_route.paths[0][0].fee_msat += 100_000;
+       }
+
+       // Force expiration of the channel's previous config.
+       for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
+               nodes[1].node.timer_tick_occurred();
+       }
 
        assert!(nodes[0].node.retry_payment(&new_route, payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
        nodes[0].node.retry_payment(&new_route, payment_id).unwrap();
@@ -488,8 +583,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
+       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -506,8 +601,9 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_spends!(node_txn[2], node_txn[1]);
        let timeout_txn = vec![node_txn[2].clone()];
 
-       assert!(nodes[1].node.claim_funds(payment_preimage));
+       nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
 
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
@@ -515,6 +611,10 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(claim_txn.len(), 3);
+       check_spends!(claim_txn[0], node_txn[1]);
+       check_spends!(claim_txn[1], funding_tx);
+       check_spends!(claim_txn[2], claim_txn[1]);
 
        header.prev_blockhash = nodes[0].best_block_hash();
        connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
@@ -524,7 +624,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        }
 
        header.prev_blockhash = nodes[0].best_block_hash();
-       let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { claim_txn } };
+       let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] } };
 
        if payment_timeout {
                assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
@@ -547,7 +647,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
        let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
                .get_mut(&funding_txo).unwrap().drain().collect();
-       assert_eq!(mon_updates.len(), 1);
+       // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice
+       assert!(mon_updates.len() == 1 || mon_updates.len() == 2);
        assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 
@@ -563,7 +664,9 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        chanmon_cfgs[0].persister.set_update_ret(Ok(()));
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
-       nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
+       for update in mon_updates {
+               nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
+       }
        if payment_timeout {
                expect_payment_failed!(nodes[0], payment_hash, true);
        } else {
@@ -660,6 +763,8 @@ fn test_fulfill_restart_failure() {
 
        nodes[1].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], payment_hash, 100_000);
+
        let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
        expect_payment_sent_without_paths!(nodes[0], payment_preimage);
@@ -698,7 +803,7 @@ fn test_fulfill_restart_failure() {
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        nodes[1].node.fail_htlc_backwards(&payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
        check_added_monitors!(nodes[1], 1);
        let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
@@ -723,11 +828,13 @@ fn get_ldk_payment_preimage() {
 
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
-       let scorer = test_utils::TestScorer::with_fixed_penalty(0);
+       let scorer = test_utils::TestScorer::with_penalty(0);
+       let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
+       let random_seed_bytes = keys_manager.get_secure_random_bytes();
        let route = get_route(
-               &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph,
+               &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
                Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-               amt_msat, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap();
+               amt_msat, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
        let _payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
        check_added_monitors!(nodes[0], 1);
 
@@ -738,3 +845,132 @@ fn get_ldk_payment_preimage() {
        pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage));
        claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
 }
+
+#[test]
+fn sent_probe_is_probe_of_sending_node() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+       // First check we refuse to build a single-hop probe
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+       assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err());
+
+       // Then build an actual two-hop probing path
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
+
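+       // Only the sending node can recognize the returned payment hash as a probe: it is derived
+       // from a probing secret local to that node.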
+       match nodes[0].node.send_probe(route.paths[0].clone()) {
+               Ok((payment_hash, payment_id)) => {
+                       assert!(nodes[0].node.payment_is_probe(&payment_hash, &payment_id));
+                       assert!(!nodes[1].node.payment_is_probe(&payment_hash, &payment_id));
+                       assert!(!nodes[2].node.payment_is_probe(&payment_hash, &payment_id));
+               },
+               _ => panic!(),
+       }
+
+       get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+       check_added_monitors!(nodes[0], 1);
+}
+
+#[test]
+fn successful_probe_yields_event() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
+
+       let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
+
+       // node[0] -- update_add_htlcs -> node[1]
+       check_added_monitors!(nodes[0], 1);
+       let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+       let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+
+       // node[1] -- update_add_htlcs -> node[2]
+       check_added_monitors!(nodes[1], 1);
+       let updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+       let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &probe_event.msgs[0]);
+       check_added_monitors!(nodes[2], 0);
+       commitment_signed_dance!(nodes[2], nodes[1], probe_event.commitment_msg, true, true);
+
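+       // nodes[2] has no way to claim the probe HTLC (there is no matching payment), so it fails it
+       // back; a failure originating at the final hop is what the sender reports as ProbeSuccessful.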
+       // node[1] <- update_fail_htlcs -- node[2]
+       let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       check_added_monitors!(nodes[1], 0);
+       commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, true);
+
+       // node[0] <- update_fail_htlcs -- node[1]
+       let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       check_added_monitors!(nodes[0], 0);
+       commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events.drain(..).next().unwrap() {
+               crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+                       assert_eq!(payment_id, ev_pid);
+                       assert_eq!(payment_hash, ev_ph);
+               },
+               _ => panic!(),
+       };
+}
+
+#[test]
+fn failed_probe_yields_event() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000, InitFeatures::known(), InitFeatures::known());
+
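+       // nodes[1] funded the second channel but pushed most of its value to nodes[2], so it lacks
+       // the outbound liquidity to forward the 9_998_000 msat probe and will fail it back.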
+       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
+
+       let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000, 42);
+
+       let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
+
+       // node[0] -- update_add_htlcs -> node[1]
+       check_added_monitors!(nodes[0], 1);
+       let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+       let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+
+       // node[0] <- update_fail_htlcs -- node[1]
+       check_added_monitors!(nodes[1], 1);
+       let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       // Skip the PendingHTLCsForwardable event
+       let _events = nodes[1].node.get_and_clear_pending_events();
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+       check_added_monitors!(nodes[0], 0);
+       commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events.drain(..).next().unwrap() {
+               crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+                       assert_eq!(payment_id, ev_pid);
+                       assert_eq!(payment_hash, ev_ph);
+               },
+               _ => panic!(),
+       };
+}