Merge pull request #2006 from TheBlueMatt/2023-02-no-recursive-read-locks
[rust-lightning] / lightning / src / ln / payment_tests.rs
index d9d22b6ca6b5bac82ac8c66b26b1e08124c4c878..c4cd0fc1b09d1ca691654c7fbc32e256bb39ed41 100644
 
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::keysinterface::EntropySource;
 use crate::chain::transaction::OutPoint;
-use crate::chain::keysinterface::KeysInterface;
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use crate::ln::channelmanager::{self, BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS};
+use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
+use crate::ln::features::InvoiceFeatures;
 use crate::ln::msgs;
 use crate::ln::msgs::ChannelMessageHandler;
-use crate::routing::gossip::RoutingFees;
-use crate::routing::router::{get_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters};
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::outbound_payment::Retry;
+use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
+use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
+use crate::routing::scoring::ChannelUsage;
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
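
The reshuffled imports above reflect this release's keys-interface split: `KeysInterface` is gone and test code now obtains randomness through the `EntropySource` trait instead. A minimal sketch of that pattern, reusing only the constructor and call that appear later in this file (the free-standing helper function itself is illustrative, not part of the patch):

	use crate::chain::keysinterface::EntropySource;
	use crate::util::test_utils;
	use bitcoin::network::constants::Network;

	// Illustrative only: TestKeysInterface now provides randomness via EntropySource.
	fn test_random_bytes() -> [u8; 32] {
		let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
		keys_manager.get_secure_random_bytes()
	}
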
@@ -34,60 +37,11 @@ use crate::prelude::*;
 
 use crate::ln::functional_test_utils::*;
 use crate::routing::gossip::NodeId;
-
-#[test]
-fn retry_single_path_payment() {
-       let chanmon_cfgs = create_chanmon_cfgs(3);
-       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
-       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
-       let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       // Rebalance to find a route
-       send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
-
-       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
-
-       // Rebalance so that the first hop fails.
-       send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);
-
-       // Make sure the payment fails on the first hop.
-       let payment_id = PaymentId(payment_hash.0);
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       let mut payment_event = SendEvent::from_event(events.pop().unwrap());
-       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
-       check_added_monitors!(nodes[1], 0);
-       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
-       let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-       assert!(htlc_updates.update_add_htlcs.is_empty());
-       assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
-       assert!(htlc_updates.update_fulfill_htlcs.is_empty());
-       assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
-       check_added_monitors!(nodes[1], 1);
-       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
-       commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
-       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
-
-       // Rebalance the channel so the retry succeeds.
-       send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
-
-       // Mine two blocks (we expire retries after 3, so this will check that we don't expire early)
-       connect_blocks(&nodes[0], 2);
-
-       // Retry the payment and make sure it succeeds.
-       nodes[0].node.retry_payment(&route, payment_id).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
-       claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
-}
+#[cfg(feature = "std")]
+use {
+       crate::util::time::tests::SinceEpoch,
+       std::time::{SystemTime, Instant, Duration}
+};
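
With `ChannelManager::retry_payment` removed, the manual-retry tests dropped in this patch (`retry_single_path_payment` just above, `retry_expired_payment` further down) no longer have an API to drive: a payment is now given its `RouteParameters` and a `Retry` policy up front, and the `ChannelManager` performs retries itself, re-querying the router (pinned in tests via `expect_find_route`) and recording failed hops in `payment_params.previously_failed_channels`. A condensed sketch of the new flow, assuming the usual three-node `functional_test_utils` setup used by the surrounding tests:

	let amt_msat = 100_000;
	let (route, payment_hash, _, payment_secret) =
		get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
	let route_params = RouteParameters {
		payment_params: route.payment_params.clone().unwrap(),
		final_value_msat: amt_msat,
	};
	// Pin the route the test router will return, then let the ChannelManager retry
	// on its own (at most once here) rather than calling the removed retry_payment.
	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret),
		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
	check_added_monitors!(nodes[0], 1);
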
 
 #[test]
 fn mpp_failure() {
@@ -96,10 +50,10 @@ fn mpp_failure() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
-       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
-       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
-       let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+       let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
+       let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
 
        let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
        let path = route.paths[0].clone();
@@ -121,14 +75,15 @@ fn mpp_retry() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
+       let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
+       let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2);
        // Rebalance
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
 
-       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 1_000_000);
+       let amt_msat = 1_000_000;
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], amt_msat);
        let path = route.paths[0].clone();
        route.paths.push(path);
        route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
@@ -140,17 +95,23 @@ fn mpp_retry() {
 
        // Initiate the MPP payment.
        let payment_id = PaymentId(payment_hash.0);
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+       let mut route_params = RouteParameters {
+               payment_params: route.payment_params.clone().unwrap(),
+               final_value_msat: amt_msat,
+       };
+
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
        check_added_monitors!(nodes[0], 2); // one monitor per path
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
 
        // Pass half of the payment along the success path.
-       let success_path_msgs = events.remove(0);
+       let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
        pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);
 
        // Add the HTLC along the first hop.
-       let fail_path_msgs_1 = events.remove(0);
+       let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
        let (update_add, commitment_signed) = match fail_path_msgs_1 {
                MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                        assert_eq!(update_add_htlcs.len(), 1);
@@ -176,25 +137,23 @@ fn mpp_retry() {
        check_added_monitors!(nodes[2], 1);
        nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
-       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       match events[1] {
+               Event::PendingHTLCsForwardable { .. } => {},
+               _ => panic!("Unexpected event")
+       }
+       events.remove(1);
+       expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
 
        // Rebalance the channel so the second half of the payment can succeed.
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
 
-       // Make sure it errors as expected given a too-large amount.
-       if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, payment_id) {
-               assert!(err.contains("over total_payment_amt_msat"));
-       } else { panic!("Unexpected error"); }
-
-       // Make sure it errors as expected given the wrong payment_id.
-       if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId([0; 32])) {
-               assert!(err.contains("not found"));
-       } else { panic!("Unexpected error"); }
-
        // Retry the second half of the payment and make sure it succeeds.
-       let mut path = route.clone();
-       path.paths.remove(0);
-       nodes[0].node.retry_payment(&path, payment_id).unwrap();
+       route.paths.remove(0);
+       route_params.final_value_msat = 1_000_000;
+       route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
+       nodes[0].router.expect_find_route(route_params, Ok(route));
+       nodes[0].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[0], 1);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -208,10 +167,10 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
+       let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
+       let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3);
 
        let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
        let path = route.paths[0].clone();
@@ -230,7 +189,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
        assert_eq!(events.len(), 2);
 
        // Pass half of the payment along the first path.
-       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+       let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
 
        if send_partial_mpp {
                // Time out the partial MPP
@@ -257,7 +217,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
                expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
        } else {
                // Pass half of the payment along the second path.
-               pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+               let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+               pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);
 
 		// Even after MPP_TIMEOUT_TICKS we should not time out the MPP if we have all the parts
                for _ in 0..MPP_TIMEOUT_TICKS {
@@ -274,55 +235,6 @@ fn mpp_receive_timeout() {
        do_mpp_receive_timeout(false);
 }
 
-#[test]
-fn retry_expired_payment() {
-       let chanmon_cfgs = create_chanmon_cfgs(3);
-       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
-       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-
-       let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       // Rebalance to find a route
-       send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);
-
-       let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);
-
-       // Rebalance so that the first hop fails.
-       send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);
-
-       // Make sure the payment fails on the first hop.
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       let mut payment_event = SendEvent::from_event(events.pop().unwrap());
-       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
-       check_added_monitors!(nodes[1], 0);
-       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
-       let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-       assert!(htlc_updates.update_add_htlcs.is_empty());
-       assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
-       assert!(htlc_updates.update_fulfill_htlcs.is_empty());
-       assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
-       check_added_monitors!(nodes[1], 1);
-       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
-       commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
-       expect_payment_failed!(nodes[0], payment_hash, false);
-
-       // Mine blocks so the payment will have expired.
-       connect_blocks(&nodes[0], 3);
-
-       // Retry the payment and make sure it errors as expected.
-       if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId(payment_hash.0)) {
-               assert!(err.contains("not found"));
-       } else {
-               panic!("Unexpected error");
-       }
-}
-
 #[test]
 fn no_pending_leak_on_initial_send_failure() {
        // In an earlier version of our payment tracking, we'd have a retry entry even when the initial
@@ -336,16 +248,16 @@ fn no_pending_leak_on_initial_send_failure() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
                true, APIError::ChannelUnavailable { ref err },
-               assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));
+               assert_eq!(err, "Peer for first hop currently disconnected"));
 
        assert!(!nodes[0].node.has_pending_payments());
 }
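
The disconnect calls above use the updated peer-connection signatures found throughout this file: `peer_disconnected` no longer takes a `no_connection_possible` flag, and `peer_connected` now receives the peer's advertised `init_features` plus a bool marking the connection as inbound. A minimal disconnect/reconnect round-trip under those signatures:

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	// Reconnect; the new bool argument flags whether this connection is inbound,
	// mirroring the true/false ordering used in the tests in this file.
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(),
		&msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(),
		&msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
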
@@ -367,20 +279,25 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let persister: test_utils::TestPersister;
        let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-       let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 
        // Serialize the ChannelManager prior to sending payments
        let nodes_0_serialized = nodes[0].node.encode();
 
        // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
        // out and retry.
-       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+       let amt_msat = 1_000_000;
+       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
        let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+       let route_params = RouteParameters {
+               payment_params: route.payment_params.clone().unwrap(),
+               final_value_msat: amt_msat,
+       };
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
        check_added_monitors!(nodes[0], 1);
 
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -391,8 +308,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// We relay the payment to nodes[1] while it's disconnected from nodes[2], causing the payment
        // to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
        // which would prevent retry.
-       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+       nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
@@ -421,13 +338,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        assert_eq!(as_broadcasted_txn.len(), 1);
        assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
 
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -436,7 +353,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
+                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
                        check_added_monitors!(nodes[1], 1);
                        assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
                },
@@ -464,7 +381,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // Create a new channel on which to retry the payment before we fail the payment via the
        // HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
        // connecting several blocks while creating the channel (implying time has passed).
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       create_announced_chan_between_nodes(&nodes, 0, 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 
        mine_transaction(&nodes[1], &as_commitment_tx);
@@ -489,7 +406,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
        }
        nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
 
        // Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
 	// reloaded) via a route over the new channel, which will work without issue and eventually be
@@ -500,8 +417,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        // and not the original fee. We also update node[1]'s relevant config as
        // do_claim_payment_along_route expects us to never overpay.
        {
-               let mut channel_state = nodes[1].node.channel_state.lock().unwrap();
-               let mut channel = channel_state.by_id.get_mut(&chan_id_2).unwrap();
+               let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+               let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
+                       .unwrap().lock().unwrap();
+               let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
                let mut new_config = channel.config();
                new_config.forwarding_fee_base_msat += 100_000;
                channel.update_config(&new_config);
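
The block above shows the per-peer state split: channels are no longer looked up in a global `channel_state.by_id` map but sit behind the `per_peer_state` read lock and a per-counterparty mutex. Later tests in this file reach the same data through the `get_channel_ref!` macro; a rough sketch of the two equivalent access patterns (assuming the macro simply wraps the explicit locking sequence):

	{
		// Explicit locking, as in the hunk above:
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
			.unwrap().lock().unwrap();
		let _channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
	}
	{
		// The get_channel_ref! macro used further down wraps the same sequence:
		let mut per_peer_lock;
		let mut peer_state_lock;
		let _channel = get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_id_2);
	}
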
@@ -513,8 +432,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                nodes[1].node.timer_tick_occurred();
        }
 
-       assert!(nodes[0].node.retry_payment(&new_route, payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
-       nodes[0].node.retry_payment(&new_route, PaymentId(payment_hash.0)).unwrap();
+       assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
+       nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -545,13 +464,13 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 
        let first_persister: test_utils::TestPersister;
        let first_new_chain_monitor: test_utils::TestChainMonitor;
-       let first_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let first_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
        let second_persister: test_utils::TestPersister;
        let second_new_chain_monitor: test_utils::TestChainMonitor;
-       let second_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let second_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
        let third_persister: test_utils::TestPersister;
        let third_new_chain_monitor: test_utils::TestChainMonitor;
-       let third_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let third_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
 
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -562,7 +481,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // Ignore the announcement_signatures messages
        nodes[0].node.get_and_clear_pending_msg_events();
        nodes[1].node.get_and_clear_pending_msg_events();
-       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
        // Serialize the ChannelManager prior to sending payments
        let mut nodes_0_serialized = nodes[0].node.encode();
@@ -575,7 +494,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 
        reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
        // force-close the channel.
@@ -584,12 +503,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        assert!(nodes[0].node.has_pending_payments());
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
        // error, as the channel has hit the chain.
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
        let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
        let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -599,7 +518,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
+                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
                        check_added_monitors!(nodes[1], 1);
                        bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                },
@@ -648,7 +567,10 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
        // confirming, we will fail as it's considered still-pending...
        let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
-       assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+       match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+               Err(PaymentSendFailure::DuplicatePayment) => {},
+               _ => panic!("Unexpected error")
+       }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        // After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
@@ -656,42 +578,49 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // (which should also still work).
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       // We set mpp_parts_remain to avoid having abandon_payment called
-       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
        let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
        nodes_0_serialized = nodes[0].node.encode();
 
-       assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
+       // After the payment failed, we're free to send it again.
+       assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
        assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        // Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
        // the payment is not (spuriously) listed as still pending.
-       assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
+       assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
        check_added_monitors!(nodes[0], 1);
        pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
-       assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+       match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+               Err(PaymentSendFailure::DuplicatePayment) => {},
+               _ => panic!("Unexpected error")
+       }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
        let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
        let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
        nodes_0_serialized = nodes[0].node.encode();
 
-       // Ensure that after reload we cannot retry the payment.
+       // Check that after reload we can send the payment again (though we shouldn't, since it was
+       // claimed previously).
        reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
-       assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
+       match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+               Err(PaymentSendFailure::DuplicatePayment) => {},
+               _ => panic!("Unexpected error")
+       }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }
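
The three `DuplicatePayment` matches in this test replace the old blanket `is_err()` checks: re-sending with a `PaymentId` that is still pending, or that was recently fulfilled and is still tracked, is rejected with the dedicated error variant until enough timer ticks have passed for the payment to be forgotten. A sketch of that check, assuming `new_route`, `payment_hash`, `payment_secret` and `payment_id` as set up above:

	// A still-tracked PaymentId is rejected outright...
	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
		Err(PaymentSendFailure::DuplicatePayment) => {},
		_ => panic!("expected DuplicatePayment"),
	}
	// ...until IDEMPOTENCY_TIMEOUT_TICKS timer ticks let a fulfilled payment be removed.
	for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
		nodes[0].node.timer_tick_occurred();
	}
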
 
@@ -716,10 +645,10 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let persister: test_utils::TestPersister;
        let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
        // nodes[0].
@@ -729,8 +658,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
        // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
@@ -751,10 +680,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(claim_txn.len(), 3);
+       assert_eq!(claim_txn.len(), 1);
        check_spends!(claim_txn[0], node_txn[1]);
-       check_spends!(claim_txn[1], funding_tx);
-       check_spends!(claim_txn[2], claim_txn[1]);
 
        header.prev_blockhash = nodes[0].best_block_hash();
        connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
@@ -861,10 +788,10 @@ fn test_fulfill_restart_failure() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let persister: test_utils::TestPersister;
        let new_chain_monitor: test_utils::TestChainMonitor;
-       let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
        let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
@@ -883,7 +810,7 @@ fn test_fulfill_restart_failure() {
        // Now reload nodes[1]...
        reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
        nodes[1].node.fail_htlc_backwards(&payment_hash);
@@ -904,15 +831,15 @@ fn get_ldk_payment_preimage() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let amt_msat = 60_000;
        let expiry_secs = 60 * 60;
-       let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs).unwrap();
+       let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap();
 
-       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
-               .with_features(channelmanager::provided_invoice_features());
-       let scorer = test_utils::TestScorer::with_penalty(0);
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_features(nodes[1].node.invoice_features());
+       let scorer = test_utils::TestScorer::new();
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
        let route = get_route(
@@ -937,8 +864,8 @@ fn sent_probe_is_probe_of_sending_node() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       create_announced_chan_between_nodes(&nodes, 1, 2);
 
        // First check we refuse to build a single-hop probe
        let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
@@ -967,8 +894,8 @@ fn successful_probe_yields_event() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       create_announced_chan_between_nodes(&nodes, 1, 2);
 
        let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
 
@@ -1012,6 +939,7 @@ fn successful_probe_yields_event() {
                },
                _ => panic!(),
        };
+       assert!(!nodes[0].node.has_pending_payments());
 }
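
Each probe test now ends by asserting that the resolved probe has been removed from the set of pending outbound payments; probes are tracked there while in flight and dropped once their success or failure is surfaced. The closing check added to all three probe tests is simply:

	// After the ProbeSuccessful/ProbeFailed event, nothing should remain pending.
	assert!(!nodes[0].node.has_pending_payments());
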
 
 #[test]
@@ -1021,10 +949,10 @@ fn failed_probe_yields_event() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       create_announced_chan_between_nodes(&nodes, 0, 1);
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000);
 
-       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
+       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
 
        let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000, 42);
 
@@ -1057,6 +985,7 @@ fn failed_probe_yields_event() {
                },
                _ => panic!(),
        };
+       assert!(!nodes[0].node.has_pending_payments());
 }
 
 #[test]
@@ -1068,10 +997,10 @@ fn onchain_failed_probe_yields_event() {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
-       create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
+       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
 
        // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
        let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 1_000, 42);
@@ -1111,6 +1040,7 @@ fn onchain_failed_probe_yields_event() {
                }
        }
        assert!(found_probe_failed);
+       assert!(!nodes[0].node.has_pending_payments());
 }
 
 #[test]
@@ -1121,7 +1051,7 @@ fn claimed_send_payment_idempotent() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
        let (first_payment_preimage, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
@@ -1193,7 +1123,7 @@ fn abandoned_send_payment_idempotent() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
        let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
@@ -1223,20 +1153,17 @@ fn abandoned_send_payment_idempotent() {
        nodes[1].node.fail_htlc_backwards(&first_payment_hash);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
 
-       pass_failed_payment_back_no_abandon(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
-       check_send_rejected!();
-
-       // Until we abandon the payment, no matter how many timer ticks pass, we still cannot reuse the
+       // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the
        // PaymentId.
        for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
                nodes[0].node.timer_tick_occurred();
        }
        check_send_rejected!();
 
-       nodes[0].node.abandon_payment(payment_id);
-       get_event!(nodes[0], Event::PaymentFailed);
+       pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
 
-       // However, we can reuse the PaymentId immediately after we `abandon_payment`.
+       // However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
+       // failed payment back.
        nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
        check_added_monitors!(nodes[0], 1);
        pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
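
The inflight-HTLC test reworked in the final hunk below also exercises the new `list_recent_payments` API, which reports `RecentPaymentDetails` for outbound payments the `ChannelManager` is still tracking. A sketch of the two states it distinguishes there, assuming a 500_000 msat payment sent from nodes[0] and a `payment_hash` binding as in that test:

	// While unclaimed, the payment is reported as Pending with its total amount...
	assert_eq!(nodes[0].node.list_recent_payments()[0],
		RecentPaymentDetails::Pending { payment_hash, total_msat: 500000 });
	// ...and after being claimed it shows up as Fulfilled until IDEMPOTENCY_TIMEOUT_TICKS
	// timer ticks remove it.
	assert_eq!(nodes[0].node.list_recent_payments()[0],
		RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash) });
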
@@ -1261,84 +1188,115 @@ fn test_trivial_inflight_htlc_tracking(){
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-       let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 
        // Send and claim the payment. Inflight HTLCs should be empty.
-       send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+       let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
+       let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
-               let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
-               let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
-               let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
-               let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
-               let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
+               let mut node_0_per_peer_lock;
+               let mut node_0_peer_state_lock;
+               let channel_1 =  get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
 
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        channel_1.get_short_channel_id().unwrap()
                );
+               assert_eq!(chan_1_used_liquidity, None);
+       }
+       {
+               let mut node_1_per_peer_lock;
+               let mut node_1_peer_state_lock;
+               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
                        channel_2.get_short_channel_id().unwrap()
                );
 
-               assert_eq!(chan_1_used_liquidity, None);
                assert_eq!(chan_2_used_liquidity, None);
        }
+       let pending_payments = nodes[0].node.list_recent_payments();
+       assert_eq!(pending_payments.len(), 1);
+       assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash) });
+
+       // Remove fulfilled payment
+       for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
 
        // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
-       let (payment_preimage, _,  _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+       let (payment_preimage, payment_hash,  _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
+       let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
-               let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
-               let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
-               let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
-               let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
-               let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
+               let mut node_0_per_peer_lock;
+               let mut node_0_peer_state_lock;
+               let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
 
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        channel_1.get_short_channel_id().unwrap()
                );
+               // First hop accounts for expected 1000 msat fee
+               assert_eq!(chan_1_used_liquidity, Some(501000));
+       }
+       {
+               let mut node_1_per_peer_lock;
+               let mut node_1_peer_state_lock;
+               let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
                        channel_2.get_short_channel_id().unwrap()
                );
 
-               // First hop accounts for expected 1000 msat fee
-               assert_eq!(chan_1_used_liquidity, Some(501000));
                assert_eq!(chan_2_used_liquidity, Some(500000));
        }
+       let pending_payments = nodes[0].node.list_recent_payments();
+       assert_eq!(pending_payments.len(), 1);
+       assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_hash, total_msat: 500000 });
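+       // Note that `total_msat` reflects the amount forwarded to the recipient; it does not include
+       // the 1000 msat routing fee accounted for on the first hop above.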
 
        // Now, let's claim the payment. This should result in the used liquidity returning `None`.
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
-       {
-               let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
 
-               let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
-               let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
-               let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
-               let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
+       // Remove fulfilled payment
+       for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
+       {
+               let mut node_0_per_peer_lock;
+               let mut node_0_peer_state_lock;
+               let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
 
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        channel_1.get_short_channel_id().unwrap()
                );
+               assert_eq!(chan_1_used_liquidity, None);
+       }
+       {
+               let mut node_1_per_peer_lock;
+               let mut node_1_peer_state_lock;
+               let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
                        channel_2.get_short_channel_id().unwrap()
                );
-
-               assert_eq!(chan_1_used_liquidity, None);
                assert_eq!(chan_2_used_liquidity, None);
        }
+
+       let pending_payments = nodes[0].node.list_recent_payments();
+       assert_eq!(pending_payments.len(), 0);
 }
 
 #[test]
@@ -1347,7 +1305,7 @@ fn test_holding_cell_inflight_htlcs() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
@@ -1364,8 +1322,9 @@ fn test_holding_cell_inflight_htlcs() {
        let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
 
        {
-               let channel_lock = nodes[0].node.channel_state.lock().unwrap();
-               let channel = channel_lock.by_id.get(&channel_id).unwrap();
+               let mut node_0_per_peer_lock;
+               let mut node_0_peer_state_lock;
+               let channel = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id);
 
                let used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
@@ -1402,14 +1361,14 @@ fn do_test_intercepted_payment(test: InterceptTest) {
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]);
 
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-       let scorer = test_utils::TestScorer::with_penalty(0);
+       let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
 
-       let _ = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+       let _ = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
        let amt_msat = 100_000;
        let intercept_scid = nodes[1].node.get_intercept_scid();
-       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_route_hints(vec![
                        RouteHint(vec![RouteHintHop {
                                src_node_id: nodes[1].node.get_our_node_id(),
@@ -1423,19 +1382,19 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                                htlc_maximum_msat: None,
                        }])
                ])
-               .with_features(channelmanager::provided_invoice_features());
+               .with_features(nodes[2].node.invoice_features());
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
-               final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
        let route = get_route(
                &nodes[0].node.get_our_node_id(), &route_params.payment_params,
                &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
-               route_params.final_cltv_expiry_delta, nodes[0].logger, &scorer, &random_seed_bytes
+               route_params.payment_params.final_cltv_expiry_delta, nodes[0].logger, &scorer,
+               &random_seed_bytes,
        ).unwrap();
 
-       let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60).unwrap();
+       let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
        let payment_event = {
                {
@@ -1467,7 +1426,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 
        // Check for unknown channel id error.
        let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
-       assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable  { err: format!("Channel with id {} not found", log_bytes!([42; 32])) });
+       assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable  { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
 
        if test == InterceptTest::Fail {
                // Ensure we can fail the intercepted payment back.
@@ -1516,7 +1475,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                expect_pending_htlcs_forwardable!(nodes[2]);
 
                let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
-               expect_payment_received!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id());
+               expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id());
                do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[1], &nodes[2])[..]), false, payment_preimage);
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 2);
@@ -1566,3 +1525,1226 @@ fn do_test_intercepted_payment(test: InterceptTest) {
                assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
        }
 }
+
+#[derive(PartialEq)]
+enum AutoRetry {
+       Success,
+       Spontaneous,
+       FailAttempts,
+       FailTimeout,
+       FailOnRestart,
+       FailOnRetry,
+}
+
+#[test]
+fn automatic_retries() {
+       do_automatic_retries(AutoRetry::Success);
+       do_automatic_retries(AutoRetry::Spontaneous);
+       do_automatic_retries(AutoRetry::FailAttempts);
+       do_automatic_retries(AutoRetry::FailTimeout);
+       do_automatic_retries(AutoRetry::FailOnRestart);
+       do_automatic_retries(AutoRetry::FailOnRetry);
+}
+fn do_automatic_retries(test: AutoRetry) {
+       // Test basic automatic payment retries in ChannelManager. See individual `test` variant comments
+       // below.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+       let persister;
+       let new_chain_monitor;
+       let node_0_deserialized;
+
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+       let channel_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let channel_id_2 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
+
+       // Marshall data to send the payment
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let amt_msat = 1000;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
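+       // Only the payment hash/preimage/secret from the macro below are used; the route it returns
+       // is discarded, since the ChannelManager finds routes itself (via its Router) when sending
+       // and retrying.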
+       let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
+
+       macro_rules! pass_failed_attempt_with_retry_along_path {
+               ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => {
+                       // Send a payment attempt that fails due to lack of liquidity on the second hop
+                       check_added_monitors!(nodes[0], 1);
+                       let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+                       let mut update_add = update_0.update_add_htlcs[0].clone();
+                       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
+                       commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
+                       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+                       nodes[1].node.process_pending_htlc_forwards();
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
+                               vec![HTLCDestination::NextHopChannel {
+                                       node_id: Some(nodes[2].node.get_our_node_id()),
+                                       channel_id: $failing_channel_id,
+                               }]);
+                       nodes[1].node.process_pending_htlc_forwards();
+                       let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+                       check_added_monitors!(&nodes[1], 1);
+                       assert!(update_1.update_fail_htlcs.len() == 1);
+                       let fail_msg = update_1.update_fail_htlcs[0].clone();
+                       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
+                       commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false);
+
+                       // Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry
+                       let mut events = nodes[0].node.get_and_clear_pending_events();
+                       assert_eq!(events.len(), 2);
+                       match events[0] {
+                               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
+                                       assert_eq!(payment_hash, ev_payment_hash);
+                                       assert_eq!(payment_failed_permanently, false);
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+                       if $expect_pending_htlcs_forwardable {
+                               match events[1] {
+                                       Event::PendingHTLCsForwardable { .. } => {},
+                                       _ => panic!("Unexpected event"),
+                               }
+                       } else {
+                               match events[1] {
+                                       Event::PaymentFailed { payment_hash: ev_payment_hash, .. } => {
+                                               assert_eq!(payment_hash, ev_payment_hash);
+                                       },
+                                       _ => panic!("Unexpected event"),
+                               }
+                       }
+               }
+       }
+
+       if test == AutoRetry::Success {
+               // Test that we can succeed on the first retry.
+               nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+               pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+               // Open a new channel with liquidity on the second hop so we can find a route for the retry
+               // attempt, since the initial second hop channel will be excluded from pathfinding
+               create_announced_chan_between_nodes(&nodes, 1, 2);
+
+               // We retry payments in `process_pending_htlc_forwards`
+               nodes[0].node.process_pending_htlc_forwards();
+               check_added_monitors!(nodes[0], 1);
+               let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None);
+               claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
+       } else if test == AutoRetry::Spontaneous {
+               nodes[0].node.send_spontaneous_payment_with_retry(Some(payment_preimage), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+               pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+               // Open a new channel with liquidity on the second hop so we can find a route for the retry
+               // attempt, since the initial second hop channel will be excluded from pathfinding
+               create_announced_chan_between_nodes(&nodes, 1, 2);
+
+               // We retry payments in `process_pending_htlc_forwards`
+               nodes[0].node.process_pending_htlc_forwards();
+               check_added_monitors!(nodes[0], 1);
+               let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 1);
+               pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, None, msg_events.pop().unwrap(), true, Some(payment_preimage));
+               claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
+       } else if test == AutoRetry::FailAttempts {
+               // Ensure ChannelManager will not retry a payment if it has run out of payment attempts.
+               nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+               pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+               // Open a new channel with no liquidity on the second hop so we can find a (bad) route for
+               // the retry attempt, since the initial second hop channel will be excluded from pathfinding
+               let channel_id_3 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
+
+               // We retry payments in `process_pending_htlc_forwards`
+               nodes[0].node.process_pending_htlc_forwards();
+               pass_failed_attempt_with_retry_along_path!(channel_id_3, false);
+
+               // Ensure we won't retry a second time.
+               nodes[0].node.process_pending_htlc_forwards();
+               let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 0);
+       } else if test == AutoRetry::FailTimeout {
+               #[cfg(not(feature = "no-std"))] {
+                       // Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
+                       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
+                       pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+                       // Advance the time so the second attempt fails due to timeout.
+                       SinceEpoch::advance(Duration::from_secs(61));
+
+                       // Make sure we don't retry again.
+                       nodes[0].node.process_pending_htlc_forwards();
+                       let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+                       assert_eq!(msg_events.len(), 0);
+
+                       let mut events = nodes[0].node.get_and_clear_pending_events();
+                       assert_eq!(events.len(), 1);
+                       match events[0] {
+                               Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+                                       assert_eq!(payment_hash, *ev_payment_hash);
+                                       assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+               }
+       } else if test == AutoRetry::FailOnRestart {
+               // Ensure ChannelManager will not retry a payment after restart, even if there were retry
+               // attempts remaining prior to restart.
+               nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap();
+               pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+               // Open a new channel with no liquidity on the second hop so we can find a (bad) route for
+               // the retry attempt, since the initial second hop channel will be excluded from pathfinding
+               let channel_id_3 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
+
+               // Ensure the first retry attempt fails, with 1 retry attempt remaining
+               nodes[0].node.process_pending_htlc_forwards();
+               pass_failed_attempt_with_retry_along_path!(channel_id_3, true);
+
+               // Restart the node and ensure that ChannelManager does not use its remaining retry attempt
+               let node_encoded = nodes[0].node.encode();
+               let chan_1_monitor_serialized = get_monitor!(nodes[0], channel_id_1).encode();
+               reload_node!(nodes[0], node_encoded, &[&chan_1_monitor_serialized], persister, new_chain_monitor, node_0_deserialized);
+
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true);
+               // Make sure we don't retry again.
+               let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 0);
+
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+                               assert_eq!(payment_hash, *ev_payment_hash);
+                               assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       } else if test == AutoRetry::FailOnRetry {
+               nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+               pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
+
+               // We retry payments in `process_pending_htlc_forwards`. Since the previously-failing
+               // second-hop channel is now excluded from pathfinding and no alternative exists, we
+               // should fail to find a route.
+               nodes[0].node.process_pending_htlc_forwards();
+               let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(msg_events.len(), 0);
+
+               let mut events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+                               assert_eq!(payment_hash, *ev_payment_hash);
+                               assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+}
+
+#[test]
+fn auto_retry_partial_failure() {
+       // Test that we'll retry appropriately on send partial failure and retry partial failure.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+       let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+       let chan_3_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+
+       // Marshall data to send the payment
+       let amt_msat = 20_000;
+       let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
+       // second (for the initial send path2 over chan_2) fails.
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
+       // Ensure the third monitor update (for retry1's path1 over chan_1) succeeds, but the fourth
+       // (for retry1's path2 over chan_3) fails, and that monitor updates succeed after that.
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
+
+       // Configure the initial send, retry1 and retry2's paths.
+       let send_route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_id,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: amt_msat / 2,
+                               cltv_expiry_delta: 100,
+                       }],
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_2_id,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: amt_msat / 2,
+                               cltv_expiry_delta: 100,
+                       }],
+               ],
+               payment_params: Some(route_params.payment_params.clone()),
+       };
+       let retry_1_route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_id,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: amt_msat / 4,
+                               cltv_expiry_delta: 100,
+                       }],
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_3_id,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: amt_msat / 4,
+                               cltv_expiry_delta: 100,
+                       }],
+               ],
+               payment_params: Some(route_params.payment_params.clone()),
+       };
+       let retry_2_route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_id,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: amt_msat / 4,
+                               cltv_expiry_delta: 100,
+                       }],
+               ],
+               payment_params: Some(route_params.payment_params.clone()),
+       };
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
+       let mut payment_params = route_params.payment_params.clone();
+       payment_params.previously_failed_channels.push(chan_2_id);
+       nodes[0].router.expect_find_route(RouteParameters {
+                       payment_params, final_value_msat: amt_msat / 2,
+               }, Ok(retry_1_route));
+       let mut payment_params = route_params.payment_params.clone();
+       payment_params.previously_failed_channels.push(chan_3_id);
+       nodes[0].router.expect_find_route(RouteParameters {
+                       payment_params, final_value_msat: amt_msat / 4,
+               }, Ok(retry_2_route));
+
+       // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
+       let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(closed_chan_events.len(), 4);
+       match closed_chan_events[0] {
+               Event::ChannelClosed { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       match closed_chan_events[1] {
+               Event::PaymentPathFailed { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       match closed_chan_events[2] {
+               Event::ChannelClosed { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       match closed_chan_events[3] {
+               Event::PaymentPathFailed { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+
+       // Pass the first part of the payment along the path.
+       check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
+       let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+
+       // The first message is the first update_add; the remaining messages broadcast channel updates
+       // and report errors for the perm-failed channels
+       assert_eq!(msg_events.len(), 5);
+       let mut payment_event = SendEvent::from_event(msg_events.remove(0));
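+
+       // The four remaining messages should all relate to the two force-closed channels, i.e.
+       // channel update broadcasts and error handling messages (a light sanity check):
+       for ev in msg_events.iter() {
+               match ev {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } | MessageSendEvent::HandleError { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+       }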
+
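+       // Walk nodes[0] and nodes[1] through the commitment_signed dance: the first path's HTLC goes
+       // out immediately, while the two retry HTLCs are only released once nodes[1] has revoked.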
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+       let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+       check_added_monitors!(nodes[0], 1);
+       let as_second_htlc_updates = SendEvent::from_node(&nodes[0]);
+
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
+       check_added_monitors!(nodes[0], 1);
+       let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+       check_added_monitors!(nodes[1], 1);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[0]);
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[1]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+       let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
+       check_added_monitors!(nodes[0], 1);
+       let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+       check_added_monitors!(nodes[1], 1);
+
+       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+       nodes[1].node.process_pending_htlc_forwards();
+       expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat);
+       nodes[1].node.claim_funds(payment_preimage);
+       expect_payment_claimed!(nodes[1], payment_hash, amt_msat);
+       let bs_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1);
+
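+       // Only the first HTLC's fulfill is available immediately; the fulfills for the two retry
+       // HTLCs arrive in a second claim update once nodes[0] has revoked below.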
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+       let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa);
+       check_added_monitors!(nodes[1], 4);
+       let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
+       check_added_monitors!(nodes[1], 1);
+       let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]);
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+       let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_fourth_raa);
+       check_added_monitors!(nodes[1], 1);
+
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs);
+       check_added_monitors!(nodes[1], 1);
+       let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+       check_added_monitors!(nodes[0], 1);
+       expect_payment_sent!(nodes[0], payment_preimage);
+}
+
+#[test]
+fn auto_retry_zero_attempts_send_error() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+       create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+
+       // Marshall data to send the payment
+       let amt_msat = 20_000;
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+       assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 3);
+       if let Event::ChannelClosed { .. } = events[0] { } else { panic!(); }
+       if let Event::PaymentPathFailed { .. } = events[1] { } else { panic!(); }
+       if let Event::PaymentFailed { .. } = events[2] { } else { panic!(); }
+       check_added_monitors!(nodes[0], 2);
+}
+
+#[test]
+fn fails_paying_after_rejected_by_payee() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+
+       // Marshall data to send the payment
+       let amt_msat = 20_000;
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       let mut payment_event = SendEvent::from_event(events.pop().unwrap());
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+       check_added_monitors!(nodes[1], 0);
+       commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat);
+
+       nodes[1].node.fail_htlc_backwards(&payment_hash);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash }]);
+       pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash);
+}
+
+#[test]
+fn retry_multi_path_single_failed_payment() {
+       // Tests that we can/will retry after a single path of an MPP payment failed immediately
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+
+       let amt_msat = 100_010_000;
+
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params: payment_params.clone(),
+               final_value_msat: amt_msat,
+       };
+
+       let chans = nodes[0].node.list_usable_channels();
+       let mut route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chans[0].short_channel_id.unwrap(),
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 10_000,
+                               cltv_expiry_delta: 100,
+                       }],
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chans[1].short_channel_id.unwrap(),
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 100_000_001, // One more than our default max-HTLC-value, which is 10% of the channel value
+                               cltv_expiry_delta: 100,
+                       }],
+               ],
+               payment_params: Some(payment_params),
+       };
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       // On retry, split the payment across both channels.
+       route.paths[0][0].fee_msat = 50_000_001;
+       route.paths[1][0].fee_msat = 50_000_000;
+       let mut pay_params = route.payment_params.clone().unwrap();
+       pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
+       nodes[0].router.expect_find_route(RouteParameters {
+                       payment_params: pay_params,
+                       // Note that the second request here requests the amount we originally failed to send,
+                       // not the amount remaining on the full payment, which should be changed.
+                       final_value_msat: 100_000_001,
+               }, Ok(route.clone()));
+
+       {
+               let scorer = chanmon_cfgs[0].scorer.lock().unwrap();
+               // The initial send attempt, 2 paths
+               scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
+               scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
+               // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in.
+               scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity: EffectiveCapacity::Unknown });
+               scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
+       }
+
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
+                       failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
+                       short_channel_id: Some(expected_scid), .. } =>
+               {
+                       assert_eq!(payment_hash, ev_payment_hash);
+                       assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
+                       assert!(err_msg.contains("max HTLC"));
+               },
+               _ => panic!("Unexpected event"),
+       }
+       let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(htlc_msgs.len(), 2);
+       check_added_monitors!(nodes[0], 2);
+}
+
+#[test]
+fn immediate_retry_on_failure() {
+       // Tests that we can/will retry immediately after a failure
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+
+       let amt_msat = 100_000_001;
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       let chans = nodes[0].node.list_usable_channels();
+       let mut route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chans[0].short_channel_id.unwrap(),
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 100_000_001, // One more than our default max-HTLC-value, which is 10% of the channel value
+                               cltv_expiry_delta: 100,
+                       }],
+               ],
+               payment_params: Some(PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)),
+       };
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       // On retry, split the payment across both channels.
+       route.paths.push(route.paths[0].clone());
+       route.paths[0][0].short_channel_id = chans[1].short_channel_id.unwrap();
+       route.paths[0][0].fee_msat = 50_000_000;
+       route.paths[1][0].fee_msat = 50_000_001;
+       let mut pay_params = route_params.payment_params.clone();
+       pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
+       nodes[0].router.expect_find_route(RouteParameters {
+                       payment_params: pay_params, final_value_msat: amt_msat,
+               }, Ok(route.clone()));
+
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
+                       failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
+                       short_channel_id: Some(expected_scid), .. } =>
+               {
+                       assert_eq!(payment_hash, ev_payment_hash);
+                       assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
+                       assert!(err_msg.contains("max HTLC"));
+               },
+               _ => panic!("Unexpected event"),
+       }
+       let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(htlc_msgs.len(), 2);
+       check_added_monitors!(nodes[0], 2);
+}
+
+#[test]
+fn no_extra_retries_on_back_to_back_fail() {
+       // In a previous release, we had a race where we could exceed the payment retry count if we
+       // got two failures in a row, with the second indicating that all paths had failed (this
+       // field, `all_paths_failed`, has since been removed).
+       // Generally, when we give up retrying a payment, we don't know for sure what the current
+       // state of the ChannelManager event queue is. Specifically, we cannot be sure that there
+       // aren't multiple additional `PaymentPathFailed` or even `PaymentSent` events pending which
+       // we will see later. Thus, when we previously removed the retry-tracking map entry after an
+       // `all_paths_failed` `PaymentPathFailed` event, we may have dropped the retry entry even
+       // though more events for the same payment were still pending. This led to us retrying a
+       // payment again even though we'd already given up on it.
+       //
+       // We now have a separate event, `PaymentFailed`, which indicates that no HTLCs remain and
+       // which is used to remove the payment retry counter entries instead. This tests the specific
+       // excess-retry case while also testing `PaymentFailed` generation.
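+       //
+       // As a rough sketch of the consumer-side contract described above (assuming a hypothetical
+       // `retries` map keyed by `PaymentId`, not part of this test):
+       //
+       //     match event {
+       //             // Per-path failure: more events for this payment may still be pending, so any
+       //             // retry state must be kept around.
+       //             Event::PaymentPathFailed { .. } => {},
+       //             // Terminal failure: no HTLCs remain, so the retry state can safely be dropped.
+       //             Event::PaymentFailed { payment_id, .. } => { retries.remove(&payment_id); },
+       //             _ => {},
+       //     }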
+
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
+       let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
+
+       let amt_msat = 200_000_000;
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       let mut route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_scid,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
+                               cltv_expiry_delta: 100,
+                       }, RouteHop {
+                               pubkey: nodes[2].node.get_our_node_id(),
+                               node_features: nodes[2].node.node_features(),
+                               short_channel_id: chan_2_scid,
+                               channel_features: nodes[2].node.channel_features(),
+                               fee_msat: 100_000_000,
+                               cltv_expiry_delta: 100,
+                       }],
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_scid,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
+                               cltv_expiry_delta: 100,
+                       }, RouteHop {
+                               pubkey: nodes[2].node.get_our_node_id(),
+                               node_features: nodes[2].node.node_features(),
+                               short_channel_id: chan_2_scid,
+                               channel_features: nodes[2].node.channel_features(),
+                               fee_msat: 100_000_000,
+                               cltv_expiry_delta: 100,
+                       }]
+               ],
+               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+       };
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       let mut second_payment_params = route_params.payment_params.clone();
+       second_payment_params.previously_failed_channels = vec![chan_2_scid, chan_2_scid];
+       // On retry, we'll only return one path
+       route.paths.remove(1);
+       route.paths[0][1].fee_msat = amt_msat;
+       nodes[0].router.expect_find_route(RouteParameters {
+                       payment_params: second_payment_params,
+                       final_value_msat: amt_msat,
+               }, Ok(route.clone()));
+
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
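+       // Deliver both HTLCs of the initial attempt and walk through the commitment_signed dances;
+       // nodes[1] will fail each HTLC back since the route does not pay its forwarding fee.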
+       let htlc_updates = SendEvent::from_node(&nodes[0]);
+       check_added_monitors!(nodes[0], 1);
+       assert_eq!(htlc_updates.msgs.len(), 1);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+       let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+       check_added_monitors!(nodes[0], 1);
+       let second_htlc_updates = SendEvent::from_node(&nodes[0]);
+
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
+       check_added_monitors!(nodes[0], 1);
+       let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+       let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+       check_added_monitors!(nodes[1], 1);
+       let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+       let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+       check_added_monitors!(nodes[1], 1);
+       let bs_second_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
+       check_added_monitors!(nodes[1], 1);
+       let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.update_fail_htlcs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+       check_added_monitors!(nodes[0], 1);
+       let (as_third_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa);
+       check_added_monitors!(nodes[1], 1);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs);
+       check_added_monitors!(nodes[1], 1);
+       let bs_fourth_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_fourth_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two
+       // pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second
+       // with it set.
+       //
+       // Previously, we retried payments in an event consumer, which would retry each
+       // `PaymentPathFailed` individually. In that setup, we had retried the payment in response to
+       // the first `PaymentPathFailed`, then seen the second `PaymentPathFailed` with
+       // `all_paths_failed` set and assumed the payment was completely failed. We ultimately fixed it
+       // by adding the `PaymentFailed` event.
+       //
+       // Because we now retry payments as a batch, we simply return a single-path route in the
+       // second, batched, request, have that fail, and then ensure the payment was abandoned.
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 3);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
+                       assert_eq!(payment_hash, ev_payment_hash);
+                       assert_eq!(payment_failed_permanently, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PendingHTLCsForwardable { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       match events[2] {
+               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
+                       assert_eq!(payment_hash, ev_payment_hash);
+                       assert_eq!(payment_failed_permanently, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+
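+       // Handling the `PendingHTLCsForwardable` event (via `process_pending_htlc_forwards`) is
+       // what kicks off the batched retry, producing the single-path retry HTLC we queued in the
+       // router above.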
+       nodes[0].node.process_pending_htlc_forwards();
+       let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
+       let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true);
+
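+       // The retry fails too (the single-path route still pays nodes[1] no fee), and with our one
+       // allowed retry attempt used up the payment is abandoned, so we see both a
+       // `PaymentPathFailed` and a `PaymentFailed` event.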
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
+                       assert_eq!(payment_hash, ev_payment_hash);
+                       assert_eq!(payment_failed_permanently, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+                       assert_eq!(payment_hash, *ev_payment_hash);
+                       assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
+#[test]
+fn test_simple_partial_retry() {
+       // In the first version of the in-`ChannelManager` payment retries, retries were sent for the
+       // full amount of the payment, rather than only the missing amount. Here we simply test for
+       // this by sending a payment with two parts, failing one, and retrying only the failed part.
+       // Note that `TestRouter` will check that the `RouteParameters` (which contain the amount)
+       // matches the request.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
+       let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
+
+       let amt_msat = 200_000_000;
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
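+       // The payee must advertise a payment secret and MPP support for the two-part route below.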
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       let mut route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_scid,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
+                               cltv_expiry_delta: 100,
+                       }, RouteHop {
+                               pubkey: nodes[2].node.get_our_node_id(),
+                               node_features: nodes[2].node.node_features(),
+                               short_channel_id: chan_2_scid,
+                               channel_features: nodes[2].node.channel_features(),
+                               fee_msat: 100_000_000,
+                               cltv_expiry_delta: 100,
+                       }],
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_scid,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 100_000,
+                               cltv_expiry_delta: 100,
+                       }, RouteHop {
+                               pubkey: nodes[2].node.get_our_node_id(),
+                               node_features: nodes[2].node.node_features(),
+                               short_channel_id: chan_2_scid,
+                               channel_features: nodes[2].node.channel_features(),
+                               fee_msat: 100_000_000,
+                               cltv_expiry_delta: 100,
+                       }]
+               ],
+               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+       };
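+       // The first path pays nodes[1] no fee and will be failed back; the second pays the fee and
+       // gets forwarded, so only half of the payment needs to be retried.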
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       let mut second_payment_params = route_params.payment_params.clone();
+       second_payment_params.previously_failed_channels = vec![chan_2_scid];
+       // On retry, we'll only be asked for one path (or 100k sats)
+       route.paths.remove(0);
+       nodes[0].router.expect_find_route(RouteParameters {
+                       payment_params: second_payment_params,
+                       final_value_msat: amt_msat / 2,
+               }, Ok(route.clone()));
+
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+       let htlc_updates = SendEvent::from_node(&nodes[0]);
+       check_added_monitors!(nodes[0], 1);
+       assert_eq!(htlc_updates.msgs.len(), 1);
+
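+       // As in the test above, the second HTLC waits in nodes[0]'s holding cell until the first
+       // RAA comes back, so we walk the two HTLCs through the commitment dance one at a time.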
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+       let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+       check_added_monitors!(nodes[0], 1);
+       let second_htlc_updates = SendEvent::from_node(&nodes[0]);
+
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
+       check_added_monitors!(nodes[0], 1);
+       let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
+       let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+       check_added_monitors!(nodes[1], 1);
+       let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+       let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+       check_added_monitors!(nodes[1], 1);
+
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
+       check_added_monitors!(nodes[1], 1);
+
+       let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+       check_added_monitors!(nodes[0], 1);
+
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
+                       assert_eq!(payment_hash, ev_payment_hash);
+                       assert_eq!(payment_failed_permanently, false);
+               },
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               Event::PendingHTLCsForwardable { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+
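+       // Handling the forwardable event retries only the failed half, using the single-path route
+       // we queued in the router above.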
+       nodes[0].node.process_pending_htlc_forwards();
+       let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
+       check_added_monitors!(nodes[0], 1);
+
+       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
+       commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
+
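+       // nodes[1] now forwards both parts (the original second part and the retried first part)
+       // on to nodes[2] in a single commitment update.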
+       expect_pending_htlcs_forwardable!(nodes[1]);
+       check_added_monitors!(nodes[1], 1);
+
+       let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
+       nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
+       commitment_signed_dance!(nodes[2], nodes[1], &bs_forward_update.commitment_signed, false);
+
+       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat);
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn test_threaded_payment_retries() {
+       // In the first version of the in-`ChannelManager` payment retries, retries weren't limited to
+       // a single thread and would happily let multiple threads run retries at the same time. Because
+       // retries are done by first calculating the amount we need to retry, then dropping the
+       // relevant lock, then actually sending, we would happily let multiple threads retry the same
+       // amount at the same time, overpaying our original HTLC!
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       // There is one mitigating guardrail when retrying payments - we can never over-pay by more
+       // than 10% of the original value. Thus, we want all our retries to be below that. In order to
+       // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest
+       // out over channels 3+4. This will let us ignore 99.9% of the payment value and deal with only
+       // our channel.
+       let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
+       create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0);
+       let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id;
+       let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id;
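+       // Topology: nodes[0] - nodes[1] - nodes[3] carries the tiny part we keep failing and
+       // retrying; nodes[0] - nodes[2] - nodes[3] carries the other 99.9%.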
+
+       let amt_msat = 100_000_000;
+       let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
+       #[cfg(feature = "std")]
+       let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+       #[cfg(not(feature = "std"))]
+       let payment_expiry_secs = 60 * 60;
+       let mut invoice_features = InvoiceFeatures::empty();
+       invoice_features.set_variable_length_onion_required();
+       invoice_features.set_payment_secret_required();
+       invoice_features.set_basic_mpp_optional();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_expiry_time(payment_expiry_secs as u64)
+               .with_features(invoice_features);
+       let mut route_params = RouteParameters {
+               payment_params,
+               final_value_msat: amt_msat,
+       };
+
+       let mut route = Route {
+               paths: vec![
+                       vec![RouteHop {
+                               pubkey: nodes[1].node.get_our_node_id(),
+                               node_features: nodes[1].node.node_features(),
+                               short_channel_id: chan_1_scid,
+                               channel_features: nodes[1].node.channel_features(),
+                               fee_msat: 0,
+                               cltv_expiry_delta: 100,
+                       }, RouteHop {
+                               pubkey: nodes[3].node.get_our_node_id(),
+                               node_features: nodes[3].node.node_features(),
+                               short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown
+                               channel_features: nodes[3].node.channel_features(),
+                               fee_msat: amt_msat / 1000,
+                               cltv_expiry_delta: 100,
+                       }],
+                       vec![RouteHop {
+                               pubkey: nodes[2].node.get_our_node_id(),
+                               node_features: nodes[2].node.node_features(),
+                               short_channel_id: chan_3_scid,
+                               channel_features: nodes[2].node.channel_features(),
+                               fee_msat: 100_000,
+                               cltv_expiry_delta: 100,
+                       }, RouteHop {
+                               pubkey: nodes[3].node.get_our_node_id(),
+                               node_features: nodes[3].node.node_features(),
+                               short_channel_id: chan_4_scid,
+                               channel_features: nodes[3].node.channel_features(),
+                               fee_msat: amt_msat - amt_msat / 1000,
+                               cltv_expiry_delta: 100,
+                       }]
+               ],
+               payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+       };
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+
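+       // Allow (effectively) unlimited retries; the test ends on a timer, not by exhausting the
+       // retry count.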
+       nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
+       check_added_monitors!(nodes[0], 2);
+       let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(send_msg_events.len(), 2);
+       send_msg_events.retain(|msg|
+               if let MessageSendEvent::UpdateHTLCs { node_id, .. } = msg {
+                       // Drop the commitment update for nodes[2]; we can just let that one sit pending
+                       // forever.
+                       *node_id == nodes[1].node.get_our_node_id()
+               } else { panic!(); }
+       );
+
+       // from here on out, the retry `RouteParameters` amount will be amt/1000
+       route_params.final_value_msat /= 1000;
+       route.paths.pop();
+
+       let end_time = Instant::now() + Duration::from_secs(1);
+       macro_rules! thread_body { () => { {
+               // We really want std::thread::scope, but it's not stable until 1.63. Until then, we get unsafe.
+               let node_ref = NodePtr::from_node(&nodes[0]);
+               move || {
+                       let node_a = unsafe { &*node_ref.0 };
+                       while Instant::now() < end_time {
+                               node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
+                               // Whether or not we have any pending events, always pretend we just got a
+                               // `PendingHTLCsForwardable` and process the forwards.
+                               node_a.node.process_pending_htlc_forwards();
+                       }
+               }
+       } } }
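+       // Spawn a pile of threads which all hammer `process_pending_htlc_forwards` concurrently,
+       // trying to trigger a duplicate retry of the same failed amount.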
+       let mut threads = Vec::new();
+       for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); }
+
+       // Back in the main thread, poll pending messages and make sure that we never have more than
+       // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if
+       // there are HTLC messages shoved in while it's running. This allows us to test that we never
+       // generate an additional update_add_htlc until we've fully failed the first.
+       let mut previously_failed_channels = Vec::new();
+       loop {
+               assert_eq!(send_msg_events.len(), 1);
+               let send_event = SendEvent::from_event(send_msg_events.pop().unwrap());
+               assert_eq!(send_event.msgs.len(), 1);
+
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+               commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+
+               // Note that we only push one route into `expect_find_route` at a time, because that's all
+               // the retries (should) need. If the bug is reintroduced, "real" routes may be selected, but
+               // we should still ultimately fail for the same reason - because we're trying to send too
+               // many HTLCs at once.
+               let mut new_route_params = route_params.clone();
+               previously_failed_channels.push(route.paths[0][1].short_channel_id);
+               new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
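+               // Hand back a route over a fresh (still bogus) SCID for the next retry attempt.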
+               route.paths[0][1].short_channel_id += 1;
+               nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
+
+               let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
+               // The "normal" commitment_signed_dance delivers the final RAA and then calls
+               // `check_added_monitors` to ensure only the one RAA-generated monitor update was created.
+               // This races with our other threads which may generate an add-HTLCs commitment update via
+               // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after
+               // *we've* called `process_pending_htlc_forwards` when it's guaranteed to have two updates.
+               let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true);
+               nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &last_raa);
+
+               let cur_time = Instant::now();
+               if cur_time > end_time {
+                       for thread in threads.drain(..) { thread.join().unwrap(); }
+               }
+
+               // Make sure we have some events to handle when we go around...
+               nodes[0].node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
+               nodes[0].node.process_pending_htlc_forwards();
+               send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+               check_added_monitors!(nodes[0], 2);
+
+               if cur_time > end_time {
+                       break;
+               }
+       }
+}