Merge pull request #1216 from valentinewallace/2021-12-preimage-getter
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index c593d7af8329fe7ac7e1e7bf9e0a60022abfc48a..4b38cb32c27658225f8b61e61da78002b269693f 100644
@@ -18,12 +18,12 @@ use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PER
 use chain::transaction::OutPoint;
 use chain::keysinterface::BaseSign;
 use ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, MIN_AFFORDABLE_HTLC_COUNT};
-use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA};
+use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RAACommitmentOrder, PaymentSendFailure, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, PAYMENT_EXPIRY_BLOCKS };
 use ln::channel::{Channel, ChannelError};
 use ln::{chan_utils, onion_utils};
 use ln::chan_utils::{HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, HTLCOutputInCommitment};
-use routing::network_graph::{NetworkUpdate, RoutingFees};
+use routing::network_graph::RoutingFees;
 use routing::router::{Payee, Route, RouteHop, RouteHint, RouteHintHop, RouteParameters, find_route, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
@@ -42,9 +42,6 @@ use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
 
-use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::hashes::Hash;
-
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 
@@ -108,8 +105,6 @@ fn test_insane_channel_opens() {
 
        insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
 
-       insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
-
        insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
 
        insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
@@ -119,6 +114,67 @@ fn test_insane_channel_opens() {
        insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
 }
 
+fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
+       // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure,
+       // but only for them. Because some LSPs do it with some level of trust of the clients (for a
+       // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often
+       // in normal testing, we test it explicitly here.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Have node0 initiate a channel to node1 with aforementioned parameters
+       let mut push_amt = 100_000_000;
+       let feerate_per_kw = 253;
+       push_amt -= feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
+       let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       if !send_from_initiator {
+               open_channel_message.channel_reserve_satoshis = 0;
+               open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+       }
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message);
+
+       // Extract the channel accept message from node1 to node0
+       let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       if send_from_initiator {
+               accept_channel_message.channel_reserve_satoshis = 0;
+               accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+       }
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message);
+       {
+               let mut lock;
+               let mut chan = get_channel_ref!(if send_from_initiator { &nodes[1] } else { &nodes[0] }, lock, temp_channel_id);
+               chan.holder_selected_channel_reserve_satoshis = 0;
+               chan.holder_max_htlc_value_in_flight_msat = 100_000_000;
+       }
+
+       let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
+       let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
+       create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0);
+
+       // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s
+       // security model if it ever tries to send funds back to nodes[0] (but that's not our problem).
+       if send_from_initiator {
+               send_payment(&nodes[0], &[&nodes[1]], 100_000_000
+                       // Note that for outbound channels we have to consider the commitment tx fee and the
+                       // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
+                       // well as an additional HTLC.
+                       - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2));
+       } else {
+               send_payment(&nodes[1], &[&nodes[0]], push_amt);
+       }
+}
+
+#[test]
+fn test_counterparty_no_reserve() {
+       do_test_counterparty_no_reserve(true);
+       do_test_counterparty_no_reserve(false);
+}
+
 #[test]
 fn test_async_inbound_update_fee() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
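
The `push_amt` arithmetic in `do_test_counterparty_no_reserve` above follows the BOLT 3 commitment weight formula: the funder pays `weight * feerate_per_kw / 1000` sats for the commitment transaction, where the weight is a fixed base plus a per-HTLC term, and the channel initiator must additionally keep a "fee spike buffer" of a multiple of the two-HTLC commitment fee. A standalone sketch of that arithmetic, assuming the usual constant values (724 base weight, 172 per HTLC, spike-buffer multiple of 2), which are not stated in this diff:

// Sketch only: assumed values for COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC
// and FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; the real constants live in ln::channel.
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;

// Commitment-transaction fee in millisatoshis, rounded down to whole satoshis,
// mirroring the `commit_tx_fee_msat` helper used by the tests.
fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: u64) -> u64 {
    (COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC)
        * feerate_per_kw as u64 / 1000 * 1000
}

fn main() {
    let feerate_per_kw = 253u32; // the feerate used by the test above
    // Fee reserved for a commitment carrying four HTLCs, as subtracted from `push_amt`.
    assert_eq!(commit_tx_fee_msat(feerate_per_kw, 4), 357_000);
    // The fee spike buffer the initiator must keep on top of its other obligations.
    let spike_buffer = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2);
    assert_eq!(spike_buffer, 540_000);
}
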
@@ -1560,6 +1616,41 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
                assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
 }
 
+#[test]
+fn test_chan_init_feerate_unaffordability() {
+       // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to
+       // channel reserve and feerate requirements.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
+       // HTLC.
+       let mut push_amt = 100_000_000;
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64);
+       assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
+               APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
+
+       // During open, we don't have a "counterparty channel reserve" to check against, so that
+       // requirement only comes into play on the open_channel handling side.
+       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
+       let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       open_channel_msg.push_msat += 1;
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_msg);
+
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+                       assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
 #[test]
 fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
        // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
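
The "(356) ... fee of 357" figures asserted in `test_chan_init_feerate_unaffordability` above fall straight out of the same fee formula: at the 253 sat/kW test feerate and an assumed MIN_AFFORDABLE_HTLC_COUNT of 4, the initial commitment fee is 357 sat, and pushing `push_amt + 1` msat leaves the funder exactly one msat short of it. A worked example (constants again assumed, not taken from this diff):

fn main() {
    let feerate_per_kw: u64 = 253;
    // 724 + 4 * 172 weight units at 253 sat/kW, rounded down to whole sats, in msat.
    let fee_msat = (724 + 4 * 172) * feerate_per_kw / 1000 * 1000;
    assert_eq!(fee_msat, 357_000); // the "357" in the error message

    // Channel value is 100_000 sat = 100_000_000 msat; pushing `push_amt + 1` msat leaves
    // the funder 356_999 msat, i.e. 356 whole sats, one short of the 357 sat fee.
    let push_amt = 100_000_000 - fee_msat;
    let funder_balance_msat = 100_000_000 - (push_amt + 1);
    assert_eq!(funder_balance_msat / 1000, 356); // the "356" in the error message
}
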
@@ -1973,7 +2064,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent!(nodes[0], payment_preimage_1);
+       expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
@@ -2002,7 +2093,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent!(nodes[0], payment_preimage_2);
+       expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
@@ -2015,6 +2106,7 @@ fn channel_reserve_in_flight_removes() {
        // resolve the second HTLC from A's point of view.
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        check_added_monitors!(nodes[0], 1);
+       expect_payment_path_successful!(nodes[0]);
        let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 
        // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
@@ -2044,6 +2136,7 @@ fn channel_reserve_in_flight_removes() {
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        check_added_monitors!(nodes[0], 1);
+       expect_payment_path_successful!(nodes[0]);
        let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
@@ -2723,7 +2816,7 @@ fn test_htlc_on_chain_success() {
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 3);
+       assert_eq!(events.len(), 5);
        let mut first_claimed = false;
        for event in events {
                match event {
@@ -2736,6 +2829,7 @@ fn test_htlc_on_chain_success() {
                                        assert_eq!(payment_hash, payment_hash_2);
                                }
                        },
+                       Event::PaymentPathSuccessful { .. } => {},
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {},
                        _ => panic!("Unexpected event"),
                }
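
The event-count bump above (3 to 5) and the new `PaymentPathSuccessful { .. }` arm reflect the split between the two success events: `PaymentSent` fires once per payment as soon as the preimage is learned from an `update_fulfill_htlc`, while a separate `PaymentPathSuccessful` fires per path once that path is fully resolved, which is also why several hunks swap `expect_payment_sent!` for `expect_payment_sent_without_paths!` plus a later `expect_payment_path_successful!`. A minimal consumer-side sketch (the handler function is hypothetical; the variants and destructured fields are the ones exercised by these tests):

use lightning::util::events::Event;

// Hypothetical application-side dispatch over the two success events.
fn handle_success_event(event: &Event) {
    match event {
        // One per payment, as soon as the preimage is known: the payment is settled.
        Event::PaymentSent { payment_preimage, payment_hash, .. } => {
            println!("payment {:?} settled, preimage {:?}", payment_hash, payment_preimage);
        },
        // One per successful path, after the path is fully resolved; useful for per-path
        // bookkeeping, not for deciding whether the payment succeeded.
        Event::PaymentPathSuccessful { .. } => {
            println!("a path of an already-settled payment completed");
        },
        _ => {},
    }
}
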
@@ -3055,9 +3149,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+       assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
        match events[0] {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
                _ => panic!("Unexepected event"),
@@ -3070,6 +3165,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        }
        if !deliver_bs_raa {
                match events[2] {
+                       Event::PaymentFailed { ref payment_hash, .. } => {
+                               assert_eq!(*payment_hash, fourth_payment_hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match events[3] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
@@ -3370,13 +3471,13 @@ fn test_dup_events_on_peer_disconnect() {
        check_added_monitors!(nodes[1], 1);
        let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
-       expect_payment_sent!(nodes[0], payment_preimage);
+       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
-       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+       expect_payment_path_successful!(nodes[0]);
 }
 
 #[test]
@@ -3416,7 +3517,7 @@ fn test_simple_peer_disconnect() {
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
        {
                let events = nodes[0].node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 2);
+               assert_eq!(events.len(), 3);
                match events[0] {
                        Event::PaymentSent { payment_preimage, payment_hash, .. } => {
                                assert_eq!(payment_preimage, payment_preimage_3);
@@ -3431,6 +3532,10 @@ fn test_simple_peer_disconnect() {
                        },
                        _ => panic!("Unexpected event"),
                }
+               match events[2] {
+                       Event::PaymentPathSuccessful { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
        }
 
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
@@ -3620,15 +3725,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        if messages_delivered < 2 {
                reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
                if messages_delivered < 1 {
-                       let events_4 = nodes[0].node.get_and_clear_pending_events();
-                       assert_eq!(events_4.len(), 1);
-                       match events_4[0] {
-                               Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
-                                       assert_eq!(payment_preimage_1, *payment_preimage);
-                                       assert_eq!(payment_hash_1, *payment_hash);
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
+                       expect_payment_sent!(nodes[0], payment_preimage_1);
                } else {
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                }
@@ -3646,10 +3743,18 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
+       if messages_delivered == 1 || messages_delivered == 2 {
+               expect_payment_path_successful!(nodes[0]);
+       }
+
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
+       if messages_delivered > 2 {
+               expect_payment_path_successful!(nodes[0]);
+       }
+
        // Channel should still work fine...
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
@@ -3961,6 +4066,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);
 
+       expect_payment_path_successful!(nodes[0]);
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
 }
 
@@ -4082,7 +4188,14 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
                }
                expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false);
        } else {
-               expect_payment_failed!(nodes[1], second_payment_hash, true);
+               let events = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 2);
+               if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] {
+                       assert_eq!(*payment_hash, second_payment_hash);
+               } else { panic!("Unexpected event"); }
+               if let Event::PaymentFailed { ref payment_hash, .. } = events[1] {
+                       assert_eq!(*payment_hash, second_payment_hash);
+               } else { panic!("Unexpected event"); }
        }
 }
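
Similarly, the failure side is now reported as a pair of events, which is what the replaced `expect_payment_failed!` expands to above: a `PaymentPathFailed` for the path that failed, followed by a terminal `PaymentFailed` once the payment as a whole is given up on (here there is no retry, so both appear together). A hedged consumer-side sketch (handler name hypothetical; variants and the `payment_hash` field as asserted above):

use lightning::util::events::Event;

// Hypothetical handling of the two failure events checked by the tests above.
fn handle_failure_event(event: &Event) {
    match event {
        // One per failed path; the payment itself may still be retried over another route.
        Event::PaymentPathFailed { payment_hash, .. } => {
            println!("a path failed for payment {:?}", payment_hash);
        },
        // Terminal: the payment will not be retried any further.
        Event::PaymentFailed { payment_hash, .. } => {
            println!("payment {:?} failed for good", payment_hash);
        },
        _ => {},
    }
}
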
 
@@ -5628,24 +5741,16 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
+       let (payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
 
        // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
        // present in B's local commitment transaction, but none of A's commitment transactions.
-       assert!(nodes[1].node.claim_funds(our_payment_preimage));
+       assert!(nodes[1].node.claim_funds(payment_preimage));
        check_added_monitors!(nodes[1], 1);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       match events[0] {
-               Event::PaymentSent { payment_preimage, payment_hash, .. } => {
-                       assert_eq!(payment_preimage, our_payment_preimage);
-                       assert_eq!(payment_hash, our_payment_hash);
-               },
-               _ => panic!("Unexpected event"),
-       }
+       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
 
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
        check_added_monitors!(nodes[0], 1);
@@ -5746,7 +5851,14 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        } else {
-               expect_payment_failed!(nodes[0], our_payment_hash, true);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 2);
+               if let Event::PaymentPathFailed { ref payment_hash, .. } = events[0] {
+                       assert_eq!(*payment_hash, our_payment_hash);
+               } else { panic!("Unexpected event"); }
+               if let Event::PaymentFailed { ref payment_hash, .. } = events[1] {
+                       assert_eq!(*payment_hash, our_payment_hash);
+               } else { panic!("Unexpected event"); }
        }
 }
 
@@ -6067,15 +6179,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
-       let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       match events[0] {
-               Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
-                       assert_eq!(*payment_preimage, payment_preimage_1);
-                       assert_eq!(*payment_hash, payment_hash_1);
-               }
-               _ => panic!("Unexpected event"),
-       }
+       expect_payment_sent!(nodes[0], payment_preimage_1);
 }
 
 // Test that if we fail to forward an HTLC that is being freed from the holding cell that the
@@ -7062,7 +7166,7 @@ fn test_user_configurable_csv_delay() {
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0) {
+       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger) {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
                        _ => panic!("Unexpected event"),
@@ -7091,7 +7195,7 @@ fn test_user_configurable_csv_delay() {
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0) {
+       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger) {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
                        _ => panic!("Unexpected event"),
@@ -7286,9 +7390,9 @@ fn test_announce_disable_channels() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let short_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let short_id_2 = create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-       let short_id_3 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes(&nodes, 1, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        // Disconnect peers
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
@@ -7298,13 +7402,13 @@ fn test_announce_disable_channels() {
        nodes[0].node.timer_tick_occurred(); // DisabledStaged -> Disabled
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       let mut chans_disabled: HashSet<u64> = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
+       let mut chans_disabled = HashMap::new();
        for e in msg_events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                assert_eq!(msg.contents.flags & (1<<1), 1<<1); // The "channel disabled" bit should be set
                                // Check that each channel gets updated exactly once
-                               if !chans_disabled.remove(&msg.contents.short_channel_id) {
+                               if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() {
                                        panic!("Generated ChannelUpdate for wrong chan!");
                                }
                        },
@@ -7340,19 +7444,22 @@ fn test_announce_disable_channels() {
        nodes[0].node.timer_tick_occurred();
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       chans_disabled = [short_id_1, short_id_2, short_id_3].iter().map(|a| *a).collect();
        for e in msg_events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                assert_eq!(msg.contents.flags & (1<<1), 0); // The "channel disabled" bit should be off
-                               // Check that each channel gets updated exactly once
-                               if !chans_disabled.remove(&msg.contents.short_channel_id) {
-                                       panic!("Generated ChannelUpdate for wrong chan!");
+                               match chans_disabled.remove(&msg.contents.short_channel_id) {
+                                       // Each update should have a higher timestamp than the previous one, replacing
+                                       // the old one.
+                                       Some(prev_timestamp) => assert!(msg.contents.timestamp > prev_timestamp),
+                                       None => panic!("Generated ChannelUpdate for wrong chan!"),
                                }
                        },
                        _ => panic!("Unexpected event"),
                }
        }
+       // Check that each channel gets updated exactly once
+       assert!(chans_disabled.is_empty());
 }
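
The reworked bookkeeping above stores each disable update's timestamp in a `HashMap` so the later enable updates can be required to carry strictly newer timestamps, with the final `is_empty` assertion taking over the "updated exactly once" check. The same pattern in isolation, with made-up channel ids and timestamps:

use std::collections::HashMap;

fn main() {
    // First pass: remember when each channel was disabled; a duplicate `insert` would
    // mean the same channel produced two updates.
    let mut chans_disabled: HashMap<u64, u32> = HashMap::new();
    for (scid, timestamp) in [(1u64, 100u32), (2, 100), (3, 101)] {
        assert!(chans_disabled.insert(scid, timestamp).is_none(), "duplicate update for chan {}", scid);
    }

    // Second pass: every re-enable must be strictly newer than the disable it replaces,
    // and `remove` doubles as the exactly-once check.
    for (scid, timestamp) in [(1u64, 102u32), (2, 103), (3, 102)] {
        match chans_disabled.remove(&scid) {
            Some(prev) => assert!(timestamp > prev, "stale update for chan {}", scid),
            None => panic!("update for unknown chan {}", scid),
        }
    }
    assert!(chans_disabled.is_empty()); // every channel got exactly one update per pass
}
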
 
 #[test]
@@ -8087,7 +8194,7 @@ fn test_preimage_storage() {
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 
        {
-               let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200);
+               let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200).unwrap();
                let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
                nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
                check_added_monitors!(nodes[0], 1);
@@ -8115,8 +8222,10 @@ fn test_preimage_storage() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_secret_timeout() {
-       // Simple test of payment secret storage time outs
+       // Simple test of payment secret storage time outs. After
+       // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -8124,11 +8233,11 @@ fn test_secret_timeout() {
 
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 
-       let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment(Some(100_000), 2);
+       let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap();
 
        // We should fail to register the same payment hash twice, at least until we've connected a
        // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
-       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2) {
+       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
                assert_eq!(err, "Duplicate payment hash");
        } else { panic!(); }
        let mut block = {
@@ -8143,7 +8252,7 @@ fn test_secret_timeout() {
                }
        };
        connect_block(&nodes[1], &block);
-       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2) {
+       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
                assert_eq!(err, "Duplicate payment hash");
        } else { panic!(); }
 
@@ -8152,7 +8261,7 @@ fn test_secret_timeout() {
        block.header.prev_blockhash = block.header.block_hash();
        block.header.time += 1;
        connect_block(&nodes[1], &block);
-       let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(payment_hash, Some(100_000), 2).unwrap();
+       let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap();
        assert_ne!(payment_secret_1, our_payment_secret);
 
        {
@@ -8191,7 +8300,7 @@ fn test_bad_secret_hash() {
 
        let random_payment_hash = PaymentHash([42; 32]);
        let random_payment_secret = PaymentSecret([43; 32]);
-       let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2);
+       let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2).unwrap();
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
        // All the below cases should end up being handled exactly identically, so we macro the