Merge pull request #2441 from arik-so/2023-07-taproot-signer-wrapped
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 81c42861a8d8df22b1881b81552f92d0e43a2591..f5ba7166ca71b80c0732c0f758001b82f9aa371c 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -17,17 +17,17 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
+use crate::sign::{EcdsaChannelSigner, EntropySource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{Channel, ChannelError};
+use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
-use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
-use crate::ln::features::{ChannelFeatures, NodeFeatures};
+use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
+use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
@@ -35,7 +35,7 @@ use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::string::UntrustedString;
-use crate::util::config::UserConfig;
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::blockdata::script::{Builder, Script};
@@ -61,6 +61,8 @@ use crate::sync::{Arc, Mutex};
 use crate::ln::functional_test_utils::*;
 use crate::ln::chan_utils::CommitmentTransaction;
 
+use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
+
 #[test]
 fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
@@ -75,7 +77,7 @@ fn test_insane_channel_opens() {
        // Instantiate channel parameters where we push the maximum msats given our
        // funding satoshis
        let channel_value_sat = 31337; // same as funding satoshis
-       let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
+       let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
        let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
 
        // Have node0 initiate a channel to node1 with aforementioned parameters
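Note: throughout this patch the holder-reserve helper is called as a free function from crate::ln::channel (see the imports above) rather than as Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis. A minimal sketch of the call shape this test relies on, assuming only the import path changed:

    use crate::ln::channel::get_holder_selected_channel_reserve_satoshis;
    use crate::util::config::UserConfig;

    // Maximum push_msat for a channel of `channel_value_sat`: everything
    // above the holder-selected reserve can be pushed to the counterparty.
    fn max_push_msat(channel_value_sat: u64, cfg: &UserConfig) -> u64 {
        let reserve_sat = get_holder_selected_channel_reserve_satoshis(channel_value_sat, cfg);
        (channel_value_sat - reserve_sat) * 1000
    }
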
@@ -155,9 +157,9 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        // Have node0 initiate a channel to node1 with aforementioned parameters
        let mut push_amt = 100_000_000;
        let feerate_per_kw = 253;
-       let opt_anchors = false;
-       push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+       push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
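The weight and fee helpers likewise switch from an `opt_anchors: bool` to a `&ChannelTypeFeatures` argument, so anchor-dependent weights flow from the negotiated channel type. A sketch of the non-anchor setup these tests use, assuming only the signatures visible in this diff:

    use crate::ln::channel::commitment_tx_base_weight;
    use crate::ln::features::ChannelTypeFeatures;

    // Non-anchor channels are represented by the static_remote_key-only
    // feature set; the commitment weight helper branches on this.
    fn base_commit_weight_non_anchor() -> u64 {
        let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
        commitment_tx_base_weight(&channel_type_features)
    }
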
@@ -179,9 +181,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-               chan.holder_selected_channel_reserve_satoshis = 0;
-               chan.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               if send_from_initiator {
+                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               } else {
+                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               }
        }
 
        let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
@@ -195,7 +203,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                        // Note that for outbound channels we have to consider the commitment tx fee and the
                        // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
                        // well as an additional HTLC.
-                       - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors));
+                       - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
        } else {
                send_payment(&nodes[1], &[&nodes[0]], push_amt);
        }
@@ -643,16 +651,16 @@ fn test_update_fee_that_funder_cannot_afford() {
        let channel_id = chan.2;
        let secp_ctx = Secp256k1::new();
        let default_config = UserConfig::default();
-       let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
+       let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
 
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee
        // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we
        // calculate two different feerates here - the expected local limit as well as the expected
        // remote limit.
-       let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
-       let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32;
+       let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
+       let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock = feerate;
@@ -671,7 +679,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 
                //We made sure neither party's funds are below the dust limit and there are no HTLCs here
                assert_eq!(commitment_tx.output.len(), 2);
-               let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000;
+               let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
                let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
                actual_fee = channel_value - actual_fee;
                assert_eq!(total_fee, actual_fee);
@@ -695,7 +703,7 @@ fn test_update_fee_that_funder_cannot_afford() {
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = local_chan.get_signer();
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
                 pubkeys.funding_pubkey)
        };
@@ -704,9 +712,9 @@ fn test_update_fee_that_funder_cannot_afford() {
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
                let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = remote_chan.get_signer();
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+                chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
                 pubkeys.funding_pubkey)
        };
 
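Signer access changes shape here as well: get_signer() now returns a wrapper, so trait-level ChannelSigner methods go through as_ref() while ECDSA-specific operations are reached via as_ecdsa(). Sketched from the call sites in these hunks (the wrapper type itself is not shown in the diff and is an assumption):

    let chan_signer = local_chan.get_signer();
    // Generic ChannelSigner methods: dereference the wrapper first.
    let pubkeys = chan_signer.as_ref().pubkeys();
    let point = chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx);
    // ECDSA-only operations: downcast, which can fail for non-ECDSA signers.
    let sig = chan_signer.as_ecdsa().unwrap()
        .sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap();
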
@@ -723,14 +731,14 @@ fn test_update_fee_that_funder_cannot_afford() {
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        INITIAL_COMMITMENT_NUMBER - 1,
                        push_sats,
-                       channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000,
-                       opt_anchors, local_funding, remote_funding,
+                       channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
+                       local_funding, remote_funding,
                        commit_tx_keys.clone(),
                        non_buffer_feerate + 4,
                        &mut htlcs,
-                       &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+                       &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
@@ -755,7 +763,8 @@ fn test_update_fee_that_funder_cannot_afford() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
+               [nodes[0].node.get_our_node_id()], channel_value);
 }
 
 #[test]
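check_closed_event! now also takes the expected counterparty node ids and the channel capacity. A hedged sketch of the assertion this presumably expands to; the exact Event::ChannelClosed field names are an assumption inferred from the new macro arguments:

    match events.pop().unwrap() {
        Event::ChannelClosed { reason, counterparty_node_id, channel_capacity_sats, .. } => {
            // The macro arguments map onto the event's new fields.
            assert_eq!(reason, expected_reason);
            assert_eq!(counterparty_node_id, Some(expected_counterparty_id));
            assert_eq!(channel_capacity_sats, Some(expected_capacity_sats));
        },
        _ => panic!("Unexpected event"),
    }
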
@@ -854,8 +863,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -968,8 +977,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1077,17 +1086,17 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1102,6 +1111,9 @@ fn holding_cell_htlc_counting() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
+       // Fetch a route in advance as we will be unable to once we're unable to send.
+       let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
+
        let mut payments = Vec::new();
        for _ in 0..50 {
                let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
@@ -1119,14 +1131,11 @@ fn holding_cell_htlc_counting() {
        // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
        // the holding cell waiting on B's RAA to send. At this point we should not be able to add
        // another HTLC.
-       let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000);
        {
                unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
                                RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
        }
 
        // This should also be true if we try to forward a payment.
@@ -1279,7 +1288,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
        let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
@@ -1340,7 +1349,7 @@ fn test_basic_channel_reserve() {
        let channel_reserve = chan_stat.channel_reserve_msat;
 
        // The 2* and +1 are for the fee spike reserve.
-       let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2));
+       let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
        let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
        let (mut route, our_payment_hash, _, our_payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
@@ -1349,16 +1358,12 @@ fn test_basic_channel_reserve() {
                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
        match err {
                PaymentSendFailure::AllFailedResendSafe(ref fails) => {
-                       match &fails[0] {
-                               &APIError::ChannelUnavailable{ref err} =>
-                                       assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
-                               _ => panic!("Unexpected error variant"),
-                       }
+                       if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
+                       else { panic!("Unexpected error variant"); }
                },
                _ => panic!("Unexpected error variant"),
        }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1);
 
        send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
 }
@@ -1391,6 +1396,7 @@ fn test_fee_spike_violation_fails_htlc() {
                payment_hash: payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
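msgs::UpdateAddHTLC gains a skimmed_fee_msat field, and every hand-rolled HTLC message in these tests now sets it to None. A minimal sketch of the updated construction (fields as in the surrounding test; the Option type of the new field is an assumption):

    let msg = msgs::UpdateAddHTLC {
        channel_id: chan.2,
        htlc_id: 0,
        amount_msat: htlc_msat,
        payment_hash,
        cltv_expiry: htlc_cltv,
        onion_routing_packet: onion_packet,
        // None: no fee is being skimmed from this HTLC by the forwarder.
        skimmed_fee_msat: None,
    };
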
@@ -1411,23 +1417,23 @@ fn test_fee_spike_violation_fails_htlc() {
                let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = local_chan.get_signer();
                // Make the signer believe we validated another commitment, so we can release the secret
-               chan_signer.get_enforcement_state().last_holder_commitment -= 1;
+               chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
 
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
-                chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
-                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
-                chan_signer.pubkeys().funding_pubkey)
+                chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
+                chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
+                chan_signer.as_ref().pubkeys().funding_pubkey)
        };
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
                let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = remote_chan.get_signer();
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
-                chan_signer.pubkeys().funding_pubkey)
+                chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+                chan_signer.as_ref().pubkeys().funding_pubkey)
        };
 
        // Assemble the set of keys we can use for signatures for our commitment_signed message.
@@ -1457,13 +1463,13 @@ fn test_fee_spike_violation_fails_htlc() {
                        commitment_number,
                        95000,
                        local_chan_balance,
-                       local_chan.opt_anchors(), local_funding, remote_funding,
+                       local_funding, remote_funding,
                        commit_tx_keys.clone(),
                        feerate_per_kw,
                        &mut vec![(accepted_htlc_info, ())],
-                       &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+                       &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
@@ -1516,28 +1522,27 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
 
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
 
+       // Fetch a route in advance as we will be unable to once we're unable to send.
+       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
        // Sending exactly enough to hit the reserve amount should be accepted
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
                let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        // However one more HTLC should be significantly over the reserve amount and fail.
-       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
        unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
+               ), true, APIError::ChannelUnavailable { .. }, {});
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1);
 }
 
 #[test]
@@ -1548,14 +1553,14 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
        // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
@@ -1563,7 +1568,9 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
                let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
-       let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000);
+       let (mut route, payment_hash, _, payment_secret) =
+               get_route_and_payment_hash!(nodes[1], nodes[0], 1000);
+       route.paths[0].hops[0].fee_msat = 700_000;
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
@@ -1579,6 +1586,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
                payment_hash: payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
@@ -1588,7 +1596,8 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
+               [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1602,18 +1611,18 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
        // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
 
        let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
-               + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1;
+               + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
        // commitment transaction fee.
@@ -1625,11 +1634,12 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        }
 
        // One more than the dust amt should fail, however.
-       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1);
+       let (mut route, our_payment_hash, _, our_payment_secret) =
+               get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
+       route.paths[0].hops[0].fee_msat += 1;
        unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
+               ), true, APIError::ChannelUnavailable { .. }, {});
 }
 
 #[test]
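A recurring pattern in these hunks: routes are fetched up front for an amount the router will accept, then the hop amount is bumped by hand to provoke the limit under test, since the router itself would now refuse the over-limit amount. The pattern as used just above:

    // Ask the router for a sendable amount, then push it one msat over the
    // bound under test by editing the route directly.
    let (mut route, our_payment_hash, _, our_payment_secret) =
        get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
    route.paths[0].hops[0].fee_msat += 1;
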
@@ -1642,18 +1652,18 @@ fn test_chan_init_feerate_unaffordability() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
        // HTLC.
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
        assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
                APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
 
        // During open, we don't have a "counterparty channel reserve" to check against, so that
        // requirement only comes into play on the open_channel handling side.
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
        let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        open_channel_msg.push_msat += 1;
@@ -1712,10 +1722,10 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
        let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
 
        // Add a 2* and +1 for the fee spike reserve.
-       let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
+       let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
        let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
        let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
 
@@ -1733,7 +1743,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
 
        // Attempt to trigger a channel reserve violation --> payment failure.
-       let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors);
+       let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
        let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
        let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
        let mut route_2 = route_1.clone();
@@ -1754,6 +1764,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
                payment_hash: our_payment_hash_1,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1763,7 +1774,8 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1779,7 +1791,7 @@ fn test_inbound_outbound_capacity_is_not_zero() {
        assert_eq!(channels0.len(), 1);
        assert_eq!(channels1.len(), 1);
 
-       let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
+       let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
        assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
        assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
 
@@ -1787,8 +1799,8 @@ fn test_inbound_outbound_capacity_is_not_zero() {
        assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
 }
 
-fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 {
-       (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
+fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
+       (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
 }
 
 #[test]
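Usage of the updated commit_tx_fee_msat helper defined just above, for a two-HTLC commitment on a non-anchor channel; the feerate mirrors the 253 sat/kW value used earlier in this file:

    let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
    // 253 sat/kW, two non-dust HTLCs, rounded down to whole sats (in msat).
    let fee_msat = commit_tx_fee_msat(253, 2, &channel_type_features);
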
@@ -1823,7 +1835,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        let feemsat = 239; // set above
        let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
 
        let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
 
@@ -1837,10 +1849,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
 
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
        }
 
        // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
@@ -1850,7 +1860,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
                // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
                // Also, ensure that each payment has enough to be over the dust limit to
                // ensure it'll be included in each commit tx fee calculation.
-               let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
+               let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
                let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
                if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
                        break;
@@ -1887,7 +1897,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
        // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
        // policy.
-       let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
+       let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
        let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
        let amt_msat_1 = recv_value_1 + total_fee_msat;
 
@@ -1911,13 +1921,12 @@ fn test_channel_reserve_holding_cell_htlcs() {
                let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        }
 
        // split the rest to test holding cell
-       let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
+       let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
        let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
        let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
        let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
@@ -1942,10 +1951,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
                route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2);
        }
 
        let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
@@ -2038,11 +2045,11 @@ fn test_channel_reserve_holding_cell_htlcs() {
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
 
-       let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors);
+       let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
        let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
        send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
 
-       let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
        let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
        let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
        assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
@@ -2117,7 +2124,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+       expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
@@ -2146,7 +2153,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+       expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
@@ -2249,8 +2256,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
        // One pending HTLC is discarded by the force-close:
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -2271,8 +2278,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
@@ -2316,8 +2323,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
-       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
@@ -2390,8 +2397,8 @@ fn channel_monitor_network_test() {
 
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
                ChannelMonitorUpdateStatus::Completed);
-       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2439,7 +2446,7 @@ fn test_justice_tx_htlc_timeout() {
                        node_txn.swap_remove(0);
                }
                check_added_monitors!(nodes[1], 1);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2447,7 +2454,7 @@ fn test_justice_tx_htlc_timeout() {
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2502,11 +2509,11 @@ fn test_justice_tx_htlc_success() {
                test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2534,7 +2541,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
 
@@ -2544,7 +2551,7 @@ fn revoked_output_claim() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2581,10 +2588,10 @@ fn claim_htlc_outputs_shared_tx() {
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
@@ -2643,7 +2650,7 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let mut events = nodes[0].node.get_and_clear_pending_events();
                expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
                match events.last().unwrap() {
@@ -2755,7 +2762,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], commitment_tx[0]);
@@ -2872,7 +2879,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
        let commitment_spend =
@@ -2980,14 +2987,15 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 0);
 
        // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
        // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
        mine_transaction(&nodes[1], &commitment_tx[0]);
-       check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
+       check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
+               , [nodes[2].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        let timeout_tx = {
                let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
@@ -3031,7 +3039,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], commitment_tx[0]);
@@ -3068,7 +3076,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@ -3129,7 +3137,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
                // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
                nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
-                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000
+                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
        } else { 3000000 };
 
        let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
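
A second pattern repeated below: per-channel fields such as `holder_dust_limit_satoshis` are no longer read directly off the channel but through a new `context` accessor, suggesting the shared channel state was split out (presumably into a context struct used by both the outbound and inbound channel types named elsewhere in this diff). The lookup chain the tests now use, with hypothetical `counterparty_id`/`channel_id` bindings standing in for the tests' values:

    // Hedged sketch of the new access path only; not runnable on its own.
    let dust_limit_msat = node.node.per_peer_state.read().unwrap()
        .get(&counterparty_id).unwrap()
        .lock().unwrap()
        .channel_by_id.get(&channel_id).unwrap()
        .context.holder_dust_limit_satoshis * 1000;
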
@@ -3411,6 +3419,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                        payment_hash,
                        cltv_expiry,
                        onion_routing_packet,
+                       skimmed_fee_msat: None,
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
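
`msgs::UpdateAddHTLC` has grown a `skimmed_fee_msat` field, so every hand-built HTLC in these tests now sets it. A minimal construction sketch under the assumption that the field is an `Option<u64>` carrying any extra fee a forwarding node skims from the HTLC; ordinary forwards use `None`:

    // Hedged sketch; channel_id, payment_hash, cltv_expiry and onion_routing_packet
    // are the bindings already present in the surrounding test.
    let update_add_htlc = msgs::UpdateAddHTLC {
        channel_id,
        htlc_id: 0,
        amount_msat: 1_000_000,
        payment_hash,
        cltv_expiry,
        onion_routing_packet,
        skimmed_fee_msat: None, // new field; None when no fee is skimmed
    };
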
@@ -3460,7 +3469,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 3);
@@ -3470,7 +3479,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_block(&nodes[1], &block);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@ -3522,7 +3531,7 @@ fn test_force_close_fail_back() {
        nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3537,7 +3546,7 @@ fn test_force_close_fail_back() {
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -3575,12 +3584,14 @@ fn test_dup_events_on_peer_disconnect() {
        check_added_monitors!(nodes[1], 1);
        let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+       expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.pending_htlc_claims.0 = 1;
+       reconnect_nodes(reconnect_args);
        expect_payment_path_successful!(nodes[0]);
 }
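
The long positional tuples previously passed to `reconnect_nodes` are replaced by a `ReconnectArgs` struct throughout this file. Reconstructed from the call sites in this diff, it presumably looks roughly like the sketch below; exact types and defaults are assumptions (a signed `pending_htlc_adds` matches the `-1` values assigned later in this file):

    // Hedged sketch of the new argument struct, not the exact definition.
    pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
        pub node_a: &'a Node<'b, 'c, 'd>,
        pub node_b: &'a Node<'b, 'c, 'd>,
        pub send_channel_ready: (bool, bool),
        pub pending_htlc_adds: (i64, i64),
        pub pending_htlc_claims: (usize, usize),
        pub pending_cell_htlc_claims: (usize, usize),
        pub pending_cell_htlc_fails: (usize, usize),
        pub pending_raa: (bool, bool),
    }

    impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
        pub fn new(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> Self {
            Self {
                node_a, node_b,
                send_channel_ready: (false, false),
                pending_htlc_adds: (0, 0),
                pending_htlc_claims: (0, 0),
                pending_cell_htlc_claims: (0, 0),
                pending_cell_htlc_fails: (0, 0),
                pending_raa: (false, false),
            }
        }
    }

A default `ReconnectArgs::new(..)` is then equivalent to the old all-zero, all-false positional call, and each test only names the directions it cares about.
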
 
@@ -3621,8 +3632,10 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false,
+               [nodes[1].node.get_our_node_id()], 1000000);
+       check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false,
+               [nodes[0].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -3637,7 +3650,9 @@ fn test_simple_peer_disconnect() {
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.send_channel_ready = (true, true);
+       reconnect_nodes(reconnect_args);
 
        let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3646,7 +3661,7 @@ fn test_simple_peer_disconnect() {
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3659,7 +3674,10 @@ fn test_simple_peer_disconnect() {
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
        fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.pending_cell_htlc_fails.0 = 1;
+       reconnect_args.pending_cell_htlc_claims.0 = 1;
+       reconnect_nodes(reconnect_args);
        {
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 4);
@@ -3688,6 +3706,7 @@ fn test_simple_peer_disconnect() {
                        _ => panic!("Unexpected event"),
                }
        }
+       check_added_monitors(&nodes[0], 1);
 
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
        fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
@@ -3771,19 +3790,29 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                }
                // Even if the channel_ready messages get exchanged, as long as nothing further was
                // received on either side, both sides will need to resend them.
-               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.send_channel_ready = (true, true);
+               reconnect_args.pending_htlc_adds.1 = 1;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 3 {
                // nodes[0] still wants its RAA + commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_args.pending_raa.0 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[0] still wants its commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 5 {
                // nodes[1] still wants its final RAA
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_raa.1 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 6 {
                // Everything was delivered...
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
        }
 
        let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -3807,7 +3836,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        nodes[1].node.process_pending_htlc_forwards();
 
@@ -3891,7 +3920,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        if messages_delivered < 2 {
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_claims.0 = 1;
+               reconnect_nodes(reconnect_args);
                if messages_delivered < 1 {
                        expect_payment_sent!(nodes[0], payment_preimage_1);
                } else {
@@ -3899,16 +3930,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                }
        } else if messages_delivered == 2 {
                // nodes[0] still wants its RAA + commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_args.pending_raa.1 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 3 {
                // nodes[0] still wants its commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[1] still wants its final RAA
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_raa.0 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 5 {
                // Everything was delivered...
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
        }
 
        if messages_delivered == 1 || messages_delivered == 2 {
@@ -3918,7 +3956,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        }
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        if messages_delivered > 2 {
                expect_payment_path_successful!(nodes[0]);
@@ -4034,10 +4072,14 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
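
Every `peer_connected` call now builds `msgs::Init` with a third field, `networks`. A hedged example of the non-`None` case, assuming the field is an `Option<Vec<ChainHash>>` listing the chains the node is willing to operate on:

    use bitcoin::blockdata::constants::ChainHash;
    use bitcoin::network::constants::Network;

    // Hedged sketch: advertise interest in testnet only. None, as used in the
    // tests above, advertises no chain preference at all.
    let init_msg = msgs::Init {
        features: nodes[1].node.init_features(),
        networks: Some(vec![ChainHash::using_genesis_block(Network::Testnet)]),
        remote_network_address: None,
    };
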
 
@@ -4261,7 +4303,7 @@ macro_rules! check_spendable_outputs {
                        let secp_ctx = Secp256k1::new();
                        for event in events.drain(..) {
                                match event {
-                                       Event::SpendableOutputs { mut outputs } => {
+                                       Event::SpendableOutputs { mut outputs, channel_id: _ } => {
                                                for outp in outputs.drain(..) {
                                                        txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
                                                        all_outputs.push(outp);
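
`Event::SpendableOutputs` now carries a `channel_id` alongside the outputs, which the macro above deliberately ignores. Where a test does care which channel produced the outputs, the match would presumably look like this (the `Option` typing is an assumption, made so that events from monitors written by older versions can still deserialize):

    // Hedged sketch; expected_channel_id is a hypothetical expected value.
    match event {
        Event::SpendableOutputs { mut outputs, channel_id } => {
            if let Some(id) = channel_id {
                assert_eq!(id, expected_channel_id);
            }
            for outp in outputs.drain(..) { /* spend as in the macro above */ }
        },
        _ => panic!("Unexpected event"),
    }
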
@@ -4292,7 +4334,7 @@ fn test_claim_sizeable_push_msat() {
        nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4321,7 +4363,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -4331,7 +4373,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4359,7 +4401,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        mine_transaction(&nodes[1], &node_txn[0]);
@@ -4411,7 +4453,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4455,7 +4497,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, false);
 
@@ -4486,7 +4528,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
@@ -4523,7 +4565,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -4537,7 +4579,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
@@ -4591,7 +4633,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 
        assert_eq!(revoked_htlc_txn.len(), 1);
@@ -4607,7 +4649,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
@@ -4687,7 +4729,7 @@ fn test_onchain_to_onchain_claim() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 1);
@@ -4746,7 +4788,7 @@ fn test_onchain_to_onchain_claim() {
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        // ChannelMonitor: HTLC-Success tx
        assert_eq!(b_txn.len(), 1);
@@ -4803,7 +4845,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
 
        let htlc_timeout_tx;
@@ -4850,7 +4892,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4902,7 +4944,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
-       expect_payment_sent(&nodes[0], our_payment_preimage, None, true);
+       expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
 }
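
`expect_payment_sent_without_paths!` is gone; its call sites now invoke `expect_payment_sent` directly with two extra booleans, as in the `(.., true, true)` call just above. From the call sites in this diff, the helper's shape is presumably along these lines; the parameter names are guesses:

    // Hedged sketch of the helper's signature, not its exact code.
    fn expect_payment_sent_sketch(
        node: &Node<'_, '_, '_>,
        expected_preimage: PaymentPreimage,
        expected_fee_msat: Option<Option<u64>>, // None: don't check the reported fee
        expect_per_path_claims: bool,  // also drain PaymentPathSuccessful events
        expect_post_event_monitor_update: bool, // a monitor update follows the event
    ) {
        // asserts an Event::PaymentSent { payment_preimage, .. } matching the args
    }

On that reading, `expect_payment_sent(&nodes[0], preimage, None, false, false)` reproduces the old without-paths behaviour.
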
 
 #[test]
@@ -4928,7 +4970,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4993,7 +5035,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
        // 0th HTLC:
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
@@ -5283,7 +5325,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
        let htlc_timeout = {
@@ -5370,7 +5412,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5412,7 +5454,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5420,7 +5462,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5445,7 +5487,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+       expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
        check_added_monitors!(nodes[0], 1);
@@ -5462,7 +5504,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5493,7 +5535,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5539,7 +5581,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5703,10 +5745,10 @@ fn test_fail_holding_cell_htlc_upon_free() {
        let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
 
        // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
-       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
 
        // Send a payment which passes reserve checks but gets stuck in the holding cell.
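
The `opt_anchors` boolean that used to parameterize fee math is replaced by a full `ChannelTypeFeatures`, since channel types now vary in more than one dimension. The arithmetic behind `max_can_send` above, sketched with assumed helper names (`commitment_tx_base_weight`, `COMMITMENT_TX_WEIGHT_PER_HTLC`); the `2 *` and `1 + 1` encode the fee-spike buffer, twice the commitment fee with one extra HTLC:

    // Hedged sketch of the weight-based commitment fee, in msat.
    fn commit_tx_fee_msat_sketch(feerate_per_kw: u32, num_htlcs: u64,
        channel_type: &ChannelTypeFeatures) -> u64
    {
        let weight = commitment_tx_base_weight(channel_type)
            + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
        feerate_per_kw as u64 * weight / 1000 * 1000 // weight -> sats -> msat
    }

    // max_can_send = balance - reserve - fee-spike buffer:
    // 5_000_000 - channel_reserve - 2 * commit_tx_fee_msat_sketch(feerate, 1 + 1, &features)
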
@@ -5728,9 +5770,6 @@ fn test_fail_holding_cell_htlc_upon_free() {
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
        nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
-       let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
-               hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5786,11 +5825,11 @@ fn test_free_and_fail_holding_cell_htlcs() {
        let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
 
        // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
        let amt_1 = 20000;
-       let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1;
+       let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
        let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
        let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
 
@@ -5819,9 +5858,6 @@ fn test_free_and_fail_holding_cell_htlcs() {
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
        nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
-       let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
-               hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5919,10 +5955,10 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
 
        // Send a payment which passes reserve checks but gets stuck in the holding cell.
-       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
        let payment_event = {
                nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@ -6030,10 +6066,8 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
+               ), true, APIError::ChannelUnavailable { .. }, {});
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1);
 }
 
 #[test]
@@ -6076,7 +6110,8 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6110,8 +6145,10 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
        let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
 
+       // Fetch a route in advance, as we will be unable to find one once the channel is saturated.
+       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
        for i in 0..max_accepted_htlcs {
                let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
                let payment_event = {
@@ -6135,14 +6172,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
                expect_pending_htlcs_forwardable!(nodes[1]);
                expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
        }
-       let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
+               ), true, APIError::ChannelUnavailable { .. }, {});
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
 }
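
Note the ordering in the hunk above: the route must be fetched before the loop because, once `max_accepted_htlcs` payments are pending, the channel advertises no usable capacity and `get_route_and_payment_hash!` would fail. Holding a stale route lets the test hit the send-side HTLC-count check rather than a router error. In outline:

    // Hedged outline of the test flow above.
    let (route, hash, _, secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
    for _ in 0..max_accepted_htlcs { /* add one HTLC and fully commit it */ }
    unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, hash,
            RecipientOnionFields::secret_only(secret), PaymentId(hash.0)
        ), true, APIError::ChannelUnavailable { .. }, {});
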
 
 #[test]
@@ -6164,11 +6198,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
        route.paths[0].hops[0].fee_msat =  max_in_flight + 1;
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
-
+               ), true, APIError::ChannelUnavailable { .. }, {});
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
 
        send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
 }
@@ -6187,7 +6218,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-               htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
+               htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
        }
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@@ -6201,7 +6232,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6216,9 +6247,9 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
        // The 2* and +1 are for the fee spike reserve.
-       let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
 
        let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
@@ -6237,7 +6268,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6268,6 +6299,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                payment_hash: our_payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet.clone(),
+               skimmed_fee_msat: None,
        };
 
        for i in 0..50 {
@@ -6281,7 +6313,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6305,7 +6337,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -6329,7 +6361,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6353,10 +6385,14 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        // Disconnect and Reconnect
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
@@ -6377,7 +6413,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6409,7 +6445,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6441,7 +6477,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6473,7 +6509,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6516,7 +6552,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6559,7 +6595,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6606,7 +6642,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -6785,7 +6821,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
 
        // We route 2 dust-HTLCs between A and B
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
@@ -6833,7 +6869,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
@@ -6878,7 +6914,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
 
        let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -6896,7 +6932,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, false);
 
@@ -6916,7 +6952,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 
                connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
@@ -6961,8 +6997,8 @@ fn test_user_configurable_csv_delay() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
-       if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       // We test that config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
+       if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
                &low_our_to_self_config, 0, 42)
        {
@@ -6972,13 +7008,13 @@ fn test_user_configurable_csv_delay() {
                }
        } else { assert!(false) }
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
+       // We test that config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
-               &low_our_to_self_config, 0, &nodes[0].logger, 42)
+               &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
@@ -7002,15 +7038,15 @@ fn test_user_configurable_csv_delay() {
                        _ => { panic!(); }
                }
        } else { panic!(); }
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
 
-       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
+       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
-               &high_their_to_self_config, 0, &nodes[0].logger, 42)
+               &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
@@ -7119,10 +7155,14 @@ fn test_announce_disable_channels() {
                }
        }
        // Reconnect peers
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 3);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 3);
 
@@ -7307,7 +7347,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
        connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
        let revoked_htlc_txn = {
@@ -7553,16 +7593,16 @@ fn test_counterparty_raa_skip_no_crash() {
                const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
                // Make signer believe we got a counterparty signature, so that it allows the revocation
-               keys.get_enforcement_state().last_holder_commitment -= 1;
-               per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+               keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
+               per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
 
                // Must revoke without gaps
-               keys.get_enforcement_state().last_holder_commitment -= 1;
-               keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+               keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
+               keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
 
-               keys.get_enforcement_state().last_holder_commitment -= 1;
+               keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
                next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
-                       &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+                       &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
        }
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
@@ -7575,7 +7615,8 @@ fn test_counterparty_raa_skip_no_crash() {
                });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -7608,7 +7649,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
@@ -7652,7 +7693,7 @@ fn test_channel_conf_timeout() {
 
        connect_blocks(&nodes[1], 1);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
+       check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
        let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_ev.len(), 1);
        match close_ev[0] {
@@ -7838,70 +7879,9 @@ fn test_manually_reject_inbound_channel_request() {
                }
                _ => panic!("Unexpected event"),
        }
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
-}
-
-#[test]
-fn test_reject_funding_before_inbound_channel_accepted() {
-       // This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound
-       // channels must be manually accepted through `ChannelManager::accept_inbound_channel` by
-       // the node operator before the counterparty sends a `FundingCreated` message. If a
-       // `FundingCreated` message is received before the channel is accepted, it should be rejected
-       // and the channel should be closed.
-       let mut manually_accept_conf = UserConfig::default();
-       manually_accept_conf.manually_accept_inbound_channels = true;
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
-       let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       let temp_channel_id = res.temporary_channel_id;
-
-       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
-
-       // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`.
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
-       // Clear the `Event::OpenChannelRequest` event without responding to the request.
-       nodes[1].node.get_and_clear_pending_events();
-
-       // Get the `AcceptChannel` message of `nodes[1]` without calling
-       // `ChannelManager::accept_inbound_channel`, which generates a
-       // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
-       // `handle_accept_channel`, which is required in order for `create_funding_transaction` to
-       // succeed when `nodes[0]` is passed to it.
-       let accept_chan_msg = {
-               let mut node_1_per_peer_lock;
-               let mut node_1_peer_state_lock;
-               let channel =  get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
-               channel.get_accept_channel_message()
-       };
-       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
-
-       let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
-
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
-       let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-
-       // The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
-
-       let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(close_msg_ev.len(), 1);
-
-       let expected_err = "FundingCreated message received before the channel was accepted";
-       match close_msg_ev[0] {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => {
-                       assert_eq!(msg.channel_id, temp_channel_id);
-                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-                       assert_eq!(msg.data, expected_err);
-               }
-               _ => panic!("Unexpected event"),
-       }
 
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+       // There should be no more events to process, as the channel was never opened.
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 }
 
 #[test]
@@ -7929,10 +7909,10 @@ fn test_can_not_accept_inbound_channel_twice() {
                        let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
                        match api_res {
                                Err(APIError::APIMisuseError { err }) => {
-                                       assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
+                                       assert_eq!(err, "No such channel awaiting to be accepted.");
                                },
                                Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
-                               Err(_) => panic!("Unexpected Error"),
+                               Err(e) => panic!("Unexpected Error {:?}", e),
                        }
                }
                _ => panic!("Unexpected event"),
@@ -7960,11 +7940,11 @@ fn test_can_not_accept_unknown_inbound_channel() {
        let unknown_channel_id = [0; 32];
        let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
        match api_res {
-               Err(APIError::ChannelUnavailable { err }) => {
-                       assert_eq!(err, format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(unknown_channel_id), nodes[1].node.get_our_node_id()));
+               Err(APIError::APIMisuseError { err }) => {
+                       assert_eq!(err, "No such channel awaiting to be accepted.");
                },
                Ok(_) => panic!("It shouldn't be possible to accept an unkown channel"),
-               Err(_) => panic!("Unexpected Error"),
+               Err(e) => panic!("Unexpected Error: {:?}", e),
        }
 }
 
@@ -8035,7 +8015,9 @@ fn test_onion_value_mpp_set_calculation() {
                                RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
                        // Edit amt_to_forward to simulate the sender having set
                        // the final amount and the routing node taking less fee
-                       onion_payloads[1].amt_to_forward = 99_000;
+                       if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
+                               *amt_msat = 99_000;
+                       } else { panic!() }
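+                       // The `panic!()` guards this edit: only the final-hop `Receive` payload
+                       // carries `amt_msat`, so hitting any other variant here would mean the
+                       // payload layout changed and this under-forwarding simulation needs updating.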
                        let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
                        payment_event.msgs[0].onion_routing_packet = new_onion_packet;
                }
@@ -8203,67 +8185,6 @@ fn test_preimage_storage() {
        }
 }
 
-#[test]
-#[allow(deprecated)]
-fn test_secret_timeout() {
-       // Simple test of payment secret storage time outs. After
-       // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well.
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
-
-       let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap();
-
-       // We should fail to register the same payment hash twice, at least until we've connected a
-       // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
-       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
-               assert_eq!(err, "Duplicate payment hash");
-       } else { panic!(); }
-       let mut block = {
-               let node_1_blocks = nodes[1].blocks.lock().unwrap();
-               create_dummy_block(node_1_blocks.last().unwrap().0.block_hash(), node_1_blocks.len() as u32 + 7200, Vec::new())
-       };
-       connect_block(&nodes[1], &block);
-       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
-               assert_eq!(err, "Duplicate payment hash");
-       } else { panic!(); }
-
-       // If we then connect the second block, we should be able to register the same payment hash
-       // again (this time getting a new payment secret).
-       block.header.prev_blockhash = block.header.block_hash();
-       block.header.time += 1;
-       connect_block(&nodes[1], &block);
-       let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap();
-       assert_ne!(payment_secret_1, our_payment_secret);
-
-       {
-               let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-               nodes[0].node.send_payment_with_route(&route, payment_hash,
-                       RecipientOnionFields::secret_only(our_payment_secret), PaymentId(payment_hash.0)).unwrap();
-               check_added_monitors!(nodes[0], 1);
-               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-               let mut payment_event = SendEvent::from_event(events.pop().unwrap());
-               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
-               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       }
-       // Note that after leaving the above scope we have no knowledge of any arguments or return
-       // values from previous calls.
-       expect_pending_htlcs_forwardable!(nodes[1]);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       match events[0] {
-               Event::PaymentClaimable { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
-                       assert!(payment_preimage.is_none());
-                       assert_eq!(payment_secret, our_payment_secret);
-                       // We don't actually have the payment preimage with which to claim this payment!
-               },
-               _ => panic!("Unexpected event"),
-       }
-}
-
 #[test]
 fn test_bad_secret_hash() {
        // Simple test of unregistered payment hash/invalid payment secret handling
@@ -8513,7 +8434,8 @@ fn test_concurrent_monitor_claim() {
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
-       check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
+       check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+               [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
        {
@@ -8561,7 +8483,8 @@ fn test_pre_lockin_no_chan_closed_update() {
        let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
-       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
+               [nodes[1].node.get_our_node_id(); 2], 100000);
 }
 
 #[test]
@@ -8596,7 +8519,7 @@ fn test_htlc_no_detection() {
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV);
 
        let htlc_timeout = {
@@ -8662,7 +8585,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
-       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -8672,7 +8595,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                }
        }
 
@@ -8752,7 +8675,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@ -8935,16 +8858,16 @@ fn test_duplicate_chan_id() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
        create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
 
-       let funding_created = {
+       let (_, funding_created) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
+               // Once we call `get_funding_created` the channel has a duplicate channel_id as
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state).
-               let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
+               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
+               as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -9019,7 +8942,8 @@ fn test_error_chans_closed() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
-       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+               [nodes[1].node.get_our_node_id()], 100000);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -9029,7 +8953,8 @@ fn test_error_chans_closed() {
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
-       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+               [nodes[1].node.get_our_node_id(); 2], 100000);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -9106,7 +9031,8 @@ fn test_invalid_funding_tx() {
 
        let expected_err = "funding tx had wrong script/value or output index";
        confirm_transaction_at(&nodes[1], &tx, 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -9172,7 +9098,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -9434,74 +9360,7 @@ fn test_inconsistent_mpp_params() {
        pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
        do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
-       expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true);
-}
-
-#[test]
-fn test_keysend_payments_to_public_node() {
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
-       let network_graph = nodes[0].network_graph.clone();
-       let payer_pubkey = nodes[0].node.get_our_node_id();
-       let payee_pubkey = nodes[1].node.get_our_node_id();
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
-               final_value_msat: 10000,
-       };
-       let scorer = test_utils::TestScorer::new();
-       let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
-
-       let test_preimage = PaymentPreimage([42; 32]);
-       let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
-               RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       let event = events.pop().unwrap();
-       let path = vec![&nodes[1]];
-       pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
-       claim_payment(&nodes[0], &path, test_preimage);
-}
-
-#[test]
-fn test_keysend_payments_to_private_node() {
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let payer_pubkey = nodes[0].node.get_our_node_id();
-       let payee_pubkey = nodes[1].node.get_our_node_id();
-
-       let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
-               final_value_msat: 10000,
-       };
-       let network_graph = nodes[0].network_graph.clone();
-       let first_hops = nodes[0].node.list_usable_channels();
-       let scorer = test_utils::TestScorer::new();
-       let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = find_route(
-               &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-               nodes[0].logger, &scorer, &(), &random_seed_bytes
-       ).unwrap();
-
-       let test_preimage = PaymentPreimage([42; 32]);
-       let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
-               RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       let event = events.pop().unwrap();
-       let path = vec![&nodes[1]];
-       pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
-       claim_payment(&nodes[0], &path, test_preimage);
+       expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
 }
 
 #[test]
@@ -9572,7 +9431,7 @@ enum ExposureEvent {
        AtUpdateFeeOutbound,
 }
 
-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
        // Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat`
        // policy.
        //
@@ -9587,7 +9446,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let mut config = test_default_channel_config();
-       config.channel_config.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+       config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
+               // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
+               // to get roughly the same initial value as the default setting when this test was
+               // originally written.
+               MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
+       } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
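+       // Rough equivalence check, assuming the multiplier limit works out to about
+       // `feerate_sat_per_kw * multiplier` msat: 253 * (5_000_000 / 253) = 253 * 19_762
+       // = 4_999_786 msat, i.e. just under the old 5_000_000 msat fixed cap.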
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -9603,15 +9467,15 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
 
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
-               chan.holder_dust_limit_satoshis = 546;
+               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
+               chan.context.holder_dust_limit_satoshis = 546;
        }
 
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@@ -9627,20 +9491,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
        update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
 
-       let dust_buffer_feerate = {
+       // Fetch a route in advance, as we won't be able to fetch one once we're unable to send.
+       let (mut route, payment_hash, _, payment_secret) =
+               get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
+
+       let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               chan.get_dust_buffer_feerate(None) as u64
+               (chan.context.get_dust_buffer_feerate(None) as u64,
+               chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
-       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
 
-       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
 
-       let dust_htlc_on_counterparty_tx: u64 = 25;
-       let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+       let dust_htlc_on_counterparty_tx: u64 = 4;
+       let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
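+       // The two holder-tx values above encode "the largest amount that is still dust":
+       // the second-stage HTLC tx fee at `dust_buffer_feerate`, plus the dust limit, minus
+       // one sat to stay under the non-dust boundary, scaled to msat; dividing the exposure
+       // limit by that per-HTLC value gives how many such HTLCs fit under the limit. For
+       // the counterparty-tx case the limit is instead simply split into four equal HTLCs.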
 
        if on_holder_tx {
                if dust_outbound_balance {
@@ -9664,7 +9533,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                if dust_outbound_balance {
                        // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
                        // Outbound dust balance: 5000 sats
-                       for _ in 0..dust_htlc_on_counterparty_tx {
+                       for _ in 0..dust_htlc_on_counterparty_tx - 1 {
                                let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat);
                                nodes[0].node.send_payment_with_route(&route, payment_hash,
                                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
@@ -9672,32 +9541,27 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                } else {
                        // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty's `dust_limit_satoshis`)
                        // Inbound dust balance: 5000 sats
-                       for _ in 0..dust_htlc_on_counterparty_tx {
+                       for _ in 0..dust_htlc_on_counterparty_tx - 1 {
                                route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat);
                        }
                }
        }
 
-       let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1);
        if exposure_breach_event == ExposureEvent::AtHTLCForward {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
-               let mut config = UserConfig::default();
+               route.paths[0].hops.last_mut().unwrap().fee_msat =
+                       if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
                // With default dust exposure: 5000 sats
                if on_holder_tx {
-                       let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1);
-                       let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat;
                        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
                                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
-                               ), true, APIError::ChannelUnavailable { ref err },
-                               assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat)));
+                               ), true, APIError::ChannelUnavailable { .. }, {});
                } else {
                        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
                                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
-                               ), true, APIError::ChannelUnavailable { ref err },
-                               assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat)));
+                               ), true, APIError::ChannelUnavailable { .. }, {});
                }
        } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
+               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
                nodes[1].node.send_payment_with_route(&route, payment_hash,
                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
                check_added_monitors!(nodes[1], 1);
@@ -9710,15 +9574,24 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                        // Outbound dust balance: 6399 sats
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
                } else {
                        // Outbound dust balance: 5200 sats
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
+                               format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+                                       dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
+                                       max_dust_htlc_exposure_msat), 1);
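+                       // i.e. the (dust_htlc_on_counterparty_tx - 1) dust HTLCs added above, plus
+                       // the breaching HTLC of dust_htlc_on_counterparty_tx_msat + 4, nudging total
+                       // exposure just past max_dust_htlc_exposure_msat.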
                }
        } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000);
-               nodes[0].node.send_payment_with_route(&route, payment_hash,
-                       RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+               route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
+               // For the multiplier dust exposure limit, since it scales with feerate,
+               // we need to add a lot of HTLCs that will become dust at the new feerate
+               // to cross the threshold.
+               for _ in 0..20 {
+                       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
+                       nodes[0].node.send_payment_with_route(&route, payment_hash,
+                               RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+               }
                {
                        let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                        *feerate_lock = *feerate_lock * 10;
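                        // At roughly 10x the feerate, each queued 2_500_000 msat HTLC should now fall
                        // below the dust threshold, so the fee bump is what pushes exposure over the limit.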
@@ -9733,20 +9606,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        added_monitors.clear();
 }
 
+fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
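+       // The twelve calls above cover every combination of dust-balance direction
+       // (outbound/inbound), breach event (forward/reception/fee update), and which
+       // commitment transaction carries the dust (holder vs. counterparty).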
+}
+
 #[test]
 fn test_max_dust_htlc_exposure() {
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
+       do_test_max_dust_htlc_exposure_by_threshold_type(false);
+       do_test_max_dust_htlc_exposure_by_threshold_type(true);
 }
 
 #[test]
@@ -9861,7 +9739,8 @@ fn accept_busted_but_better_fee() {
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
                        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
                        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
-                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
+                               [nodes[0].node.get_our_node_id()], 100000);
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
                },
@@ -9926,3 +9805,321 @@ fn test_payment_with_custom_min_cltv_expiry_delta() {
        do_payment_with_custom_min_final_cltv_expiry(true, false);
        do_payment_with_custom_min_final_cltv_expiry(true, true);
 }
+
+#[test]
+fn test_disconnects_peer_awaiting_response_ticks() {
+       // Tests that nodes which are awaiting a response critical to channel responsiveness
+       // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Asserts a disconnect event is queued to the user.
+       let check_disconnect_event = |node: &Node, should_disconnect: bool| {
+               let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
+                       if let MessageSendEvent::HandleError { action, .. } = event {
+                               if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
+                                       Some(())
+                               } else {
+                                       None
+                               }
+                       } else {
+                               None
+                       }
+               );
+               assert_eq!(disconnect_event.is_some(), should_disconnect);
+       };
+
+       // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
+       // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let check_disconnect = |node: &Node| {
+               // No disconnect without any timer ticks.
+               check_disconnect_event(node, false);
+
+               // No disconnect with 1 timer tick less than required.
+               for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
+                       node.node.timer_tick_occurred();
+                       check_disconnect_event(node, false);
+               }
+
+               // Disconnect after reaching the required ticks.
+               node.node.timer_tick_occurred();
+               check_disconnect_event(node, true);
+
+               // Disconnect again on the next tick if the peer hasn't been disconnected yet.
+               node.node.timer_tick_occurred();
+               check_disconnect_event(node, true);
+       };
+
+       create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+       // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
+       *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+       nodes[0].node.timer_tick_occurred();
+       check_added_monitors!(&nodes[0], 1);
+       let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
+       check_added_monitors!(&nodes[1], 1);
+
+       // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
+       let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
+       check_added_monitors!(&nodes[0], 1);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
+       check_added_monitors(&nodes[0], 1);
+
+       // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
+       // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
+       // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+       check_disconnect(&nodes[1]);
+
+       // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
+       //
+       // Note that since the commitment dance didn't complete above, Alice is expected to resend her
+       // final `RevokeAndACK` to Bob to complete it.
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       let bob_init = msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       };
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
+       let alice_init = msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       };
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
+
+       // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
+       // received Bob's yet, so she should disconnect him after reaching
+       // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let alice_channel_reestablish = get_event_msg!(
+               nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
+       );
+       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
+       check_disconnect(&nodes[0]);
+
+       // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
+       let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
+               if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
+                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       Some(msg.clone())
+               } else {
+                       None
+               }
+       ).unwrap();
+       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
+
+       // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
+       for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+               nodes[0].node.timer_tick_occurred();
+               check_disconnect_event(&nodes[0], false);
+       }
+
+       // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
+       // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       check_disconnect(&nodes[1]);
+
+       // Finally, have Bob process the last message.
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
+       check_added_monitors(&nodes[1], 1);
+
+       // At this point, neither node should attempt to disconnect each other, since they aren't
+       // waiting on any messages.
+       for node in &nodes {
+               for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+                       node.node.timer_tick_occurred();
+                       check_disconnect_event(node, false);
+               }
+       }
+}
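+// A minimal illustrative sketch (not LDK's actual implementation; `AwaitingResponse`
+// and `LIMIT_TICKS` are invented names) of the per-peer pattern the test above
+// exercises: a counter that resets whenever the awaited message arrives and requests
+// a disconnect once it reaches the tick limit.
+struct AwaitingResponse { ticks_waiting: usize }
+
+impl AwaitingResponse {
+       const LIMIT_TICKS: usize = 2; // stand-in for DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+       /// Called on each timer tick; returns true once the peer should be disconnected.
+       fn on_timer_tick(&mut self) -> bool {
+               self.ticks_waiting += 1;
+               self.ticks_waiting >= Self::LIMIT_TICKS
+       }
+       /// Called whenever the awaited message (e.g. Alice's `RevokeAndACK`) is handled.
+       fn on_awaited_message(&mut self) { self.ticks_waiting = 0; }
+}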
+
+#[test]
+fn test_remove_expired_outbound_unfunded_channels() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => (),
+               _ => panic!("Unexpected event"),
+       };
+
+       // Asserts whether the outbound channel still exists in nodes[0]'s peer state map.
+       let check_outbound_channel_existence = |should_exist: bool| {
+               let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+               let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+               assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+       };
+
+       // Channel should exist without any timer ticks.
+       check_outbound_channel_existence(true);
+
+       // Channel should exist with 1 timer tick less than required.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+               nodes[0].node.timer_tick_occurred();
+               check_outbound_channel_existence(true);
+       }
+
+       // Remove channel after reaching the required ticks.
+       nodes[0].node.timer_tick_occurred();
+       check_outbound_channel_existence(false);
+
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+                       assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
+               },
+               _ => panic!("Unexpected event"),
+       }
+       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+}
+
+#[test]
+fn test_remove_expired_inbound_unfunded_channels() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => (),
+               _ => panic!("Unexpected event"),
+       };
+
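+       // As above, we never fund the channel, so this time the inbound channel on nodes[1]
+       // (the acceptor) should be timed out.
+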
+       // Asserts whether the inbound channel exists in nodes[1]'s peer state map.
+       let check_inbound_channel_existence = |should_exist: bool| {
+               let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+               let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
+               assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+       };
+
+       // Channel should exist without any timer ticks.
+       check_inbound_channel_existence(true);
+
+       // Channel should still exist with one timer tick fewer than required.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+               nodes[1].node.timer_tick_occurred();
+               check_inbound_channel_existence(true);
+       }
+
+       // The channel should be removed once the required number of ticks has elapsed.
+       nodes[1].node.timer_tick_occurred();
+       check_inbound_channel_existence(false);
+
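+       // As in the outbound case, the timeout should produce a force-close error message.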
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+                       assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
+               },
+               _ => panic!("Unexpected event"),
+       }
+       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+}
+
+fn do_test_multi_post_event_actions(do_reload: bool) {
+       // Tests handling multiple post-Event actions at once.
+       // There is specific code in ChannelManager to handle channels where multiple post-Event
+       // `ChannelMonitorUpdate`s are pending at once. This test exercises that code.
+       //
+       // Specifically, we test calling `get_and_clear_pending_events` while there are two
+       // `PaymentSent` events from different channels and one channel has two pending
+       // `ChannelMonitorUpdate`s - one from an RAA and one from an inbound `commitment_signed`.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let (persister, chain_monitor);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes_0_deserialized;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
+
+       send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+
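+       // Route two payments (one over each channel) which we will claim below to generate two
+       // `PaymentSent` events on nodes[0].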
+       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+
+       nodes[1].node.claim_funds(our_payment_preimage);
+       check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+       nodes[2].node.claim_funds(payment_preimage_2);
+       check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
+
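+       // Deliver both fulfills to nodes[0], queueing up a `PaymentSent` event on each channel
+       // without yet processing the pending events.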
+       for dest in &[1, 2] {
+               let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
+               check_added_monitors(&nodes[0], 0);
+       }
+
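+       // Have nodes[1] send a third payment to nodes[0]; nodes[0]'s RAA + CS response to it is
+       // what should be released once the pending events are processed below.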
+       let (route, payment_hash_3, _, payment_secret_3) =
+               get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
+       let payment_id = PaymentId(payment_hash_3.0);
+       nodes[1].node.send_payment_with_route(&route, payment_hash_3,
+               RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
+       check_added_monitors(&nodes[1], 1);
+
+       let send_event = SendEvent::from_node(&nodes[1]);
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
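+       // nodes[0] has not yet responded with its RAA + CS, as the corresponding
+       // `ChannelMonitorUpdate`s are held until the pending events are processed.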
+
+       if do_reload {
+               let nodes_0_serialized = nodes[0].node.encode();
+               let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+               let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
+               reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
+
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
+       }
+
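+       // Whether or not we reloaded, all four events should now be pending on nodes[0].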
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 4);
+       if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+               assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
+       } else { panic!(); }
+       if let Event::PaymentSent { payment_preimage, .. } = events[1] {
+               assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
+       } else { panic!(); }
+       if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
+       if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
+
+       // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
+       // completion, we'll respond to nodes[1] with an RAA + CS.
+       get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+       check_added_monitors(&nodes[0], 3);
+}
+
+#[test]
+fn test_multi_post_event_actions() {
+       do_test_multi_post_event_actions(true);
+       do_test_multi_post_event_actions(false);
+}