Merge pull request #2441 from arik-so/2023-07-taproot-signer-wrapped
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 9d894a7ac3ffc859013a7cc4e9237630d8ba8c19..f5ba7166ca71b80c0732c0f758001b82f9aa371c 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
+use crate::sign::{EcdsaChannelSigner, EntropySource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
 use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
@@ -26,8 +26,8 @@ use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
-use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
-use crate::ln::features::{ChannelFeatures, NodeFeatures};
+use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route};
+use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
@@ -35,7 +35,7 @@ use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
 use crate::util::string::UntrustedString;
-use crate::util::config::UserConfig;
+use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::blockdata::script::{Builder, Script};
@@ -61,6 +61,8 @@ use crate::sync::{Arc, Mutex};
 use crate::ln::functional_test_utils::*;
 use crate::ln::chan_utils::CommitmentTransaction;
 
+use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS;
+
 #[test]
 fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
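The import churn above tracks the two refactors threaded through this file: fee and weight helpers now take a full `ChannelTypeFeatures` instead of a bare `opt_anchors: bool`, and the channel signer is reached through a wrapper type so a Taproot signer can later sit alongside the ECDSA one (hence `ChannelSigner` no longer being imported directly). A minimal sketch of the features value these tests construct, assuming the generated `supports_anchors_zero_fee_htlc_tx` accessor:

    // Illustrative: the channel type most tests below assume. A
    // static-remote-key-only type has no anchor outputs, matching the
    // old `opt_anchors = false` call sites.
    let features = ChannelTypeFeatures::only_static_remote_key();
    assert!(!features.supports_anchors_zero_fee_htlc_tx());
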
@@ -155,8 +157,8 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        // Have node0 initiate a channel to node1 with aforementioned parameters
        let mut push_amt = 100_000_000;
        let feerate_per_kw = 253;
-       let opt_anchors = false;
-       push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+       push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
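Worked numbers for the buffer subtracted above, assuming the upstream constants for a static-remote-key channel (`commitment_tx_base_weight` = 724, `COMMITMENT_TX_WEIGHT_PER_HTLC` = 172):

    // 253 sat/kW over the base weight plus four HTLCs, floored to
    // whole satoshis but expressed in msat:
    let fee_buffer_msat = 253u64 * (724 + 4 * 172) / 1000 * 1000;
    assert_eq!(fee_buffer_msat, 357_000); // 253 * 1412 = 357_236, floored
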
@@ -201,7 +203,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                        // Note that for outbound channels we have to consider the commitment tx fee and the
                        // "fee spike buffer", which is currently a multiple of the total commitment tx fee as
                        // well as an additional HTLC.
-                       - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, opt_anchors));
+                       - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features));
        } else {
                send_payment(&nodes[1], &[&nodes[0]], push_amt);
        }
@@ -651,14 +653,14 @@ fn test_update_fee_that_funder_cannot_afford() {
        let default_config = UserConfig::default();
        let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
 
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee
        // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we
        // calculate two different feerates here - the expected local limit as well as the expected
        // remote limit.
-       let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(opt_anchors) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
-       let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(opt_anchors)) as u32;
+       let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
+       let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
        {
                let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                *feerate_lock = feerate;
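The two feerates above simply invert the commitment fee formula `fee_sats = weight * feerate / 1000` over A's spendable balance; a sketch with illustrative names:

    // Highest feerate a funder with `spendable_sats` can afford on a
    // commitment transaction of `tx_weight` weight units.
    fn max_affordable_feerate(spendable_sats: u64, tx_weight: u64) -> u32 {
        (spendable_sats * 1000 / tx_weight) as u32
    }
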
@@ -677,7 +679,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 
                //We made sure neither party's funds are below the dust limit and there are no HTLCs here
                assert_eq!(commitment_tx.output.len(), 2);
-               let total_fee: u64 = commit_tx_fee_msat(feerate, 0, opt_anchors) / 1000;
+               let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
                let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
                actual_fee = channel_value - actual_fee;
                assert_eq!(total_fee, actual_fee);
@@ -701,7 +703,7 @@ fn test_update_fee_that_funder_cannot_afford() {
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = local_chan.get_signer();
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
                 pubkeys.funding_pubkey)
        };
@@ -710,9 +712,9 @@ fn test_update_fee_that_funder_cannot_afford() {
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
                let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = remote_chan.get_signer();
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+                chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
                 pubkeys.funding_pubkey)
        };
 
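These hunks are the heart of the PR: `get_signer()` now hands back a wrapper rather than the ECDSA signer itself. Trait-level operations (`pubkeys()`, `get_per_commitment_point()`, `release_commitment_secret()`) go through `as_ref()`, while ECDSA-specific calls go through `as_ecdsa()`, leaving room for a Taproot variant later. The access pattern, mirroring the lines above:

    let signer = remote_chan.get_signer();
    let pubkeys = signer.as_ref().pubkeys();     // common ChannelSigner surface
    let point = signer.as_ref()
        .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx);
    let ecdsa = signer.as_ecdsa().unwrap();      // ECDSA-only surface
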
@@ -729,14 +731,14 @@ fn test_update_fee_that_funder_cannot_afford() {
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        INITIAL_COMMITMENT_NUMBER - 1,
                        push_sats,
-                       channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, opt_anchors) / 1000,
-                       opt_anchors, local_funding, remote_funding,
+                       channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
+                       local_funding, remote_funding,
                        commit_tx_keys.clone(),
                        non_buffer_feerate + 4,
                        &mut htlcs,
                        &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
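`new_with_auxiliary_htlc_data` loses its standalone anchors argument; presumably the directed channel parameters passed last now carry the `ChannelTypeFeatures`, so the constructor no longer needs it spelled out. Signing the hand-built commitment then goes through the ECDSA half of the wrapped signer. Call shape, with unrelated arguments given illustrative names:

    let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
        commitment_number, to_broadcaster_sats, to_countersignatory_sats,
        broadcaster_funding_key, countersignatory_funding_key,
        keys, feerate_per_kw, &mut htlcs_with_aux, &directed_params);
    let sig = signer.as_ecdsa().unwrap()
        .sign_counterparty_commitment(&tx, Vec::new(), &secp_ctx).unwrap();
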
@@ -761,7 +763,8 @@ fn test_update_fee_that_funder_cannot_afford() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
+               [nodes[0].node.get_our_node_id()], channel_value);
 }
 
 #[test]
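From here on, nearly every `check_closed_event!` call site gains two arguments: the expected counterparty node id(s) for the closed channel(s), and the channel capacity in satoshis. Usage, as in the hunk above:

    // check_closed_event!(node, event_count, reason,
    //                     [counterparty_node_id, ...], capacity_sats);
    check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed,
        [nodes[0].node.get_our_node_id()], 100000);
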
@@ -860,8 +863,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -974,8 +977,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1083,17 +1086,17 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1285,7 +1288,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        mine_transaction(&nodes[0], &remote_txn[0]);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
        let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
@@ -1346,7 +1349,7 @@ fn test_basic_channel_reserve() {
        let channel_reserve = chan_stat.channel_reserve_msat;
 
        // The 2* and +1 are for the fee spike reserve.
-       let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, get_opt_anchors!(nodes[0], nodes[1], chan.2));
+       let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2));
        let max_can_send = 5000000 - channel_reserve - commit_tx_fee;
        let (mut route, our_payment_hash, _, our_payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
@@ -1414,23 +1417,23 @@ fn test_fee_spike_violation_fails_htlc() {
                let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = local_chan.get_signer();
                // Make the signer believe we validated another commitment, so we can release the secret
-               chan_signer.get_enforcement_state().last_holder_commitment -= 1;
+               chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
 
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
-                chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
-                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
-                chan_signer.pubkeys().funding_pubkey)
+                chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
+                chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx),
+                chan_signer.as_ref().pubkeys().funding_pubkey)
        };
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
                let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
                let chan_signer = remote_chan.get_signer();
-               let pubkeys = chan_signer.pubkeys();
+               let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
-                chan_signer.pubkeys().funding_pubkey)
+                chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx),
+                chan_signer.as_ref().pubkeys().funding_pubkey)
        };
 
        // Assemble the set of keys we can use for signatures for our commitment_signed message.
@@ -1460,13 +1463,13 @@ fn test_fee_spike_violation_fails_htlc() {
                        commitment_number,
                        95000,
                        local_chan_balance,
-                       local_chan.context.opt_anchors(), local_funding, remote_funding,
+                       local_funding, remote_funding,
                        commit_tx_keys.clone(),
                        feerate_per_kw,
                        &mut vec![(accepted_htlc_info, ())],
                        &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
+               local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
@@ -1519,10 +1522,10 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
 
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
@@ -1550,13 +1553,13 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
        // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
 
@@ -1593,7 +1596,8 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
+               [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1607,18 +1611,18 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
        // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
 
        let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
-               + feerate_per_kw as u64 * htlc_success_tx_weight(opt_anchors) / 1000 * 1000 - 1;
+               + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
        // commitment transaction fee.
@@ -1648,12 +1652,12 @@ fn test_chan_init_feerate_unaffordability() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let default_config = UserConfig::default();
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
        // HTLC.
        let mut push_amt = 100_000_000;
-       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
+       push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
        assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
                APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
 
@@ -1718,10 +1722,10 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
        let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
 
        // Add a 2* and +1 for the fee spike reserve.
-       let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
+       let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
        let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2;
        let amt_msat_1 = recv_value_1 + total_routing_fee_msat;
 
@@ -1739,7 +1743,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
 
        // Attempt to trigger a channel reserve violation --> payment failure.
-       let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, opt_anchors);
+       let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
        let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1;
        let amt_msat_2 = recv_value_2 + total_routing_fee_msat;
        let mut route_2 = route_1.clone();
@@ -1770,7 +1774,8 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1794,8 +1799,8 @@ fn test_inbound_outbound_capacity_is_not_zero() {
        assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000);
 }
 
-fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, opt_anchors: bool) -> u64 {
-       (commitment_tx_base_weight(opt_anchors) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
+fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 {
+       (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
 }
 
 #[test]
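A quick sanity check of the reworked helper, again assuming base weight 724 and 172 per HTLC for a static-remote-key channel:

    // (724 + 2*172) * 253 = 270_204, floored to whole sats in msat:
    let fee = commit_tx_fee_msat(253, 2,
        &ChannelTypeFeatures::only_static_remote_key());
    assert_eq!(fee, 270_000);
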
@@ -1830,7 +1835,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        let feemsat = 239; // set above
        let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_1.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2);
 
        let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat;
 
@@ -1855,7 +1860,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
                // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve.
                // Also, ensure that each payment has enough to be over the dust limit to
                // ensure it'll be included in each commit tx fee calculation.
-               let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
+               let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
                let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
                if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
                        break;
@@ -1892,7 +1897,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
        // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
        // policy.
-       let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors);
+       let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
        let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
        let amt_msat_1 = recv_value_1 + total_fee_msat;
 
@@ -1921,7 +1926,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        }
 
        // split the rest to test holding cell
-       let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, opt_anchors);
+       let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
        let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
        let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
        let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
@@ -2040,11 +2045,11 @@ fn test_channel_reserve_holding_cell_htlcs() {
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
 
-       let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, opt_anchors);
+       let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
        let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
        send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
 
-       let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
        let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat);
        let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
        assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
@@ -2119,7 +2124,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+       expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
@@ -2148,7 +2153,7 @@ fn channel_reserve_in_flight_removes() {
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+       expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
        check_added_monitors!(nodes[1], 1);
@@ -2251,8 +2256,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
        // One pending HTLC is discarded by the force-close:
        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -2273,8 +2278,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        macro_rules! claim_funds {
                ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {
@@ -2318,8 +2323,8 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
-       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
        // confusing us in the following tests.
@@ -2392,8 +2397,8 @@ fn channel_monitor_network_test() {
 
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
                ChannelMonitorUpdateStatus::Completed);
-       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed);
-       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2441,7 +2446,7 @@ fn test_justice_tx_htlc_timeout() {
                        node_txn.swap_remove(0);
                }
                check_added_monitors!(nodes[1], 1);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2449,7 +2454,7 @@ fn test_justice_tx_htlc_timeout() {
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                // Broadcast revoked HTLC-timeout on node 1
                mine_transaction(&nodes[1], &node_txn[1]);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2504,11 +2509,11 @@ fn test_justice_tx_htlc_success() {
                test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                check_added_monitors!(nodes[1], 1);
                mine_transaction(&nodes[0], &node_txn[1]);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2536,7 +2541,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
 
@@ -2546,7 +2551,7 @@ fn revoked_output_claim() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        get_announce_close_broadcast_events(&nodes, 0, 1);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2583,10 +2588,10 @@ fn claim_htlc_outputs_shared_tx() {
        {
                mine_transaction(&nodes[0], &revoked_local_txn[0]);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                mine_transaction(&nodes[1], &revoked_local_txn[0]);
                check_added_monitors!(nodes[1], 1);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
@@ -2645,7 +2650,7 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[0], 1);
                confirm_transaction_at(&nodes[1], &revoked_local_txn[0], 100);
                check_added_monitors!(nodes[1], 1);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let mut events = nodes[0].node.get_and_clear_pending_events();
                expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
                match events.last().unwrap() {
@@ -2757,7 +2762,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 2);
        check_spends!(node_txn[0], commitment_tx[0]);
@@ -2874,7 +2879,7 @@ fn test_htlc_on_chain_success() {
        mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert!(node_txn.len() == 1 || node_txn.len() == 3); // HTLC-Success, 2* RBF bumps of above HTLC txn
        let commitment_spend =
@@ -2982,14 +2987,15 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 0);
 
        // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
        // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
        mine_transaction(&nodes[1], &commitment_tx[0]);
-       check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false);
+       check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
+               , [nodes[2].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], 200 - nodes[2].best_block_info().1);
        let timeout_tx = {
                let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
@@ -3033,7 +3039,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], commitment_tx[0]);
@@ -3070,7 +3076,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
@@ -3463,7 +3469,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 3);
@@ -3473,7 +3479,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        connect_block(&nodes[1], &block);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
@@ -3525,7 +3531,7 @@ fn test_force_close_fail_back() {
        nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3540,7 +3546,7 @@ fn test_force_close_fail_back() {
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -3578,12 +3584,14 @@ fn test_dup_events_on_peer_disconnect() {
        check_added_monitors!(nodes[1], 1);
        let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]);
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+       expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.pending_htlc_claims.0 = 1;
+       reconnect_nodes(reconnect_args);
        expect_payment_path_successful!(nodes[0]);
 }
 
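`reconnect_nodes` swaps its positional tuple soup for a `ReconnectArgs` struct, so call sites only name the expectations that differ from the defaults. The pattern, using only fields that appear in this diff:

    let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
    reconnect_args.send_channel_ready = (true, true); // resend both ways
    reconnect_args.pending_htlc_claims.0 = 1;         // one pending claim a->b
    reconnect_nodes(reconnect_args);
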
@@ -3624,8 +3632,10 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false);
-       check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false);
+       check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false
+               , [nodes[1].node.get_our_node_id()], 1000000);
+       check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
+               , [nodes[0].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -3640,7 +3650,9 @@ fn test_simple_peer_disconnect() {
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.send_channel_ready = (true, true);
+       reconnect_nodes(reconnect_args);
 
        let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3649,7 +3661,7 @@ fn test_simple_peer_disconnect() {
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3662,7 +3674,10 @@ fn test_simple_peer_disconnect() {
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
        fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
 
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+       let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+       reconnect_args.pending_cell_htlc_fails.0 = 1;
+       reconnect_args.pending_cell_htlc_claims.0 = 1;
+       reconnect_nodes(reconnect_args);
        {
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 4);
@@ -3691,6 +3706,7 @@ fn test_simple_peer_disconnect() {
                        _ => panic!("Unexpected event"),
                }
        }
+       check_added_monitors(&nodes[0], 1);
 
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
        fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
@@ -3774,19 +3790,29 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                }
                // Even if the channel_ready messages get exchanged, as long as nothing further was
                // received on either side, both sides will need to resend them.
-               reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.send_channel_ready = (true, true);
+               reconnect_args.pending_htlc_adds.1 = 1;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 3 {
                // nodes[0] still wants its RAA + commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_args.pending_raa.0 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[0] still wants its commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 5 {
                // nodes[1] still wants its final RAA
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_raa.1 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 6 {
                // Everything was delivered...
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
        }
 
        let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -3810,7 +3836,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        nodes[1].node.process_pending_htlc_forwards();
 
@@ -3894,7 +3920,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        if messages_delivered < 2 {
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_claims.0 = 1;
+               reconnect_nodes(reconnect_args);
                if messages_delivered < 1 {
                        expect_payment_sent!(nodes[0], payment_preimage_1);
                } else {
@@ -3902,16 +3930,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                }
        } else if messages_delivered == 2 {
                // nodes[0] still wants its RAA + commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_args.pending_raa.1 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 3 {
                // nodes[0] still wants its commitment_signed
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[1] still wants its final RAA
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+               reconnect_args.pending_raa.0 = true;
+               reconnect_nodes(reconnect_args);
        } else if messages_delivered == 5 {
                // Everything was delivered...
-               reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
        }
 
        if messages_delivered == 1 || messages_delivered == 2 {
@@ -3921,7 +3956,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        }
-       reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+       reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
        if messages_delivered > 2 {
                expect_payment_path_successful!(nodes[0]);
@@ -4268,7 +4303,7 @@ macro_rules! check_spendable_outputs {
                        let secp_ctx = Secp256k1::new();
                        for event in events.drain(..) {
                                match event {
-                                       Event::SpendableOutputs { mut outputs } => {
+                                       Event::SpendableOutputs { mut outputs, channel_id: _ } => {
                                                for outp in outputs.drain(..) {
                                                        txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap());
                                                        all_outputs.push(outp);
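`Event::SpendableOutputs` now carries the id of the channel the outputs came from; the macro above simply ignores the new field when destructuring:

    match event {
        Event::SpendableOutputs { mut outputs, channel_id: _ } => {
            // drain and spend `outputs` as before; channel_id is unused here
        },
        _ => panic!("Unexpected event"),
    }
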
@@ -4299,7 +4334,7 @@ fn test_claim_sizeable_push_msat() {
        nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4328,7 +4363,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -4338,7 +4373,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        mine_transaction(&nodes[1], &node_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4366,7 +4401,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        mine_transaction(&nodes[1], &node_txn[0]);
@@ -4418,7 +4453,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -4462,7 +4497,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
 
        mine_transaction(&nodes[1], &node_txn[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        expect_payment_failed!(nodes[1], our_payment_hash, false);
 
@@ -4493,7 +4528,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
@@ -4530,7 +4565,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -4544,7 +4579,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: bogus justice tx, justice tx on revoked outputs
@@ -4598,7 +4633,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 
        assert_eq!(revoked_htlc_txn.len(), 1);
@@ -4614,7 +4649,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]));
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success
@@ -4694,7 +4729,7 @@ fn test_onchain_to_onchain_claim() {
        mine_transaction(&nodes[2], &commitment_tx[0]);
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 1);
@@ -4753,7 +4788,7 @@ fn test_onchain_to_onchain_claim() {
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        mine_transaction(&nodes[1], &commitment_tx[0]);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        // ChannelMonitor: HTLC-Success tx
        assert_eq!(b_txn.len(), 1);
@@ -4810,7 +4845,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        mine_transaction(&nodes[1], &commitment_txn[0]);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], TEST_FINAL_CLTV - 40 + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
 
        let htlc_timeout_tx;
@@ -4857,7 +4892,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        mine_transaction(&nodes[2], &commitment_txn[0]);
        check_added_monitors!(nodes[2], 2);
-       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4909,7 +4944,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
-       expect_payment_sent(&nodes[0], our_payment_preimage, None, true);
+       expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true);
 }
 
 #[test]
@@ -4935,7 +4970,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
 
        mine_transaction(&nodes[1], &local_txn[0]);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -5290,7 +5325,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        mine_transaction(&nodes[0], &local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
        let htlc_timeout = {
@@ -5377,7 +5412,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        let htlc_timeout = {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5419,7 +5454,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5427,7 +5462,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5452,7 +5487,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
-       expect_payment_sent_without_paths!(nodes[0], payment_preimage);
+       expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
        check_added_monitors!(nodes[0], 1);
@@ -5469,7 +5504,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5500,7 +5535,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5546,7 +5581,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5710,10 +5745,10 @@ fn test_fail_holding_cell_htlc_upon_free() {
        let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
 
        // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
-       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
 
        // Send a payment which passes reserve checks but gets stuck in the holding cell.
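
The `2 *` and `1 + 1` in `max_can_send` implement the fee spike buffer: the sender prices the commitment transaction as if one extra HTLC were already present, then doubles the result so a feerate spike cannot push the channel under reserve. A sketch of the arithmetic, assuming LDK's non-anchor weights of 724 for the commitment base and 172 per HTLC; the reserve value below is illustrative only.

```rust
// Sketch of the fee-spike-buffer arithmetic above. `1 + 1` counts the HTLC
// being added plus one buffer HTLC, and the `2 *` doubles the resulting fee.
const COMMIT_BASE_WEIGHT: u64 = 724; // assumed non-anchor base weight
const WEIGHT_PER_HTLC: u64 = 172;

fn commit_tx_fee_msat_sketch(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
    feerate_per_kw * (COMMIT_BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) / 1000 * 1000
}

fn main() {
    let feerate = 253; // default test fee estimator rate
    let channel_reserve = 1_000_000; // illustrative value only
    // 253 * (724 + 2 * 172) / 1000 * 1000 == 270_000 msat per buffered fee.
    let max_can_send = 5_000_000 - channel_reserve - 2 * commit_tx_fee_msat_sketch(feerate, 1 + 1);
    println!("max_can_send = {max_can_send} msat");
}
```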
@@ -5790,11 +5825,11 @@ fn test_free_and_fail_holding_cell_htlcs() {
        let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
 
        // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
        let amt_1 = 20000;
-       let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, opt_anchors) - amt_1;
+       let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1;
        let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1);
        let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2);
 
@@ -5920,10 +5955,10 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan_0_1.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan_0_1.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2);
 
        // Send a payment which passes reserve checks but gets stuck in the holding cell.
-       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send);
        let payment_event = {
                nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@ -6075,7 +6110,8 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6196,7 +6232,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6211,9 +6247,9 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        let channel_reserve = chan_stat.channel_reserve_msat;
        let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
-       let opt_anchors = get_opt_anchors!(nodes[0], nodes[1], chan.2);
+       let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
        // The 2* and +1 are for the fee spike reserve.
-       let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, opt_anchors);
+       let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
 
        let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound;
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
@@ -6232,7 +6268,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6277,7 +6313,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6301,7 +6337,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -6325,7 +6361,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6377,7 +6413,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6409,7 +6445,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6441,7 +6477,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6473,7 +6509,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6516,7 +6552,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6559,7 +6595,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -6606,7 +6642,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
 }
 
 #[test]
@@ -6833,7 +6869,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
@@ -6896,7 +6932,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
                expect_payment_failed!(nodes[0], dust_hash, false);
 
@@ -6916,7 +6952,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 
                connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
@@ -6978,7 +7014,7 @@ fn test_user_configurable_csv_delay() {
        open_channel.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
-               &low_our_to_self_config, 0, &nodes[0].logger, 42)
+               &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
@@ -7002,7 +7038,7 @@ fn test_user_configurable_csv_delay() {
                        _ => { panic!(); }
                }
        } else { panic!(); }
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
+       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
@@ -7010,7 +7046,7 @@ fn test_user_configurable_csv_delay() {
        open_channel.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
-               &high_their_to_self_config, 0, &nodes[0].logger, 42)
+               &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        {
                match error {
                        ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
@@ -7311,7 +7347,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
        connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
        let revoked_htlc_txn = {
@@ -7557,16 +7593,16 @@ fn test_counterparty_raa_skip_no_crash() {
                const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
                // Make signer believe we got a counterparty signature, so that it allows the revocation
-               keys.get_enforcement_state().last_holder_commitment -= 1;
-               per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+               keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
+               per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
 
                // Must revoke without gaps
-               keys.get_enforcement_state().last_holder_commitment -= 1;
-               keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+               keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
+               keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
 
-               keys.get_enforcement_state().last_holder_commitment -= 1;
+               keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
                next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
-                       &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+                       &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
        }
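
This is the signer-wrapping change the merge is about: `keys` is no longer an `EnforcingSigner` directly, so the ECDSA-only test surface (the enforcement state) is reached through `as_ecdsa()`, while protocol operations like `release_commitment_secret` go through `as_ref()`. A minimal sketch of the wrapping pattern; the names and shapes here are illustrative, not LDK's actual types.

```rust
// Sketch: a wrapper that can hold either an ECDSA or a taproot signer
// exposes the common API to all callers and the ECDSA-only surface
// (e.g. test enforcement state) behind an accessor, mirroring the
// `keys.as_ecdsa().unwrap()` calls above.
struct EcdsaSignerSketch { last_holder_commitment: u64 }

enum WrappedSignerSketch {
    Ecdsa(EcdsaSignerSketch),
    // Taproot(TaprootSignerSketch), // hypothetical second variant
}

impl WrappedSignerSketch {
    fn as_ecdsa(&mut self) -> Option<&mut EcdsaSignerSketch> {
        match self {
            WrappedSignerSketch::Ecdsa(s) => Some(s),
        }
    }
}

fn main() {
    let mut keys = WrappedSignerSketch::Ecdsa(EcdsaSignerSketch { last_holder_commitment: 2 });
    // Mirrors `keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;`
    keys.as_ecdsa().unwrap().last_holder_commitment -= 1;
}
```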
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
@@ -7579,7 +7615,8 @@ fn test_counterparty_raa_skip_no_crash() {
                });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -7612,7 +7649,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 3); //ChannelMonitor: justice txn * 3
@@ -7656,7 +7693,7 @@ fn test_channel_conf_timeout() {
 
        connect_blocks(&nodes[1], 1);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut);
+       check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
        let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_ev.len(), 1);
        match close_ev[0] {
@@ -7842,70 +7879,9 @@ fn test_manually_reject_inbound_channel_request() {
                }
                _ => panic!("Unexpected event"),
        }
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
-}
-
-#[test]
-fn test_reject_funding_before_inbound_channel_accepted() {
-       // This tests that when `UserConfig::manually_accept_inbound_channels` is set to true, inbound
-       // channels must be manually accepted through `ChannelManager::accept_inbound_channel` by
-       // the node operator before the counterparty sends a `FundingCreated` message. If a
-       // `FundingCreated` message is received before the channel is accepted, it should be rejected
-       // and the channel should be closed.
-       let mut manually_accept_conf = UserConfig::default();
-       manually_accept_conf.manually_accept_inbound_channels = true;
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
-       let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       let temp_channel_id = res.temporary_channel_id;
-
-       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
-
-       // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in the `msg_events`.
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       // Clear the `Event::OpenChannelRequest` event without responding to the request.
-       nodes[1].node.get_and_clear_pending_events();
-
-       // Get the `AcceptChannel` message of `nodes[1]` without calling
-       // `ChannelManager::accept_inbound_channel`, which generates a
-       // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
-       // `handle_accept_channel`, which is required in order for `create_funding_transaction` to
-       // succeed when `nodes[0]` is passed to it.
-       let accept_chan_msg = {
-               let mut node_1_per_peer_lock;
-               let mut node_1_peer_state_lock;
-               let channel =  get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
-               channel.get_accept_channel_message()
-       };
-       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
-
-       let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
-
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
-       let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-
-       // The `funding_created_msg` should be rejected by `nodes[1]` as it hasn't accepted the channel
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
-
-       let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(close_msg_ev.len(), 1);
-
-       let expected_err = "FundingCreated message received before the channel was accepted";
-       match close_msg_ev[0] {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id, } => {
-                       assert_eq!(msg.channel_id, temp_channel_id);
-                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-                       assert_eq!(msg.data, expected_err);
-               }
-               _ => panic!("Unexpected event"),
-       }
-
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+       // There should be no more events to process, as the channel was never opened.
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 }
 
 #[test]
@@ -7933,10 +7909,10 @@ fn test_can_not_accept_inbound_channel_twice() {
                        let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0);
                        match api_res {
                                Err(APIError::APIMisuseError { err }) => {
-                                       assert_eq!(err, "The channel isn't currently awaiting to be accepted.");
+                                       assert_eq!(err, "No such channel awaiting to be accepted.");
                                },
                                Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"),
-                               Err(_) => panic!("Unexpected Error"),
+                               Err(e) => panic!("Unexpected Error {:?}", e),
                        }
                }
                _ => panic!("Unexpected event"),
@@ -7964,11 +7940,11 @@ fn test_can_not_accept_unknown_inbound_channel() {
        let unknown_channel_id = [0; 32];
        let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0);
        match api_res {
-               Err(APIError::ChannelUnavailable { err }) => {
-                       assert_eq!(err, format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(unknown_channel_id), nodes[1].node.get_our_node_id()));
+               Err(APIError::APIMisuseError { err }) => {
+                       assert_eq!(err, "No such channel awaiting to be accepted.");
                },
                Ok(_) => panic!("It shouldn't be possible to accept an unknown channel"),
-               Err(_) => panic!("Unexpected Error"),
+               Err(e) => panic!("Unexpected Error: {:?}", e),
        }
 }
 
@@ -8039,7 +8015,9 @@ fn test_onion_value_mpp_set_calculation() {
                                RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
                        // Edit amt_to_forward to simulate the sender having set
                        // the final amount and the routing node taking less fee
-                       onion_payloads[1].amt_to_forward = 99_000;
+                       if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
+                               *amt_msat = 99_000;
+                       } else { panic!() }
                        let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
                        payment_event.msgs[0].onion_routing_packet = new_onion_packet;
                }
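
The direct field write `onion_payloads[1].amt_to_forward = 99_000` no longer compiles because the outbound onion payload is now an enum over hop kinds, with the final amount living in the `Receive` variant; hence the `if let ... else { panic!() }` above. A standalone sketch follows; the variant and field names match the diff, and the other fields are elided stand-ins.

```rust
// Sketch: the payload enum distinguishes forwarding hops from the final
// receiving hop, so the test must match the Receive variant to tweak the
// final amount.
enum OutboundOnionPayloadSketch {
    Forward { short_channel_id: u64, amt_to_forward: u64 },
    Receive { amt_msat: u64 },
}

fn main() {
    let mut payloads = vec![
        OutboundOnionPayloadSketch::Forward { short_channel_id: 42, amt_to_forward: 100_000 },
        OutboundOnionPayloadSketch::Receive { amt_msat: 100_000 },
    ];
    // Mirrors the edit above: shave the final amount to simulate the
    // routing node taking less fee than the sender budgeted.
    if let OutboundOnionPayloadSketch::Receive { ref mut amt_msat } = payloads[1] {
        *amt_msat = 99_000;
    } else { panic!() }
}
```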
@@ -8456,7 +8434,8 @@ fn test_concurrent_monitor_claim() {
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
-       check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
+       check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+               [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
        {
@@ -8504,7 +8483,8 @@ fn test_pre_lockin_no_chan_closed_update() {
        let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
-       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true);
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
+               [nodes[1].node.get_our_node_id(); 2], 100000);
 }
 
 #[test]
@@ -8539,7 +8519,7 @@ fn test_htlc_no_detection() {
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
+       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV);
 
        let htlc_timeout = {
@@ -8605,7 +8585,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
-       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -8615,7 +8595,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                }
        }
 
@@ -8695,7 +8675,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                if broadcast_alice {
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+                       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
@@ -8881,13 +8861,13 @@ fn test_duplicate_chan_id() {
        let (_, funding_created) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
+               // Once we call `get_funding_created` the channel has a duplicate channel_id as
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state).
                let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+               as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -8962,7 +8942,8 @@ fn test_error_chans_closed() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
-       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+               [nodes[1].node.get_our_node_id()], 100000);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -8972,7 +8953,8 @@ fn test_error_chans_closed() {
        let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 2);
-       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) });
+       check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
+               [nodes[1].node.get_our_node_id(); 2], 100000);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -9049,7 +9031,8 @@ fn test_invalid_funding_tx() {
 
        let expected_err = "funding tx had wrong script/value or output index";
        confirm_transaction_at(&nodes[1], &tx, 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() });
+       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
+               [nodes[0].node.get_our_node_id()], 100000);
        check_added_monitors!(nodes[1], 1);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -9115,7 +9098,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -9377,74 +9360,7 @@ fn test_inconsistent_mpp_params() {
        pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
        do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
-       expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true);
-}
-
-#[test]
-fn test_keysend_payments_to_public_node() {
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
-       let network_graph = nodes[0].network_graph.clone();
-       let payer_pubkey = nodes[0].node.get_our_node_id();
-       let payee_pubkey = nodes[1].node.get_our_node_id();
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-               final_value_msat: 10000,
-       };
-       let scorer = test_utils::TestScorer::new();
-       let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
-
-       let test_preimage = PaymentPreimage([42; 32]);
-       let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
-               RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       let event = events.pop().unwrap();
-       let path = vec![&nodes[1]];
-       pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
-       claim_payment(&nodes[0], &path, test_preimage);
-}
-
-#[test]
-fn test_keysend_payments_to_private_node() {
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let payer_pubkey = nodes[0].node.get_our_node_id();
-       let payee_pubkey = nodes[1].node.get_our_node_id();
-
-       let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
-       let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
-               final_value_msat: 10000,
-       };
-       let network_graph = nodes[0].network_graph.clone();
-       let first_hops = nodes[0].node.list_usable_channels();
-       let scorer = test_utils::TestScorer::new();
-       let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = find_route(
-               &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-               nodes[0].logger, &scorer, &(), &random_seed_bytes
-       ).unwrap();
-
-       let test_preimage = PaymentPreimage([42; 32]);
-       let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
-               RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       let event = events.pop().unwrap();
-       let path = vec![&nodes[1]];
-       pass_along_path(&nodes[0], &path, 10000, payment_hash, None, event, true, Some(test_preimage));
-       claim_payment(&nodes[0], &path, test_preimage);
+       expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true);
 }
 
 #[test]
@@ -9515,7 +9431,7 @@ enum ExposureEvent {
        AtUpdateFeeOutbound,
 }
 
-fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool) {
+fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool) {
        // Test that we properly reject dust HTLC violating our `max_dust_htlc_exposure_msat`
        // policy.
        //
@@ -9530,7 +9446,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let mut config = test_default_channel_config();
-       config.channel_config.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+       config.channel_config.max_dust_htlc_exposure = if multiplier_dust_limit {
+               // Default test fee estimator rate is 253 sat/kw, so we set the multiplier to 5_000_000 / 253
+               // to get roughly the same initial value as the default setting when this test was
+               // originally written.
+               MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253)
+       } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000) }; // initial default setting value
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
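
`max_dust_htlc_exposure_msat` is now the enum `MaxDustHTLCExposure`. Assuming the `FeeRateMultiplier` variant resolves to multiplier times the current sat/kw feerate, as the comment above implies, `5_000_000 / 253 = 19_762` gives an effective cap of `19_762 * 253 = 4_999_786` msat at the default test feerate, i.e. roughly the old fixed 5,000,000 msat setting. A sketch with stand-in types:

```rust
// Sketch of the two MaxDustHTLCExposure variants used above. The enum is a
// stand-in for the one in util::config; the multiplier semantics are an
// assumption drawn from the test's own comment.
enum MaxDustExposureSketch {
    FixedLimitMsat(u64),
    FeeRateMultiplier(u64),
}

fn effective_limit_msat(cfg: &MaxDustExposureSketch, feerate_sat_per_kw: u64) -> u64 {
    match cfg {
        MaxDustExposureSketch::FixedLimitMsat(limit) => *limit,
        MaxDustExposureSketch::FeeRateMultiplier(mult) => mult * feerate_sat_per_kw,
    }
}

fn main() {
    let feerate = 253; // default test fee estimator rate, per the comment above
    let cfg = MaxDustExposureSketch::FeeRateMultiplier(5_000_000 / feerate);
    // 5_000_000 / 253 == 19_762, and 19_762 * 253 == 4_999_786 msat,
    // i.e. roughly the old fixed 5_000_000 msat limit.
    assert_eq!(effective_limit_msat(&cfg, feerate), 4_999_786);
}
```

Unlike the fixed limit, this cap now moves with the fee estimator, which is what forces the `AtUpdateFeeOutbound` case later in this test to queue many small HTLCs before the threshold is crossed.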
@@ -9546,7 +9467,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
-       let opt_anchors = false;
+       let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
        let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
 
@@ -9574,20 +9495,21 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        let (mut route, payment_hash, _, payment_secret) =
                get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
 
-       let dust_buffer_feerate = {
+       let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               chan.context.get_dust_buffer_feerate(None) as u64
+               (chan.context.get_dust_buffer_feerate(None) as u64,
+               chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
-       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
 
-       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
 
        let dust_htlc_on_counterparty_tx: u64 = 4;
-       let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+       let dust_htlc_on_counterparty_tx_msat: u64 = max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
 
        if on_holder_tx {
                if dust_outbound_balance {
@@ -9639,7 +9561,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                                ), true, APIError::ChannelUnavailable { .. }, {});
                }
        } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
-               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 });
+               let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 });
                nodes[1].node.send_payment_with_route(&route, payment_hash,
                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
                check_added_monitors!(nodes[1], 1);
@@ -9652,18 +9574,24 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                        // Outbound dust balance: 6399 sats
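+                       // The logged exposure equals the dust HTLCs that fit under the limit
+                       // plus the single additional HTLC that breached it.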
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
                } else {
                        // Outbound dust balance: 5200 sats
                        nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
                                format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 1,
-                                       config.channel_config.max_dust_htlc_exposure_msat), 1);
+                                       dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
+                                       max_dust_htlc_exposure_msat), 1);
                }
        } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
                route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000;
-               nodes[0].node.send_payment_with_route(&route, payment_hash,
-                       RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+               // For the feerate-multiplier dust exposure limit, which scales with the
+               // feerate, we need to add enough HTLCs that will become dust at the new
+               // feerate to cross the threshold.
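+               // Illustrative numbers for the multiplier case (assuming the limit is
+               // feerate * multiplier, in msat): after the 10x feerate bump below, the
+               // limit becomes 2_530 * (5_000_000 / 253) = 49_997_860 msat, which the
+               // twenty 2_500_000 msat HTLCs (50_000_000 msat of prospective dust)
+               // just exceed.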
+               for _ in 0..20 {
+                       let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None);
+                       nodes[0].node.send_payment_with_route(&route, payment_hash,
+                               RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+               }
                {
                        let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
                        *feerate_lock = *feerate_lock * 10;
@@ -9678,20 +9606,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        added_monitors.clear();
 }
 
+fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool) {
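+       // Exercise each combination of dust-balance direction, breach event, and
+       // commitment-transaction side for the given dust threshold type.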
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit);
+       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit);
+}
+
 #[test]
 fn test_max_dust_htlc_exposure() {
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true);
-       do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
-       do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
+       do_test_max_dust_htlc_exposure_by_threshold_type(false);
+       do_test_max_dust_htlc_exposure_by_threshold_type(true);
 }
 
 #[test]
@@ -9806,7 +9739,8 @@ fn accept_busted_but_better_fee() {
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
                        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
                        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
-                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
+                               [nodes[0].node.get_our_node_id()], 100000);
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
                },
@@ -10000,3 +9934,192 @@ fn test_disconnects_peer_awaiting_response_ticks() {
                }
        }
 }
+
+#[test]
+fn test_remove_expired_outbound_unfunded_channels() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => (),
+               _ => panic!("Unexpected event"),
+       };
+
+       // Asserts whether the outbound channel is still present in nodes[0]'s peer state map.
+       let check_outbound_channel_existence = |should_exist: bool| {
+               let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+               let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+               assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+       };
+
+       // The channel should exist before any timer ticks occur.
+       check_outbound_channel_existence(true);
+
+       // The channel should still exist up to one timer tick short of the limit.
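+       // (`timer_tick_occurred` is expected to be called roughly once per minute, so
+       // the tick limit is effectively the channel's maximum age in minutes.)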
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+               nodes[0].node.timer_tick_occurred();
+               check_outbound_channel_existence(true);
+       }
+
+       // The channel is removed on the final required tick.
+       nodes[0].node.timer_tick_occurred();
+       check_outbound_channel_existence(false);
+
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+                       assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
+               },
+               _ => panic!("Unexpected event"),
+       }
+       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+}
+
+#[test]
+fn test_remove_expired_inbound_unfunded_channels() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::FundingGenerationReady { .. } => (),
+               _ => panic!("Unexpected event"),
+       };
+
+       // Asserts whether the inbound channel is still present in nodes[1]'s peer state map.
+       let check_inbound_channel_existence = |should_exist: bool| {
+               let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+               let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
+               assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+       };
+
+       // The channel should exist before any timer ticks occur.
+       check_inbound_channel_existence(true);
+
+       // The channel should still exist up to one timer tick short of the limit.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
+               nodes[1].node.timer_tick_occurred();
+               check_inbound_channel_existence(true);
+       }
+
+       // The channel is removed on the final required tick.
+       nodes[1].node.timer_tick_occurred();
+       check_inbound_channel_existence(false);
+
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+                       assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake");
+               },
+               _ => panic!("Unexpected event"),
+       }
+       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+}
+
+fn do_test_multi_post_event_actions(do_reload: bool) {
+       // Tests handling multiple post-Event actions at once.
+       // There is specific code in ChannelManager to handle channels where multiple post-Event
+       // `ChannelMonitorUpdates` are pending at once. This test exercises that code.
+       //
+       // Specifically, we test calling `get_and_clear_pending_events` while there are two
+       // pending `PaymentSent` events from different channels, and one channel has two pending
+       // `ChannelMonitorUpdate`s - one from an RAA and one from an inbound commitment_signed.
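+       // (The pending updates are intentionally blocked on the events being handled,
+       // so processing the events is what ultimately releases them - see below.)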
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let (persister, chain_monitor);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes_0_deserialized;
+       let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+       let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2;
+
+       send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+
+       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+
+       nodes[1].node.claim_funds(our_payment_preimage);
+       check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+       nodes[2].node.claim_funds(payment_preimage_2);
+       check_added_monitors!(nodes[2], 1);
+       expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
+
+       for dest in &[1, 2] {
+               let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false);
+               check_added_monitors(&nodes[0], 0);
+       }
+
+       let (route, payment_hash_3, _, payment_secret_3) =
+               get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
+       let payment_id = PaymentId(payment_hash_3.0);
+       nodes[1].node.send_payment_with_route(&route, payment_hash_3,
+               RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap();
+       check_added_monitors(&nodes[1], 1);
+
+       let send_event = SendEvent::from_node(&nodes[1]);
+       nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg);
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+       if do_reload {
+               let nodes_0_serialized = nodes[0].node.encode();
+               let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+               let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode();
+               reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized);
+
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+               nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+               reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
+       }
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 4);
+       if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+               assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
+       } else { panic!(); }
+       if let Event::PaymentSent { payment_preimage, .. } = events[1] {
+               assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2);
+       } else { panic!(); }
+       if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
+       if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); }
+
+       // After the events are processed, the ChannelMonitorUpdates will be released and, upon their
+       // completion, we'll respond to nodes[1] with an RAA + CS.
+       get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+       check_added_monitors(&nodes[0], 3);
+}
+
+#[test]
+fn test_multi_post_event_actions() {
+       do_test_multi_post_event_actions(true);
+       do_test_multi_post_event_actions(false);
+}