Merge pull request #2699 from mhrheaume/mhr/temporary_channel_id
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 5b1895ac15c94eeaa8f4289db47a541924fe84bd..98bb83a7f9d466f802499328dcabd9452528f9a9 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -15,12 +15,12 @@ use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
-use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use crate::chain::channelmonitor::{CLOSED_CHANNEL_UPDATE_ID, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use crate::chain::transaction::OutPoint;
-use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
@@ -40,7 +40,7 @@ use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 use bitcoin::hash_types::BlockHash;
 use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::network::constants::Network;
 use bitcoin::{PackedLockTime, Sequence, Transaction, TxIn, TxOut, Witness};
 use bitcoin::OutPoint as BitcoinOutPoint;
@@ -81,7 +81,7 @@ fn test_insane_channel_opens() {
        let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
 
        // Have node0 initiate a channel to node1 with aforementioned parameters
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap();
 
        // Extract the channel open message from node0 to node1
        let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -135,7 +135,7 @@ fn test_funding_exceeds_no_wumbo_limit() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None) {
+       match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
                Err(APIError::APIMisuseError { err }) => {
                        assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err);
                },
@@ -161,7 +161,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
-       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap();
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        if !send_from_initiator {
                open_channel_message.channel_reserve_satoshis = 0;
@@ -181,14 +181,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               if send_from_initiator {
-                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-                       chan.context.holder_selected_channel_reserve_satoshis = 0;
-                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
-               } else {
-                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-                       chan.context.holder_selected_channel_reserve_satoshis = 0;
-                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+
+               let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+               match channel_phase {
+                       ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+                               let chan_context = channel_phase.context_mut();
+                               chan_context.holder_selected_channel_reserve_satoshis = 0;
+                               chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+                       },
+                       ChannelPhase::Funded(_) => assert!(false),
                }
        }
 
@@ -522,7 +523,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        }
 
        if steps & 0x0f == 0 { return; }
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        if steps & 0x0f == 1 { return; }
@@ -701,7 +702,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
@@ -710,7 +713,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -725,7 +730,9 @@ fn test_update_fee_that_funder_cannot_afford() {
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
@@ -1029,7 +1036,8 @@ fn fake_network_test() {
                short_channel_id: chan_2.0.contents.short_channel_id,
                channel_features: ChannelFeatures::empty(),
                fee_msat: 0,
-               cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
+               cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32,
+               maybe_announced_channel: true,
        });
        hops.push(RouteHop {
                pubkey: nodes[3].node.get_our_node_id(),
@@ -1037,7 +1045,8 @@ fn fake_network_test() {
                short_channel_id: chan_3.0.contents.short_channel_id,
                channel_features: ChannelFeatures::empty(),
                fee_msat: 0,
-               cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
+               cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32,
+               maybe_announced_channel: true,
        });
        hops.push(RouteHop {
                pubkey: nodes[1].node.get_our_node_id(),
@@ -1046,6 +1055,7 @@ fn fake_network_test() {
                channel_features: nodes[1].node.channel_features(),
                fee_msat: 1000000,
                cltv_expiry_delta: TEST_FINAL_CLTV,
+               maybe_announced_channel: true,
        });
        hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
        hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
@@ -1060,7 +1070,8 @@ fn fake_network_test() {
                short_channel_id: chan_4.0.contents.short_channel_id,
                channel_features: ChannelFeatures::empty(),
                fee_msat: 0,
-               cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
+               cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32,
+               maybe_announced_channel: true,
        });
        hops.push(RouteHop {
                pubkey: nodes[2].node.get_our_node_id(),
@@ -1068,7 +1079,8 @@ fn fake_network_test() {
                short_channel_id: chan_3.0.contents.short_channel_id,
                channel_features: ChannelFeatures::empty(),
                fee_msat: 0,
-               cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
+               cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32,
+               maybe_announced_channel: true,
        });
        hops.push(RouteHop {
                pubkey: nodes[1].node.get_our_node_id(),
@@ -1077,6 +1089,7 @@ fn fake_network_test() {
                channel_features: nodes[1].node.channel_features(),
                fee_msat: 1000000,
                cltv_expiry_delta: TEST_FINAL_CLTV,
+               maybe_announced_channel: true,
        });
        hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
        hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
@@ -1237,7 +1250,7 @@ fn duplicate_htlc_test() {
        create_announced_chan_between_nodes(&nodes, 3, 4);
        create_announced_chan_between_nodes(&nodes, 3, 5);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
 
        *nodes[0].network_payment_count.borrow_mut() -= 1;
        assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
@@ -1265,7 +1278,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
        // balancing
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
 
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
        let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
@@ -1325,9 +1338,9 @@ fn test_duplicate_htlc_different_direction_onchain() {
        for e in events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-                       MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
+                       MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => {
                                assert_eq!(node_id, nodes[1].node.get_our_node_id());
-                               assert_eq!(msg.data, "Channel closed because commitment or closing transaction was confirmed on chain.");
+                               assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain.");
                        },
                        MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
                                assert!(update_add_htlcs.is_empty());
@@ -1418,7 +1431,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = local_chan.get_signer();
                // Make the signer believe we validated another commitment, so we can release the secret
                chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
@@ -1432,7 +1447,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let chan_signer = remote_chan.get_signer();
                let pubkeys = chan_signer.as_ref().pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -1461,7 +1478,9 @@ fn test_fee_spike_violation_fails_htlc() {
        let res = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+               let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap();
                let local_chan_signer = local_chan.get_signer();
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        commitment_number,
@@ -1539,7 +1558,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
        // Sending exactly enough to hit the reserve amount should be accepted
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
-               let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+               route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        // However one more HTLC should be significantly over the reserve amount and fail.
@@ -1569,7 +1588,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
-               let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+               route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        let (mut route, payment_hash, _, payment_secret) =
@@ -1630,11 +1649,11 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
        // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
        // commitment transaction fee.
-       let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
+       route_payment(&nodes[1], &[&nodes[0]], dust_amt);
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
        for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
-               let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+               route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
        }
 
        // One more than the dust amt should fail, however.
@@ -1662,13 +1681,13 @@ fn test_chan_init_feerate_unaffordability() {
        // HTLC.
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
-       assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None).unwrap_err(),
+       assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
                APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
 
        // During open, we don't have a "counterparty channel reserve" to check against, so that
        // requirement only comes into play on the open_channel handling side.
        push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
        let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        open_channel_msg.push_msat += 1;
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
@@ -1695,22 +1714,22 @@ fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
 
        let payment_amt = 46000; // Dust amount
        // In the previous code, these first four payments would succeed.
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 
        // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 
        // And this last payment previously resulted in nodes[1] closing on its inbound-channel
        // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
        // transaction fee and therefore perceived this next payment as a channel reserve violation.
-       let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+       route_payment(&nodes[0], &[&nodes[1]], payment_amt);
 }
 
 #[test]
@@ -1846,7 +1865,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
        {
                let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
-                       .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
+                       .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
                let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
                route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
                assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
@@ -1871,7 +1890,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
                }
 
                let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
-                       .with_bolt11_features(nodes[2].node.invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
+                       .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
                let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
                let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
                claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
@@ -2096,8 +2115,8 @@ fn channel_reserve_in_flight_removes() {
        let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
        // Route the first two HTLCs.
        let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
 
        // Start routing the third HTLC (this is just used to get everyone in the right state).
        let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -2264,7 +2283,7 @@ fn channel_monitor_network_test() {
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 
        // One pending HTLC is discarded by the force-close:
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
@@ -2335,7 +2354,7 @@ fn channel_monitor_network_test() {
        let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
 
        // One pending HTLC to time out:
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
        // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
        // buffer space).
 
@@ -2350,7 +2369,7 @@ fn channel_monitor_network_test() {
                        _ => panic!("Unexpected event"),
                };
                match events[1] {
-                       MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
+                       MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[4].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
@@ -2382,13 +2401,14 @@ fn channel_monitor_network_test() {
                        _ => panic!("Unexpected event"),
                };
                match events[1] {
-                       MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
+                       MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[3].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
                }
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
+               check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
 
                mine_transaction(&nodes[4], &node_txn[0]);
                check_preimage_claim(&nodes[4], &node_txn);
@@ -2400,9 +2420,8 @@ fn channel_monitor_network_test() {
        assert_eq!(nodes[4].node.list_channels().len(), 0);
 
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
-               ChannelMonitorUpdateStatus::Completed);
-       check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
+               Ok(ChannelMonitorUpdateStatus::Completed));
+       check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2640,7 +2659,7 @@ fn claim_htlc_outputs_shared_tx() {
        send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
        let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
-       let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2707,7 +2726,7 @@ fn claim_htlc_outputs_single_tx() {
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
        // time as two different claim transactions, since we're going to time out the htlc given a high current height
        let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
-       let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+       let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2810,8 +2829,8 @@ fn test_htlc_on_chain_success() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
-       let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
        // Broadcast legit commitment tx from C on B's chain
        // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
@@ -2894,7 +2913,7 @@ fn test_htlc_on_chain_success() {
        let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
 
        match nodes_2_event {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
                _ => panic!("Unexpected event"),
        }
 
@@ -3032,7 +3051,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+       let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
 
        // Broadcast legit commitment tx from C on B's chain
        let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
@@ -3137,13 +3156,13 @@ fn test_simple_commitment_revoked_fail_backward() {
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+       let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
        // Get the will-be-revoked local txn from nodes[2]
        let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        // Revoke the old state
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
-       let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+       let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
@@ -3196,7 +3215,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
-       let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
+       let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
        // Get the will-be-revoked local txn from nodes[2]
        let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
@@ -3207,12 +3226,12 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
                // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
                nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
-                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
+                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
        } else { 3000000 };
 
-       let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
-       let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
-       let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+       let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+       let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+       let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
        nodes[2].node.fail_htlc_backwards(&first_payment_hash);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
@@ -3339,7 +3358,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
        match nodes_2_event {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => {
                        assert_eq!(channel_id, chan_2.2);
                        assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain.");
                },
@@ -3647,7 +3666,7 @@ fn test_dup_events_on_peer_disconnect() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        nodes[1].node.claim_funds(payment_preimage);
        expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
@@ -3676,7 +3695,7 @@ fn test_peer_disconnected_before_funding_broadcasted() {
 
        // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never
        // broadcasted, even though it's created by `nodes[0]`.
-       let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+       let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
        let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
        let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -3702,7 +3721,7 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(&nodes[0], 1, ClosureReason::DisconnectedPeer, false
+       check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
@@ -3733,7 +3752,7 @@ fn test_simple_peer_disconnect() {
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
-       let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
+       let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
        let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
        let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3867,13 +3886,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        } else if messages_delivered == 3 {
                // nodes[0] still wants its RAA + commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_args.pending_responding_commitment_signed.0 = true;
                reconnect_args.pending_raa.0 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[0] still wants its commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.0 = -1;
+               reconnect_args.pending_responding_commitment_signed.0 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 5 {
                // nodes[1] still wants its final RAA
@@ -4001,13 +4020,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        } else if messages_delivered == 2 {
                // nodes[0] still wants its RAA + commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_args.pending_responding_commitment_signed.1 = true;
                reconnect_args.pending_raa.1 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 3 {
                // nodes[0] still wants its commitment_signed
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
-               reconnect_args.pending_htlc_adds.1 = -1;
+               reconnect_args.pending_responding_commitment_signed.1 = true;
                reconnect_nodes(reconnect_args);
        } else if messages_delivered == 4 {
                // nodes[1] still wants its final RAA
@@ -4080,6 +4099,73 @@ fn test_channel_ready_without_best_block_updated() {
        nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
 }
 
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Let channel_manager get ahead of chain_monitor by 1 block.
+       // This emulates a race condition where a newly added channel_monitor skips processing 1 block,
+       // in the case where the client calls block_connected on channel_manager first and then on chain_monitor.
+       let height_1 = nodes[0].best_block_info().1 + 1;
+       let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+       nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+       nodes[0].node.block_connected(&block_1, height_1);
+
+       // Create channel, and it gets added to chain_monitor in funding_created.
+       let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+       // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1,
+       // but its best_block is block_1, since that was populated by channel_manager, and channel_manager
+       // was running ahead of chain_monitor at the time of funding_created.
+       // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+       // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
+       confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+       connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+       // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+       let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Let chain_monitor get ahead of channel_manager by 1 block.
+       // This emulates a race condition where a newly added channel_monitor skips processing 1 block,
+       // in the case where the client calls block_connected on chain_monitor first and then on channel_manager.
+       let height_1 = nodes[0].best_block_info().1 + 1;
+       let mut block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+       nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+       nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
+
+       // Create channel, and it gets added to chain_monitor in funding_created.
+       let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+       // channel_manager can't really skip block_1; it should get it eventually.
+       nodes[0].node.block_connected(&block_1, height_1);
+
+       // Now, the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block is
+       // the block before block_1, since that was populated by channel_manager, and channel_manager was
+       // running behind at the time of funding_created.
+       // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+       // Hence, this channel's channel_monitor skipped block_1 and directly tries to process subsequent blocks.
+       confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+       connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+       // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+       let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
 #[test]
 fn test_drop_messages_peer_disconnect_dual_htlc() {
        // Test that we can handle reconnecting when both sides of a channel have pending
@@ -4090,7 +4176,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -4494,7 +4580,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
 
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(commitment_tx[0].input.len(), 1);
@@ -4544,7 +4630,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        // Rebalance the network a bit by relaying one payment through all the channels ...
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
 
-       let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
 
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(commitment_tx[0].input.len(), 1);
@@ -4784,7 +4870,7 @@ fn test_onchain_to_onchain_claim() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
 
-       let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
        let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.claim_funds(payment_preimage);
@@ -4834,7 +4920,7 @@ fn test_onchain_to_onchain_claim() {
        let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events);
 
        match nodes_2_event {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
                _ => panic!("Unexpected event"),
        }
 
@@ -4897,14 +4983,14 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
        connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
 
-       let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
+       let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
 
        let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
        // We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
        // script push size limit so that the below script length checks match
        // ACCEPTED_HTLC_SCRIPT_WEIGHT.
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV - 40)
-               .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
+               .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, 800_000);
        send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[3]]], 800_000, duplicate_payment_hash, payment_secret);
 
@@ -5027,7 +5113,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
        let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
        assert_eq!(local_txn.len(), 1);
        assert_eq!(local_txn[0].input.len(), 1);
@@ -5105,20 +5191,20 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
        // 0th HTLC:
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
-       let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+       let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
        // 2nd HTLC:
        send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
        // 3rd HTLC:
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
        // 4th HTLC:
-       let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+       let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
        // 5th HTLC:
-       let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+       let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
        // 6th HTLC:
        send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
@@ -5126,13 +5212,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
 
        // 8th HTLC:
-       let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+       let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
        // 9th HTLC:
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
 
        // 10th HTLC:
-       let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+       let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 11th HTLC:
        let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
        send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
@@ -5386,7 +5472,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        // Create some initial channels
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
        let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn[0].input.len(), 1);
        check_spends!(local_txn[0], chan_1.3);
@@ -5461,7 +5547,7 @@ fn test_key_derivation_params() {
        connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
        connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
 
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
        let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
        let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn_1[0].input.len(), 1);
@@ -5547,7 +5633,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
 
        // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
        // present in B's local commitment transaction, but none of A's commitment transactions.
@@ -5574,7 +5660,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5605,7 +5691,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5620,7 +5706,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        // Also optionally test that we *don't* fail the channel in case the commitment transaction was
        // actually revoked.
        let htlc_value = if use_dust { 50000 } else { 3000000 };
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        nodes[1].node.fail_htlc_backwards(&our_payment_hash);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        check_added_monitors!(nodes[1], 1);
@@ -5651,7 +5737,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5700,14 +5786,14 @@ fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on i
        // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
        let channel_value_satoshis=10000;
        let push_msat=10001;
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
        let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 
        // Create a second channel with the same random values. This used to panic due to a colliding
        // channel_id, but now panics due to a colliding outbound SCID alias.
-       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
+       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
 }
 
 #[test]
@@ -5720,18 +5806,18 @@ fn bolt2_open_channel_sending_node_checks_part2() {
        // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
        let channel_value_satoshis=2^24;
        let push_msat=10001;
-       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
+       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
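        // Note: `^` is XOR in Rust, so `2^24` above evaluates to 26 sat rather than 2**24; the
        // call still fails, but presumably via the minimum-funding/affordability checks rather
        // than the wumbo limit the comment refers to.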
 
        // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
        let channel_value_satoshis=10000;
        // Test when push_msat is greater than 1000 * funding_satoshis.
        let push_msat=1000*channel_value_satoshis+1;
-       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_err());
+       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
 
        // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
        let channel_value_satoshis=10000;
        let push_msat=10001;
-       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).is_ok()); //Create a valid channel
+       assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
        let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
 
@@ -5744,8 +5830,8 @@ fn bolt2_open_channel_sending_node_checks_part2() {
        assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
 
        // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
-       let chain_hash=genesis_block(Network::Testnet).header.block_hash();
-       assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
+       let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
+       assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);
 
        // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
        assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
@@ -5764,7 +5850,7 @@ fn bolt2_open_channel_sane_dust_limit() {
 
        let channel_value_satoshis=1000000;
        let push_msat=10001;
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        node0_to_1_send_open_channel.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
@@ -6195,7 +6281,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
        let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
 
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0)
-               .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+               .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
        let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000);
        route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001;
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@ -6215,7 +6301,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
        let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
 
        // Fetch a route in advance as we will be unable to once we're unable to send.
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -6288,7 +6374,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-               htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
+               htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
        }
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@@ -6592,7 +6678,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
@@ -6635,7 +6721,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
@@ -6891,11 +6977,11 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let chan =create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
        // We route 2 dust-HTLCs between A and B
-       let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
-       let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        // Cache one local commitment tx as previous
@@ -6984,17 +7070,17 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
-       let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
-       let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+       let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+       let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
        let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
 
        // We revoked bs_commitment_tx
        if revoked {
-               let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+               let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
                claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
        }
 
@@ -7070,7 +7156,7 @@ fn test_user_configurable_csv_delay() {
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
        if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
-               &low_our_to_self_config, 0, 42)
+               &low_our_to_self_config, 0, 42, None)
        {
                match error {
                        APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
@@ -7079,7 +7165,7 @@ fn test_user_configurable_csv_delay() {
        } else { assert!(false) }
 
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
-       nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
+       nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
@@ -7093,7 +7179,7 @@ fn test_user_configurable_csv_delay() {
        } else { assert!(false); }
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        accept_channel.to_self_delay = 200;
@@ -7111,7 +7197,7 @@ fn test_user_configurable_csv_delay() {
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
 
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
-       nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
+       nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
@@ -7141,11 +7227,11 @@ fn test_check_htlc_underpaying() {
 
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(),
-               TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
        let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
-               None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+               None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
        let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@ -7294,7 +7380,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 30)
-               .with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
+               .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
        let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
        send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
 
@@ -7398,17 +7484,18 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
        // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
-       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+       let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+               nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
-       let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
+       let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
+               .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+               nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
@@ -7554,7 +7641,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
-       let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
 
        // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
@@ -7663,7 +7750,9 @@ fn test_counterparty_raa_skip_no_crash() {
        {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
+               let keys = guard.channel_by_id.get_mut(&channel_id).map(
+                       |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+               ).flatten().unwrap().get_signer();
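                // Only a channel in the `Funded` phase carries a holder signer here, hence the
                // explicit narrowing to `ChannelPhase::Funded` before calling `get_signer()`.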
 
                const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
@@ -7706,8 +7795,8 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
        // Lock HTLC in both directions
-       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
-       let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
+       let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+       let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
@@ -7772,9 +7861,9 @@ fn test_channel_conf_timeout() {
        let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_ev.len(), 1);
        match close_ev[0] {
-               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, ref node_id } => {
+               MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-                       assert_eq!(msg.data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
+                       assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
                },
                _ => panic!("Unexpected event"),
        }
@@ -7791,7 +7880,7 @@ fn test_override_channel_config() {
        let mut override_config = UserConfig::default();
        override_config.channel_handshake_config.our_to_self_delay = 200;
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
 
        // Assert the channel created by node0 is using the override config.
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -7808,7 +7897,7 @@ fn test_override_0msat_htlc_minimum() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(zero_config)).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        assert_eq!(res.htlc_minimum_msat, 1);
 
@@ -7878,7 +7967,7 @@ fn test_manually_accept_inbound_channel_request() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
@@ -7928,7 +8017,7 @@ fn test_manually_reject_inbound_channel_request() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
@@ -7968,7 +8057,7 @@ fn test_can_not_accept_inbound_channel_twice() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, Some(manually_accept_conf)).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
@@ -8350,7 +8439,7 @@ fn test_update_err_monitor_lockdown() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
 
        // Route a HTLC from node 0 to node 1 (but don't settle)
-       let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+       let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
 
        // Copy ChainMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain
        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
@@ -8365,7 +8454,7 @@ fn test_update_err_monitor_lockdown() {
                        new_monitor
                };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                watchtower
        };
        let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
@@ -8385,11 +8474,14 @@ fn test_update_err_monitor_lockdown() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
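                // The watchtower's cloned monitor has already seen later blocks and does not
                // complete this update (surfaced as `InProgress`), while node 0's own, in-sync
                // monitor applies it and returns `Completed`.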
        }
        // Our local monitor is in-sync and hasn't yet processed the timeout
        check_added_monitors!(nodes[0], 1);
@@ -8435,7 +8527,7 @@ fn test_concurrent_monitor_claim() {
                        new_monitor
                };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                watchtower
        };
        let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
@@ -8466,7 +8558,7 @@ fn test_concurrent_monitor_claim() {
                        new_monitor
                };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
+               assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                watchtower
        };
        watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
@@ -8483,13 +8575,16 @@ fn test_concurrent_monitor_claim() {
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-                       // Watchtower Alice should already have seen the block and reject the update
-                       assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-                       assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-                       assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-               } else { assert!(false); }
+               if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+                       if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+                               // Watchtower Alice should already have seen the block and reject the update
+                               assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
+                               assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                               assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+                       } else { assert!(false); }
+               } else {
+                       assert!(false);
+               }
        }
        // Our local monitor is in-sync and hasn't yet processed the timeout
        check_added_monitors!(nodes[0], 1);
@@ -8509,7 +8604,7 @@ fn test_concurrent_monitor_claim() {
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
-       check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
+       check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
                [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
@@ -8542,7 +8637,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // Create an initial channel
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
        let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -8559,7 +8654,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
-               [nodes[1].node.get_our_node_id(); 2], 100000);
+               [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -8580,7 +8675,7 @@ fn test_htlc_no_detection() {
        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
-       let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
+       let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
        let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn[0].input.len(), 1);
        assert_eq!(local_txn[0].output.len(), 3);
@@ -8636,7 +8731,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 
        // Steps (1) and (2):
        // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
-       let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+       let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
 
        // Check that Alice's commitment transaction now contains an output for this HTLC.
        let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
@@ -8689,7 +8784,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
        assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
 
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
-       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
+       let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
+       expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
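        // When the forward is settled via an on-chain claim (one of the channels was force-closed),
        // the fee can't be reported as usual, so `None` is expected and the force-close flag is
        // passed along; otherwise the 1000 msat routing fee from the route above is expected.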
        // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
        if !go_onchain_before_fulfill && broadcast_alice {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -8804,11 +8900,11 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
        // Create a first channel
-       nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 
        // Create a second channel
-       nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+       nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
        let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
 
        // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
@@ -8859,7 +8955,7 @@ fn test_duplicate_chan_id() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // Create an initial channel
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
@@ -8927,7 +9023,7 @@ fn test_duplicate_chan_id() {
        }
 
        // Now try to create a second channel which has a duplicate funding output.
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_2_msg);
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
@@ -8940,12 +9036,16 @@ fn test_duplicate_chan_id() {
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
-               let logger = test_utils::TestLogger::new();
-               as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                               let logger = test_utils::TestLogger::new();
+                               chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
+                       },
+                       _ => panic!("Unexpected ChannelPhase variant"),
+               }
        };
        check_added_monitors!(nodes[0], 0);
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap());
        // At this point we'll look up if the channel_id is present and immediately fail the channel
        // without trying to persist the `ChannelMonitor`.
        check_added_monitors!(nodes[1], 0);
@@ -9071,7 +9171,7 @@ fn test_invalid_funding_tx() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
 
@@ -9113,8 +9213,8 @@ fn test_invalid_funding_tx() {
        assert_eq!(events_2.len(), 1);
        if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-               if let msgs::ErrorAction::SendErrorMessage { msg } = action {
-                       assert_eq!(msg.data, "Channel closed because of an exception: ".to_owned() + expected_err);
+               if let msgs::ErrorAction::DisconnectPeer { msg } = action {
+                       assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
                } else { panic!(); }
        } else { panic!(); }
        assert_eq!(nodes[1].node.list_channels().len(), 0);
@@ -9142,6 +9242,66 @@ fn test_invalid_funding_tx() {
        mine_transaction(&nodes[1], &spend_tx);
 }
 
+#[test]
+fn test_coinbase_funding_tx() {
+       // Miners are able to fund channels directly from coinbase transactions; however, by
+       // consensus rules, outputs of a coinbase transaction are encumbered by a 100-block
+       // maturity timelock. To ensure that a (non-0conf) channel like this is enforceable
+       // on-chain, the minimum depth is updated to 100 blocks for coinbase funding transactions.
+       //
+       // Note that 0conf channels with coinbase funding transactions are unaffected and are
+       // immediately operational after opening.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
+       let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
+
+       // Create the coinbase funding transaction.
+       let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+       check_added_monitors!(nodes[0], 0);
+       let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+       assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+       // Starting at height 0, we "confirm" the coinbase at height 1.
+       confirm_transaction_at(&nodes[0], &tx, 1);
+       // We connect 98 more blocks to have 99 confirmations for the coinbase transaction.
+       connect_blocks(&nodes[0], COINBASE_MATURITY - 2);
+       // Check that we have no pending message events (we have not queued a `channel_ready` yet).
+       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+       // Now connect one more block which results in 100 confirmations of the coinbase transaction.
+       connect_blocks(&nodes[0], 1);
+       // There should now be a `channel_ready` which can be handled.
+       let _ = &nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
+
+       confirm_transaction_at(&nodes[1], &tx, 1);
+       connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
+       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+       connect_blocks(&nodes[1], 1);
+       expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+       create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+}
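A minimal sketch (editorial, not LDK's internal logic) of the depth rule the test above
exercises: a funding transaction is a coinbase when its single input spends the null outpoint,
and such a channel should not be treated as usable until the output is mature
(COINBASE_MATURITY, i.e. 100 blocks), even if the configured minimum depth is lower, while
0conf channels skip the depth check entirely. The helper name is hypothetical.

fn required_confirmations_sketch(funding_tx: &Transaction, configured_min_depth: u32) -> u32 {
        let is_coinbase = funding_tx.input.len() == 1
                && funding_tx.input[0].previous_output == BitcoinOutPoint::null();
        if configured_min_depth == 0 {
                // 0conf: the channel is operational immediately, coinbase or not.
                0
        } else if is_coinbase {
                // Non-0conf coinbase funding: wait out the consensus maturity window.
                configured_min_depth.max(COINBASE_MATURITY)
        } else {
                configured_min_depth
        }
}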
+
 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
        // In the first version of the chain::Confirm interface, after a refactor was made to not
        // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
@@ -9167,7 +9327,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
        create_announced_chan_between_nodes(&nodes, 0, 1);
        let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
-       let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+       let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
@@ -9235,7 +9395,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
        let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
-               .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+               .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
        let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
 
        let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
@@ -9344,7 +9504,7 @@ fn test_inconsistent_mpp_params() {
        let chan_2_3 =create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
 
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
-               .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
+               .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
        let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
        assert_eq!(route.paths.len(), 2);
        route.paths.sort_by(|path_a, _| {
@@ -9531,7 +9691,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        open_channel.max_htlc_value_in_flight_msat = 50_000_000;
        open_channel.max_accepted_htlcs = 60;
@@ -9549,8 +9709,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
-               chan.context.holder_dust_limit_satoshis = 546;
+               match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
+                       ChannelPhase::UnfundedOutboundV1(chan) => {
+                               chan.context.holder_dust_limit_satoshis = 546;
+                       },
+                       _ => panic!("Unexpected ChannelPhase variant"),
+               }
        }
 
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@@ -9574,8 +9738,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               (chan.context.get_dust_buffer_feerate(None) as u64,
-               chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+               (chan.context().get_dust_buffer_feerate(None) as u64,
+               chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
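        // In words: an outbound HTLC on the holder's commitment is dust when its value, less the
        // fee of its HTLC-timeout transaction at the buffered feerate, falls below the holder's
        // dust limit. The largest still-dust HTLC is therefore about
        // `timeout_tx_fee_sat + dust_limit_sat - 1` (converted to msat above), and dividing the
        // configured max dust exposure by that amount gives how many such HTLCs fit under the cap.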
@@ -9709,7 +9873,7 @@ fn test_non_final_funding_tx() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -9737,9 +9901,46 @@ fn test_non_final_funding_tx() {
                },
                _ => panic!()
        }
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               Event::ChannelClosed { channel_id, .. } => {
+                       assert_eq!(channel_id, temp_channel_id);
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
+#[test]
+fn test_non_final_funding_tx_within_headroom() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
+
+       let best_height = nodes[0].node.best_block.read().unwrap().height();
+
+       let chan_id = *nodes[0].network_chan_count.borrow();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: Sequence(1), witness: Witness::from_vec(vec!(vec!(1))) };
+       assert_eq!(events.len(), 1);
+       let mut tx = match events[0] {
+               Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
+                       // Timelock the transaction within a +1 headroom from the best block.
+                       Transaction { version: chan_id as i32, lock_time: PackedLockTime(best_height + 1), input: vec![input], output: vec![TxOut {
+                               value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+                       }]}
+               },
+               _ => panic!("Unexpected event"),
+       };
 
-       // However, transaction should be accepted if it's in a +1 headroom from best block.
-       tx.lock_time = PackedLockTime(tx.lock_time.0 - 1);
+       // Transaction should be accepted if it's in a +1 headroom from best block.
        assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
        get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 }
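The two funding-locktime tests above can be summarized with a small sketch (behaviour inferred
from the tests, not a copy of the real ChannelManager check; the helper name and the exact
headroom are assumptions): a height-based locktime is only acceptable if the transaction could
confirm within roughly one block of the current tip.

fn funding_locktime_acceptable_sketch(lock_time: u32, best_height: u32) -> bool {
        // Below this consensus threshold a locktime is interpreted as a block height;
        // timestamp-based locktimes are out of scope for this sketch.
        const LOCKTIME_THRESHOLD: u32 = 500_000_000;
        lock_time >= LOCKTIME_THRESHOLD || lock_time <= best_height + 1
}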
@@ -9814,7 +10015,7 @@ fn accept_busted_but_better_fee() {
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
                        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
                        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
-                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() },
+                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
                                [nodes[0].node.get_our_node_id()], 100000);
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
@@ -10017,7 +10218,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -10034,7 +10235,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
        let check_outbound_channel_existence = |should_exist: bool| {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-               assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+               assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
        };
 
        // Channel should exist without any timer ticks.
@@ -10068,7 +10269,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -10085,7 +10286,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
        let check_inbound_channel_existence = |should_exist: bool| {
                let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-               assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+               assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
        };
 
        // Channel should exist without any timer ticks.
@@ -10133,8 +10334,8 @@ fn do_test_multi_post_event_actions(do_reload: bool) {
        send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
        send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
 
-       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-       let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+       let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+       let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
 
        nodes[1].node.claim_funds(our_payment_preimage);
        check_added_monitors!(nodes[1], 1);
@@ -10198,3 +10399,271 @@ fn test_multi_post_event_actions() {
        do_test_multi_post_event_actions(true);
        do_test_multi_post_event_actions(false);
 }
+
+#[test]
+fn test_batch_channel_open() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
+
+       // Go through the funding_created and funding_signed flow with node 2.
+       nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
+       check_added_monitors(&nodes[2], 1);
+       expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
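+       // Mark the next monitor update as in-progress so the funding broadcast is held back until
+       // persistence of this monitor completes.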
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before persistence of all monitors has
+       // completed.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+
+       // Complete the persistence of the monitor for the second channel in the batch (funding output index 1).
+       nodes[0].chain_monitor.complete_sole_pending_chan_update(
+               &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+       );
+       let events = nodes[0].node.get_and_clear_pending_events();
+
+       // The funding transaction should have been broadcast only now, with all monitors persisted.
+       let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+       assert_eq!(broadcasted_txs.len(), 1);
+       assert_eq!(broadcasted_txs[0], tx);
+
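+       // Both channels in the batch should now report a ChannelPending event, one per counterparty.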
+       assert_eq!(events.len(), 2);
+       assert!(events.iter().any(|e| matches!(
+               *e,
+               crate::events::Event::ChannelPending {
+                       ref counterparty_node_id,
+                       ..
+               } if counterparty_node_id == &nodes[1].node.get_our_node_id(),
+       )));
+       assert!(events.iter().any(|e| matches!(
+               *e,
+               crate::events::Event::ChannelPending {
+                       ref counterparty_node_id,
+                       ..
+               } if counterparty_node_id == &nodes[2].node.get_our_node_id(),
+       )));
+}
+
+#[test]
+fn test_disconnect_in_funding_batch() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // The remaining peer in the batch disconnects.
+       nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+
+       // The channels in the batch will close immediately.
+       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       check_closed_events(&nodes[0], &[
+               ExpectedCloseEvent {
+                       channel_id: Some(channel_id_1),
+                       discard_funding: true,
+                       ..Default::default()
+               },
+               ExpectedCloseEvent {
+                       channel_id: Some(channel_id_2),
+                       discard_funding: true,
+                       ..Default::default()
+               },
+       ]);
+
+       // The monitor should become closed.
+       check_added_monitors(&nodes[0], 1);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
+       // The funding transaction should not have been broadcast, so there is no need to
+       // broadcast a force-close transaction for the closed monitor.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // Ensure the channels don't exist anymore.
+       assert!(nodes[0].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_batch_funding_close_after_funding_signed() {
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // Initiate channel opening and create the batch channel funding transaction.
+       let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[
+               (&nodes[1], 100_000, 0, 42, None),
+               (&nodes[2], 200_000, 0, 43, None),
+       ]);
+
+       // Go through the funding_created and funding_signed flow with node 1.
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[0]);
+       check_added_monitors(&nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // Go through the funding_created and funding_signed flow with node 2.
+       nodes[2].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msgs[1]);
+       check_added_monitors(&nodes[2], 1);
+       expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id());
+
+       let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+       nodes[0].node.handle_funding_signed(&nodes[2].node.get_our_node_id(), &funding_signed_msg);
+       check_added_monitors(&nodes[0], 1);
+
+       // The transaction should not have been broadcast before all channels are ready.
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
+
+       // Force-close the channel for which we've completed the initial monitor.
+       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
+       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+       check_added_monitors(&nodes[0], 2);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+               let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap();
+               assert_eq!(monitor_updates_2.len(), 1);
+               assert_eq!(monitor_updates_2[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
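+       // The force-close generates an error message to be sent to the counterparty.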
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { .. } => (),
+               _ => panic!("Unexpected message."),
+       }
+
+       // We broadcast the commitment transaction as part of the force-close.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert!(broadcasted_txs[0].txid() != tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
+
+       // All channels in the batch should close immediately.
+       check_closed_events(&nodes[0], &[
+               ExpectedCloseEvent {
+                       channel_id: Some(channel_id_1),
+                       discard_funding: true,
+                       ..Default::default()
+               },
+               ExpectedCloseEvent {
+                       channel_id: Some(channel_id_2),
+                       discard_funding: true,
+                       ..Default::default()
+               },
+       ]);
+
+       // Ensure the channels don't exist anymore.
+       assert!(nodes[0].node.list_channels().is_empty());
+}
+
+fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitment: bool) {
+       // Tests that a node will forget the channel (when it only requires 1 confirmation) if the
+       // funding and commitment transactions confirm in the same block.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let mut min_depth_1_block_cfg = test_default_channel_config();
+       min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1;
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg), Some(min_depth_1_block_cfg)]);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+       let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+       assert_eq!(nodes[1].node.list_channels().len(), 1);
+
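+       // Select which side force-closes: nodes[1] when confirming the remote commitment
+       // transaction, otherwise nodes[0].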
+       let (closing_node, other_node) = if confirm_remote_commitment {
+               (&nodes[1], &nodes[0])
+       } else {
+               (&nodes[0], &nodes[1])
+       };
+
+       closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
+       let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match msg_events.pop().unwrap() {
+               MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       check_added_monitors(closing_node, 1);
+       check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
+
+       let commitment_tx = {
+               let mut txn = closing_node.tx_broadcaster.txn_broadcast();
+               assert_eq!(txn.len(), 1);
+               let commitment_tx = txn.pop().unwrap();
+               check_spends!(commitment_tx, funding_tx);
+               commitment_tx
+       };
+
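+       // Confirm the funding transaction and the commitment transaction in the same block on
+       // both nodes.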
+       mine_transactions(&nodes[0], &[&funding_tx, &commitment_tx]);
+       mine_transactions(&nodes[1], &[&funding_tx, &commitment_tx]);
+
+       check_closed_broadcast(other_node, 1, true);
+       check_added_monitors(other_node, 1);
+       check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000);
+
+       assert!(nodes[0].node.list_channels().is_empty());
+       assert!(nodes[1].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_funding_and_commitment_tx_confirm_same_block() {
+       do_test_funding_and_commitment_tx_confirm_same_block(false);
+       do_test_funding_and_commitment_tx_confirm_same_block(true);
+}