Use `crate::prelude::*` rather than specific imports
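
In `functional_tests.rs` the redundant `use regex;` and `use core::default::Default;` imports are dropped, and direct `HashSet::new()` / `HashMap::new()` calls are replaced with the crate-local constructor helpers (`new_hash_set()`, `new_hash_map()`), relying on the existing `use crate::prelude::*;` import. A minimal sketch of the resulting convention, assuming these helpers are re-exported through `crate::prelude` as the diff below suggests:

    use crate::prelude::*;

    fn track_failures() {
        // Previously written as `HashSet::new()` / `HashMap::new()`;
        // the prelude helpers construct the same collection types.
        let mut failed_htlcs = new_hash_set();
        let mut chans_disabled = new_hash_map();
        failed_htlcs.insert(42u64);
        chans_disabled.insert(42u64, 0u32);
        assert!(failed_htlcs.contains(&42));
        assert_eq!(chans_disabled.len(), 1);
    }
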
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 5d8cc59978eb1d8f01379b47be6be41900aa8cd1..5ea3e6372c0d1c497c0b8fcde11961cfdb4206b0 100644
@@ -49,12 +49,9 @@ use bitcoin::OutPoint as BitcoinOutPoint;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::{PublicKey,SecretKey};
 
-use regex;
-
 use crate::io;
 use crate::prelude::*;
 use alloc::collections::BTreeSet;
-use core::default::Default;
 use core::iter::repeat;
 use bitcoin::hashes::Hash;
 use crate::sync::{Arc, Mutex, RwLock};
@@ -107,22 +104,22 @@ fn test_insane_channel_opens() {
        use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
 
        // Test all mutations that would make the channel open message insane
-       insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
-       insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
+       insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
+       insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
 
-       insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
+       insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });
 
-       insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+       insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 
-       insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
+       insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg });
 
-       insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+       insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
 
-       insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
+       insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
 
-       insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
+       insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });
 
-       insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
+       insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
 }
 
 #[test]
@@ -166,7 +163,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        if !send_from_initiator {
                open_channel_message.channel_reserve_satoshis = 0;
-               open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+               open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
 
@@ -174,7 +171,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        if send_from_initiator {
                accept_channel_message.channel_reserve_satoshis = 0;
-               accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+               accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
        {
@@ -190,7 +187,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                                chan_context.holder_selected_channel_reserve_satoshis = 0;
                                chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
                        },
-                       ChannelPhase::Funded(_) => assert!(false),
+                       _ => assert!(false),
                }
        }
 
@@ -768,7 +765,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
        //Should produce and error.
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
+       nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
@@ -871,8 +868,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -985,8 +982,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1104,17 +1101,17 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1401,7 +1398,7 @@ fn test_fee_spike_violation_fails_htlc() {
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
 
-       let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
 
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
@@ -1599,7 +1596,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
                700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
@@ -1617,7 +1614,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1778,7 +1775,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
@@ -1796,7 +1793,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
        // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+       nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -2371,13 +2368,13 @@ fn channel_monitor_network_test() {
                connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
                let events = nodes[3].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
-               let close_chan_update_1 = match events[0] {
+               let close_chan_update_1 = match events[1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
-               match events[1] {
+               match events[0] {
                        MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[4].node.get_our_node_id());
                        },
@@ -2403,13 +2400,13 @@ fn channel_monitor_network_test() {
                connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
                let events = nodes[4].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
-               let close_chan_update_2 = match events[0] {
+               let close_chan_update_2 = match events[1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
-               match events[1] {
+               match events[0] {
                        MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[3].node.get_our_node_id());
                        },
@@ -2417,7 +2414,7 @@ fn channel_monitor_network_test() {
                }
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
-               check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
 
                mine_transaction(&nodes[4], &node_txn[0]);
                check_preimage_claim(&nodes[4], &node_txn);
@@ -2430,7 +2427,7 @@ fn channel_monitor_network_test() {
 
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
                Ok(ChannelMonitorUpdateStatus::Completed));
-       check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2750,7 +2747,7 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let mut events = nodes[0].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+               expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
                match events.last().unwrap() {
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                        _ => panic!("Unexpected event"),
@@ -2889,8 +2886,10 @@ fn test_htlc_on_chain_success() {
        }
        let chan_id = Some(chan_1.2);
        match forwarded_events[1] {
-               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
-                       assert_eq!(fee_earned_msat, Some(1000));
+               Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                       next_channel_id, outbound_amount_forwarded_msat, ..
+               } => {
+                       assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@ -2899,8 +2898,10 @@ fn test_htlc_on_chain_success() {
                _ => panic!()
        }
        match forwarded_events[2] {
-               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
-                       assert_eq!(fee_earned_msat, Some(1000));
+               Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                       next_channel_id, outbound_amount_forwarded_msat, ..
+               } => {
+                       assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, chan_id);
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@ -3308,18 +3309,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 2);
                match events[0] {
-                       Event::PendingHTLCsForwardable { .. } => { },
-                       _ => panic!("Unexpected event"),
-               };
-               match events[1] {
                        Event::HTLCHandlingFailed { .. } => { },
                        _ => panic!("Unexpected event"),
                }
+               match events[1] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
                // Deliberately don't process the pending fail-back so they all fail back at once after
                // block connection just like the !deliver_bs_raa case
        }
 
-       let mut failed_htlcs = HashSet::new();
+       let mut failed_htlcs = new_hash_set();
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
@@ -3328,22 +3329,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() });
-       match events[0] {
-               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
-               _ => panic!("Unexepected event"),
-       }
-       match events[1] {
-               Event::PaymentPathFailed { ref payment_hash, .. } => {
-                       assert_eq!(*payment_hash, fourth_payment_hash);
-               },
-               _ => panic!("Unexpected event"),
-       }
-       match events[2] {
-               Event::PaymentFailed { ref payment_hash, .. } => {
-                       assert_eq!(*payment_hash, fourth_payment_hash);
-               },
-               _ => panic!("Unexpected event"),
-       }
+       assert!(events.iter().any(|ev| matches!(
+               ev,
+               Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. }
+       )));
+       assert!(events.iter().any(|ev| matches!(
+               ev,
+               Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+       )));
+       assert!(events.iter().any(|ev| matches!(
+               ev,
+               Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash
+       )));
 
        nodes[1].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[1], 1);
@@ -3503,7 +3500,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 
                let secp_ctx = Secp256k1::new();
                let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-               let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+               let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
                let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
                        &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
@@ -3699,7 +3696,7 @@ fn test_dup_events_on_peer_disconnect() {
 #[test]
 fn test_peer_disconnected_before_funding_broadcasted() {
        // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
-       // before the funding transaction has been broadcasted.
+       // before the funding transaction has been broadcasted, and doesn't reconnect back within time.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -3728,12 +3725,19 @@ fn test_peer_disconnected_before_funding_broadcasted() {
                assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
        }
 
-       // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
-       // disconnected before the funding transaction was broadcasted.
+       // The peers disconnect before the funding is broadcasted.
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
+       // The time for peers to reconnect expires.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
+       // when the peers are disconnected and do not reconnect before the funding
+       // transaction is broadcasted.
+       check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
@@ -4609,7 +4613,7 @@ fn test_static_spendable_outputs_preimage_tx() {
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@ -4652,7 +4656,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        mine_transaction(&nodes[1], &commitment_tx[0]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
-       match events[0] {
+       match events[1] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        }
@@ -4916,8 +4920,10 @@ fn test_onchain_to_onchain_claim() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => {
-                       assert_eq!(fee_earned_msat, Some(1000));
+               Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
+                       next_channel_id, outbound_amount_forwarded_msat, ..
+               } => {
+                       assert_eq!(total_fee_earned_msat, Some(1000));
                        assert_eq!(prev_channel_id, Some(chan_1.2));
                        assert_eq!(claim_from_onchain_tx, true);
                        assert_eq!(next_channel_id, Some(chan_2.2));
@@ -5066,7 +5072,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@ -5144,7 +5150,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@ -5342,7 +5348,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
        if deliver_last_raa {
-               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
 
                let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
                expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
@@ -5400,11 +5406,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        let as_events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
-       let mut as_failds = HashSet::new();
+       let mut as_faileds = new_hash_set();
        let mut as_updates = 0;
        for event in as_events.iter() {
                if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
-                       assert!(as_failds.insert(*payment_hash));
+                       assert!(as_faileds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*payment_failed_permanently, deliver_last_raa);
                        } else {
@@ -5416,21 +5422,21 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                } else if let &Event::PaymentFailed { .. } = event {
                } else { panic!("Unexpected event"); }
        }
-       assert!(as_failds.contains(&payment_hash_1));
-       assert!(as_failds.contains(&payment_hash_2));
+       assert!(as_faileds.contains(&payment_hash_1));
+       assert!(as_faileds.contains(&payment_hash_2));
        if announce_latest {
-               assert!(as_failds.contains(&payment_hash_3));
-               assert!(as_failds.contains(&payment_hash_5));
+               assert!(as_faileds.contains(&payment_hash_3));
+               assert!(as_faileds.contains(&payment_hash_5));
        }
-       assert!(as_failds.contains(&payment_hash_6));
+       assert!(as_faileds.contains(&payment_hash_6));
 
        let bs_events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
-       let mut bs_failds = HashSet::new();
+       let mut bs_faileds = new_hash_set();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
                if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
-                       assert!(bs_failds.insert(*payment_hash));
+                       assert!(bs_faileds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*payment_failed_permanently, deliver_last_raa);
                        } else {
@@ -5442,12 +5448,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                } else if let &Event::PaymentFailed { .. } = event {
                } else { panic!("Unexpected event"); }
        }
-       assert!(bs_failds.contains(&payment_hash_1));
-       assert!(bs_failds.contains(&payment_hash_2));
+       assert!(bs_faileds.contains(&payment_hash_1));
+       assert!(bs_faileds.contains(&payment_hash_2));
        if announce_latest {
-               assert!(bs_failds.contains(&payment_hash_4));
+               assert!(bs_faileds.contains(&payment_hash_4));
        }
-       assert!(bs_failds.contains(&payment_hash_5));
+       assert!(bs_faileds.contains(&payment_hash_5));
 
        // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
        // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
@@ -5537,8 +5543,9 @@ fn test_key_derivation_params() {
        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
        let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
        let scorer = RwLock::new(test_utils::TestScorer::new());
-       let router = test_utils::TestRouter::new(network_graph.clone(), &scorer);
-       let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
+       let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
+       let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
+       let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        node_cfgs.remove(0);
        node_cfgs.insert(0, node);
@@ -5622,7 +5629,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5630,7 +5637,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -5672,7 +5679,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5703,7 +5710,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5749,7 +5756,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5831,26 +5838,26 @@ fn bolt2_open_channel_sending_node_checks_part2() {
        let push_msat=10001;
        assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
        let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
+       assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
 
        // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
        // Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1
-       assert!(node0_to_1_send_open_channel.channel_flags<=1);
+       assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
 
        // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
        assert!(BREAKDOWN_TIMEOUT>0);
-       assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
+       assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
 
        // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
        let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
-       assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);
+       assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
 
        // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
 }
 
 #[test]
@@ -5864,7 +5871,7 @@ fn bolt2_open_channel_sane_dust_limit() {
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       node0_to_1_send_open_channel.dust_limit_satoshis = 547;
+       node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
@@ -6172,7 +6179,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
        let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(process_htlc_forwards_event.len(), 2);
-       match &process_htlc_forwards_event[0] {
+       match &process_htlc_forwards_event[1] {
                &Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
        }
@@ -6299,7 +6306,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        updates.update_add_htlcs[0].amount_msat = 0;
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-       nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+       nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3);
        check_closed_broadcast!(nodes[1], true).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -6478,7 +6485,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
        route.paths[0].hops[0].fee_msat = send_amt;
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
@@ -7204,7 +7211,7 @@ fn test_user_configurable_csv_delay() {
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
-       open_channel.to_self_delay = 200;
+       open_channel.common_fields.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
@@ -7219,7 +7226,7 @@ fn test_user_configurable_csv_delay() {
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-       accept_channel.to_self_delay = 200;
+       accept_channel.common_fields.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
        let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
@@ -7236,7 +7243,7 @@ fn test_user_configurable_csv_delay() {
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
-       open_channel.to_self_delay = 200;
+       open_channel.common_fields.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
@@ -7324,6 +7331,9 @@ fn test_announce_disable_channels() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+       // Connect a dummy node for proper future events broadcasting
+       connect_dummy_node(&nodes[0]);
+
        create_announced_chan_between_nodes(&nodes, 0, 1);
        create_announced_chan_between_nodes(&nodes, 1, 0);
        create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -7337,7 +7347,7 @@ fn test_announce_disable_channels() {
        }
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       let mut chans_disabled = HashMap::new();
+       let mut chans_disabled = new_hash_map();
        for e in msg_events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
@@ -7533,7 +7543,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
                nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
-       send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
+       let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
@@ -7572,7 +7582,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
        connect_block(&nodes[0], &block_129);
        let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+       expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
        match events.last().unwrap() {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                _ => panic!("Unexpected event"),
@@ -7921,8 +7931,8 @@ fn test_override_channel_config() {
 
        // Assert the channel created by node0 is using the override config.
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert_eq!(res.channel_flags, 0);
-       assert_eq!(res.to_self_delay, 200);
+       assert_eq!(res.common_fields.channel_flags, 0);
+       assert_eq!(res.common_fields.to_self_delay, 200);
 }
 
 #[test]
@@ -7936,11 +7946,11 @@ fn test_override_0msat_htlc_minimum() {
 
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert_eq!(res.htlc_minimum_msat, 1);
+       assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
        let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-       assert_eq!(res.htlc_minimum_msat, 1);
+       assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 }
 
 #[test]
@@ -8216,8 +8226,10 @@ fn test_onion_value_mpp_set_calculation() {
                                RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap();
                        // Edit amt_to_forward to simulate the sender having set
                        // the final amount and the routing node taking less fee
-                       if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] {
-                               *amt_msat = 99_000;
+                       if let msgs::OutboundOnionPayload::Receive {
+                               ref mut sender_intended_htlc_amt_msat, ..
+                       } = onion_payloads[1] {
+                               *sender_intended_htlc_amt_msat = 99_000;
                        } else { panic!() }
                        let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap();
                        payment_event.msgs[0].onion_routing_packet = new_onion_packet;
@@ -8642,7 +8654,7 @@ fn test_concurrent_monitor_claim() {
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
-       check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+       check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
                [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
@@ -8685,7 +8697,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        check_added_monitors!(nodes[0], 0);
 
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-       let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
@@ -8950,7 +8962,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
 
        // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
        // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
-       open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
+       open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
 
        // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
        // `temporary_channel_id` as they are from different peers.
@@ -8961,7 +8973,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
                match &events[0] {
                        MessageSendEvent::SendAcceptChannel { node_id, msg } => {
                                assert_eq!(node_id, &nodes[1].node.get_our_node_id());
-                               assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+                               assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
                        },
                        _ => panic!("Unexpected event"),
                }
@@ -8974,13 +8986,156 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
                match &events[0] {
                        MessageSendEvent::SendAcceptChannel { node_id, msg } => {
                                assert_eq!(node_id, &nodes[2].node.get_our_node_id());
-                               assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+                               assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
                        },
                        _ => panic!("Unexpected event"),
                }
        }
 }
 
+#[test]
+fn test_peer_funding_sidechannel() {
+       // Test that if a peer somehow learns which txid we'll use for our channel funding before we
+       // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously
+       // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning
+       // the txid and panicked if the peer tried to open a redundant channel to us with the same
+       // funding outpoint.
+       //
+       // While this assumption is generally safe, some users may have out-of-band protocols where
+       // they notify their LSP about a funding outpoint first, or this may be violated in the future
+       // with collaborative transaction construction protocols, i.e. dual-funding.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+       let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0);
+
+       let (_, tx, funding_output) =
+               create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+       let cs_funding_events = nodes[2].node.get_and_clear_pending_events();
+       assert_eq!(cs_funding_events.len(), 1);
+       match cs_funding_events[0] {
+               Event::FundingGenerationReady { .. } => {}
+               _ => panic!("Unexpected event {:?}", cs_funding_events),
+       }
+
+       nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
+       let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
+       get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id());
+       expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id());
+       check_added_monitors!(nodes[0], 1);
+
+       let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone());
+       let err_msg = format!("{:?}", res.unwrap_err());
+       assert!(err_msg.contains("An existing channel using outpoint "));
+       assert!(err_msg.contains(" is open with peer"));
+       // Even though the last funding_transaction_generated errored, it still generated a
+       // SendFundingCreated. However, when the peer responds with a funding_signed it will send the
+       // appropriate error message.
+       let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created);
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+       let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
+       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
+
+       let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+       get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+}
+
+#[test]
+fn test_duplicate_conflicting_funding_from_second_peer() {
+       // Test that if a user tries to fund a channel with a funding outpoint they'd previously used,
+       // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we
+       // don't regress in the fuzzer, as such funding getting past our outpoint-matches checks
+       // implies the user (and our counterparty) has reused cryptographic keys across channels, which
+       // we require the user not to do.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+       let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+
+       let (_, tx, funding_output) =
+               create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+
+       // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
+       // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails.
+       let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3;
+       let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
+       nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap();
+
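+       // Fund the nodes[0] <-> nodes[1] channel normally; the initial `ChannelMonitor` write will
+       // fail later, when nodes[0] handles funding_signed, because the outpoint is already watched.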
+       nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+
+       let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+       let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+       check_added_monitors!(nodes[1], 1);
+       expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+
+       nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+       // At this point, the channel should be closed, after having generated one monitor write (the
+       // watch_channel call which failed), but zero monitor updates.
+       check_added_monitors!(nodes[0], 1);
+       get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+       let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() };
+       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]);
+}
+
+#[test]
+fn test_duplicate_funding_err_in_funding() {
+       // Test that if we have a live channel with one peer, then another peer comes along and tries
+       // to create a second channel with the same txid, we'll fail and not overwrite the
+       // outpoint_to_peer map in `ChannelManager`.
+       //
+       // This was previously broken.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
+       let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
+       assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
+
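+       // A second peer, nodes[2], opens a channel to nodes[1]; we rewrite its temporary_channel_id
+       // to collide with the existing channel's real channel_id.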
+       nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+       let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
+       open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
+       nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
+       let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
+       accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
+       nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
+
+       // Now that we have a second channel with the same funding txo, send a bogus funding message
+       // and let nodes[1] remove the inbound channel.
+       let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
+
+       nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
+
+       let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
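+       // Rewrite the temporary_channel_id to the id nodes[1] knows this inbound channel by (we
+       // overwrote it with real_channel_id when forwarding the open_channel above).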
+       funding_created_msg.temporary_channel_id = real_channel_id;
+       // Make the signature invalid by changing the funding output
+       funding_created_msg.funding_output_index += 10;
+       nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
+       get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+       let err = "Invalid funding_created signature from peer".to_owned();
+       let reason = ClosureReason::ProcessingError { err };
+       let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
+       check_closed_events(&nodes[1], &[expected_closing]);
+
+       assert_eq!(
+               *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
+               nodes[0].node.get_our_node_id()
+       );
+}
+
 #[test]
 fn test_duplicate_chan_id() {
        // Test that if a given peer tries to open a channel with the same channel_id as one that is
@@ -9013,7 +9168,7 @@ fn test_duplicate_chan_id() {
                                // first (valid) and second (invalid) channels are closed, given they both have
                                // the same non-temporary channel_id. However, currently we do not, so we just
                                // move forward with it.
-                               assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+                               assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
                                assert_eq!(node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
@@ -9039,7 +9194,7 @@ fn test_duplicate_chan_id() {
        let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
 
        let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
-       let channel_id = funding_outpoint.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 
        // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
        // temporary one).
@@ -9047,7 +9202,7 @@ fn test_duplicate_chan_id() {
        // First try to open a second channel with a temporary channel id equal to the txid-based one.
        // Technically this is allowed by the spec, but we don't support it and there's little reason
        // to. Still, it shouldn't cause any other issues.
-       open_chan_msg.temporary_channel_id = channel_id;
+       open_chan_msg.common_fields.temporary_channel_id = channel_id;
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
        {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -9056,7 +9211,7 @@ fn test_duplicate_chan_id() {
                        MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
                                // Technically, at this point, nodes[1] would be justified in thinking both
                                // channels are closed, but currently we do not, so we just move forward with it.
-                               assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+                               assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
                                assert_eq!(node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
@@ -9077,20 +9232,26 @@ fn test_duplicate_chan_id() {
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
                        ChannelPhase::UnfundedOutboundV1(mut chan) => {
                                let logger = test_utils::TestLogger::new();
                                chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
                        },
                        _ => panic!("Unexpected ChannelPhase variant"),
-               }
+               }.unwrap()
        };
        check_added_monitors!(nodes[0], 0);
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap());
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        // At this point we'll look up if the channel_id is present and immediately fail the channel
        // without trying to persist the `ChannelMonitor`.
        check_added_monitors!(nodes[1], 0);
 
+       check_closed_events(&nodes[1], &[
+               ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError {
+                       err: "Already had channel with the new channel_id".to_owned()
+               })
+       ]);
+
        // ...still, nodes[1] will reject the duplicate channel.
        {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -9738,10 +9899,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       open_channel.max_htlc_value_in_flight_msat = 50_000_000;
-       open_channel.max_accepted_htlcs = 60;
+       open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
+       open_channel.common_fields.max_accepted_htlcs = 60;
        if on_holder_tx {
-               open_channel.dust_limit_satoshis = 546;
+               open_channel.common_fields.dust_limit_satoshis = 546;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -9786,10 +9947,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                (chan.context().get_dust_buffer_feerate(None) as u64,
                chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
-       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
 
-       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
        let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
 
        let dust_htlc_on_counterparty_tx: u64 = 4;
@@ -9924,7 +10085,7 @@ fn test_non_final_funding_tx() {
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
 
-       let best_height = nodes[0].node.best_block.read().unwrap().height();
+       let best_height = nodes[0].node.best_block.read().unwrap().height;
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -9969,7 +10130,7 @@ fn test_non_final_funding_tx_within_headroom() {
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
 
-       let best_height = nodes[0].node.best_block.read().unwrap().height();
+       let best_height = nodes[0].node.best_block.read().unwrap().height;
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -10358,6 +10519,90 @@ fn test_remove_expired_inbound_unfunded_channels() {
        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
 }
 
+#[test]
+fn test_channel_close_when_not_timely_accepted() {
+       // Create network of two nodes
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Simulate a peer disconnect mid-handshake.
+       // The channel is initiated from the node 0 side,
+       // but the nodes disconnect before node 1 can send its accept_channel.
+       let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+       // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+       // Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+       assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // In the meantime, some time passes.
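+       // Each timer_tick_occurred call ages the unfunded outbound channel; after
+       // UNFUNDED_CHANNEL_AGE_LIMIT_TICKS ticks without an accept_channel, nodes[0] gives up on it.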
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Since we disconnected from the peer and did not reconnect within the timeout,
+       // we should have force-closed the channel by now.
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       assert_eq!(nodes[0].node.list_channels().len(), 0);
+
+       {
+               // Since the accept_channel message was never received, the channel should have been
+               // force-closed by now from node 0's side and the peer removed from per_peer_state.
+               let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+               assert_eq!(node_0_per_peer_state.len(), 0);
+       }
+}
+
+#[test]
+fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
+       // Create network of two nodes
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Simulate a peer disconnect mid-handshake.
+       // The channel is initiated from the node 0 side,
+       // but the nodes disconnect before node 1 can send its accept_channel.
+       let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+       // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+       // Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+       assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // The peers now reconnect
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
+
+       // Make sure the SendOpenChannel message is added to node_0's pending message events.
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match &msg_events[0] {
+               MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
+               _ => panic!("Unexpected message."),
+       }
+}
+
 fn do_test_multi_post_event_actions(do_reload: bool) {
        // Tests handling multiple post-Event actions at once.
        // There is specific code in ChannelManager to handle channels where multiple post-Event
@@ -10487,7 +10732,7 @@ fn test_batch_channel_open() {
 
        // Complete the persistence of the monitor.
        nodes[0].chain_monitor.complete_sole_pending_chan_update(
-               &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+               &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
        );
        let events = nodes[0].node.get_and_clear_pending_events();
 
@@ -10514,7 +10759,9 @@ fn test_batch_channel_open() {
 }
 
 #[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+       // This test ensures that if one of the channels in the batch closes,
+       // the entire batch will close.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -10538,38 +10785,56 @@ fn test_disconnect_in_funding_batch() {
        // The transaction should not have been broadcast before all channels are ready.
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
-       // The remaining peer in the batch disconnects.
-       nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+       // Force-close the channel for which we've completed the initial monitor.
+       let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
+       let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+       let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
+
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+
+       // The monitor should become closed.
+       check_added_monitors(&nodes[0], 1);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
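+       // nodes[0] should notify the peer of the force-closure with an error message.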
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { .. } => (),
+               _ => panic!("Unexpected message."),
+       }
+
+       // We broadcast the commitment transaction as part of the force-close.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert!(broadcasted_txs[0].txid() != tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
 
-       // The channels in the batch will close immediately.
-       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
-       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       // All channels in the batch should close immediately.
        check_closed_events(&nodes[0], &[
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_1),
+                       user_channel_id: Some(42),
                        ..Default::default()
                },
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_2),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_2),
+                       user_channel_id: Some(43),
                        ..Default::default()
                },
        ]);
 
-       // The monitor should become closed.
-       check_added_monitors(&nodes[0], 1);
-       {
-               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
-               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
-               assert_eq!(monitor_updates_1.len(), 1);
-               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // The funding transaction should not have been broadcast, and therefore, we don't need
-       // to broadcast a force-close transaction for the closed monitor.
-       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
        // Ensure the channels don't exist anymore.
        assert!(nodes[0].node.list_channels().is_empty());
 }
@@ -10610,8 +10875,10 @@ fn test_batch_funding_close_after_funding_signed() {
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
        // Force-close the channel for which we've completed the initial monitor.
-       let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
-       let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+       let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
+       let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+       let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
        nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors(&nodes[0], 2);
        {
@@ -10643,11 +10910,15 @@ fn test_batch_funding_close_after_funding_signed() {
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_1),
+                       user_channel_id: Some(42),
                        ..Default::default()
                },
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_2),
                        discard_funding: true,
+                       channel_funding_txo: Some(funding_txo_2),
+                       user_channel_id: Some(43),
                        ..Default::default()
                },
        ]);
@@ -10667,7 +10938,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
-       let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+       let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
 
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
@@ -10712,3 +10983,36 @@ fn test_funding_and_commitment_tx_confirm_same_block() {
        do_test_funding_and_commitment_tx_confirm_same_block(false);
        do_test_funding_and_commitment_tx_confirm_same_block(true);
 }
+
+#[test]
+fn test_accept_inbound_channel_errors_queued() {
+       // For manually accepted inbound channels, tests that a close error is correctly handled
+       // and the channel fails for the initiator.
+       let mut config0 = test_default_channel_config();
+       let mut config1 = config0.clone();
+       config1.channel_handshake_limits.their_to_self_delay = 1000;
+       config1.manually_accept_inbound_channels = true;
+       config0.channel_handshake_config.our_to_self_delay = 2000;
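+       // nodes[0] will ask for a to_self_delay of 2000, which exceeds nodes[1]'s limit of 1000, so
+       // the accept_inbound_channel call below should fail and queue an error to the initiator.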
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
+                               Err(APIError::ChannelUnavailable { err: _ }) => (),
+                               _ => panic!(),
+                       }
+               }
+               _ => panic!("Unexpected event"),
+       }
+       assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+               open_channel_msg.common_fields.temporary_channel_id);
+}