Fix `cmp::max` execution in `ChannelContext::get_dust_buffer_feerate`
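
The functional change this title describes lands in `lightning/src/ln/channel.rs`; the hunks below are the corresponding test updates in `lightning/src/ln/functional_tests.rs`, interleaved with fallout from concurrent refactors (the `common_fields` move on open/accept-channel messages, `best_block` exposing `height` as a plain field rather than an accessor, and `ClosureReason::HTLCsTimedOut` replacing `HolderForceClosed` for HTLC-timeout force-closes). For orientation, a minimal sketch of the buffer computation the title refers to — names and body are illustrative of the "10 sat/vB or 25%, whichever is higher" buffer described in the in-tree comments, not the exact upstream code:

    use core::cmp;

    /// Illustrative sketch only: when estimating dust exposure, assume the
    /// channel feerate may rise by ~10 sat/vB (2530 sat/KW) or by 25%,
    /// whichever is higher, and take the max of the two buffered candidates.
    fn dust_buffer_feerate(feerate_per_kw: u32) -> u32 {
        let plus_ten_sat_per_vb = feerate_per_kw.saturating_add(2530);
        let plus_quarter = feerate_per_kw
            .checked_mul(1250)
            .map(|v| v / 1000)
            .unwrap_or(u32::MAX);
        cmp::max(plus_ten_sat_per_vb, plus_quarter)
    }
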
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 60613a86da6f3bb1b789da3d08e7891535d268d7..8dd3f1fc9124f9ed72b105b14dff7b0ee9e858af 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -49,12 +49,9 @@ use bitcoin::OutPoint as BitcoinOutPoint;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::{PublicKey,SecretKey};
 
-use regex;
-
 use crate::io;
 use crate::prelude::*;
 use alloc::collections::BTreeSet;
-use core::default::Default;
 use core::iter::repeat;
 use bitcoin::hashes::Hash;
 use crate::sync::{Arc, Mutex, RwLock};
@@ -107,22 +104,22 @@ fn test_insane_channel_opens() {
        use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
 
        // Test all mutations that would make the channel open message insane
-       insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
-       insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
+       insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
+       insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
 
-       insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
+       insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });
 
-       insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+       insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 
-       insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
+       insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg });
 
-       insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+       insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
 
-       insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
+       insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
 
-       insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
+       insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });
 
-       insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
+       insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
 }
 
 #[test]
@@ -166,7 +163,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        if !send_from_initiator {
                open_channel_message.channel_reserve_satoshis = 0;
-               open_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+               open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_message);
 
@@ -174,7 +171,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        if send_from_initiator {
                accept_channel_message.channel_reserve_satoshis = 0;
-               accept_channel_message.max_htlc_value_in_flight_msat = 100_000_000;
+               accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000;
        }
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
        {
@@ -190,7 +187,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                                chan_context.holder_selected_channel_reserve_satoshis = 0;
                                chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
                        },
-                       ChannelPhase::Funded(_) => assert!(false),
+                       _ => assert!(false),
                }
        }
 
@@ -1401,7 +1398,7 @@ fn test_fee_spike_violation_fails_htlc() {
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
 
-       let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
 
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
@@ -1599,7 +1596,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
                700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
@@ -1778,7 +1775,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
@@ -2371,13 +2368,13 @@ fn channel_monitor_network_test() {
                connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
                let events = nodes[3].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
-               let close_chan_update_1 = match events[0] {
+               let close_chan_update_1 = match events[1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
-               match events[1] {
+               match events[0] {
                        MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[4].node.get_our_node_id());
                        },
@@ -2403,13 +2400,13 @@ fn channel_monitor_network_test() {
                connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
                let events = nodes[4].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
-               let close_chan_update_2 = match events[0] {
+               let close_chan_update_2 = match events[1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
-               match events[1] {
+               match events[0] {
                        MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => {
                                assert_eq!(node_id, nodes[3].node.get_our_node_id());
                        },
@@ -2417,7 +2414,7 @@ fn channel_monitor_network_test() {
                }
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
-               check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
 
                mine_transaction(&nodes[4], &node_txn[0]);
                check_preimage_claim(&nodes[4], &node_txn);
@@ -2430,7 +2427,7 @@ fn channel_monitor_network_test() {
 
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
                Ok(ChannelMonitorUpdateStatus::Completed));
-       check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2750,7 +2747,7 @@ fn claim_htlc_outputs_single_tx() {
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
                let mut events = nodes[0].node.get_and_clear_pending_events();
-               expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+               expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
                match events.last().unwrap() {
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                        _ => panic!("Unexpected event"),
@@ -3312,13 +3309,13 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                let events = nodes[1].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 2);
                match events[0] {
-                       Event::PendingHTLCsForwardable { .. } => { },
-                       _ => panic!("Unexpected event"),
-               };
-               match events[1] {
                        Event::HTLCHandlingFailed { .. } => { },
                        _ => panic!("Unexpected event"),
                }
+               match events[1] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
                // Deliberately don't process the pending fail-back so they all fail back at once after
                // block connection just like the !deliver_bs_raa case
        }
@@ -3503,7 +3500,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 
                let secp_ctx = Secp256k1::new();
                let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-               let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+               let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
                let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
                        &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
@@ -4616,7 +4613,7 @@ fn test_static_spendable_outputs_preimage_tx() {
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@ -4659,7 +4656,7 @@ fn test_static_spendable_outputs_timeout_tx() {
        mine_transaction(&nodes[1], &commitment_tx[0]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
-       match events[0] {
+       match events[1] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        }
@@ -5075,7 +5072,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@ -5153,7 +5150,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
                MessageSendEvent::UpdateHTLCs { .. } => {},
                _ => panic!("Unexpected event"),
        }
-       match events[1] {
+       match events[2] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexepected event"),
        }
@@ -5351,7 +5348,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
        check_closed_broadcast!(nodes[2], true);
        if deliver_last_raa {
-               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+               expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
 
                let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
                expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
@@ -5682,7 +5679,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -5713,7 +5710,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -5759,7 +5756,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
@@ -5841,26 +5838,26 @@ fn bolt2_open_channel_sending_node_checks_part2() {
        let push_msat=10001;
        assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
        let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
+       assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
 
        // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
        // Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1
-       assert!(node0_to_1_send_open_channel.channel_flags<=1);
+       assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1);
 
        // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
        assert!(BREAKDOWN_TIMEOUT>0);
-       assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
+       assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT);
 
        // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
        let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
-       assert_eq!(node0_to_1_send_open_channel.chain_hash, chain_hash);
+       assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash);
 
        // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_point.serialize()).is_ok());
-       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok());
+       assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok());
 }
 
 #[test]
@@ -5874,7 +5871,7 @@ fn bolt2_open_channel_sane_dust_limit() {
        let push_msat=10001;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
        let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       node0_to_1_send_open_channel.dust_limit_satoshis = 547;
+       node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
        node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
@@ -6182,7 +6179,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
        let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(process_htlc_forwards_event.len(), 2);
-       match &process_htlc_forwards_event[0] {
+       match &process_htlc_forwards_event[1] {
                &Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
        }
@@ -6488,7 +6485,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
        route.paths[0].hops[0].fee_msat = send_amt;
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
@@ -7214,7 +7211,7 @@ fn test_user_configurable_csv_delay() {
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
-       open_channel.to_self_delay = 200;
+       open_channel.common_fields.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
@@ -7229,7 +7226,7 @@ fn test_user_configurable_csv_delay() {
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-       accept_channel.to_self_delay = 200;
+       accept_channel.common_fields.to_self_delay = 200;
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
        let reason_msg;
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
@@ -7246,7 +7243,7 @@ fn test_user_configurable_csv_delay() {
        // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
-       open_channel.to_self_delay = 200;
+       open_channel.common_fields.to_self_delay = 200;
        if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
@@ -7334,6 +7331,9 @@ fn test_announce_disable_channels() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+       // Connect a dummy node for proper future events broadcasting
+       connect_dummy_node(&nodes[0]);
+
        create_announced_chan_between_nodes(&nodes, 0, 1);
        create_announced_chan_between_nodes(&nodes, 1, 0);
        create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -7543,7 +7543,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
                nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
-       send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
+       let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
@@ -7582,7 +7582,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]);
        connect_block(&nodes[0], &block_129);
        let events = nodes[0].node.get_and_clear_pending_events();
-       expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
+       expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]);
        match events.last().unwrap() {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                _ => panic!("Unexpected event"),
@@ -7931,8 +7931,8 @@ fn test_override_channel_config() {
 
        // Assert the channel created by node0 is using the override config.
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert_eq!(res.channel_flags, 0);
-       assert_eq!(res.to_self_delay, 200);
+       assert_eq!(res.common_fields.channel_flags, 0);
+       assert_eq!(res.common_fields.to_self_delay, 200);
 }
 
 #[test]
@@ -7946,11 +7946,11 @@ fn test_override_0msat_htlc_minimum() {
 
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
        let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert_eq!(res.htlc_minimum_msat, 1);
+       assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &res);
        let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-       assert_eq!(res.htlc_minimum_msat, 1);
+       assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 }
 
 #[test]
@@ -8654,7 +8654,7 @@ fn test_concurrent_monitor_claim() {
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
-       check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+       check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
                [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
@@ -8962,7 +8962,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
 
        // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
        // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
-       open_chan_msg_chan_2_0.temporary_channel_id = open_chan_msg_chan_1_0.temporary_channel_id;
+       open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id;
 
        // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
        // `temporary_channel_id` as they are from different peers.
@@ -8973,7 +8973,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
                match &events[0] {
                        MessageSendEvent::SendAcceptChannel { node_id, msg } => {
                                assert_eq!(node_id, &nodes[1].node.get_our_node_id());
-                               assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+                               assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
                        },
                        _ => panic!("Unexpected event"),
                }
@@ -8986,7 +8986,7 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
                match &events[0] {
                        MessageSendEvent::SendAcceptChannel { node_id, msg } => {
                                assert_eq!(node_id, &nodes[2].node.get_our_node_id());
-                               assert_eq!(msg.temporary_channel_id, open_chan_msg_chan_1_0.temporary_channel_id);
+                               assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
                        },
                        _ => panic!("Unexpected event"),
                }
@@ -9106,11 +9106,11 @@ fn test_duplicate_funding_err_in_funding() {
 
        nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       let node_c_temp_chan_id = open_chan_msg.temporary_channel_id;
-       open_chan_msg.temporary_channel_id = real_channel_id;
+       let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
+       open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
        nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
        let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
-       accept_chan_msg.temporary_channel_id = node_c_temp_chan_id;
+       accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
        nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
 
        // Now that we have a second channel with the same funding txo, send a bogus funding message
@@ -9168,7 +9168,7 @@ fn test_duplicate_chan_id() {
                                // first (valid) and second (invalid) channels are closed, given they both have
                                // the same non-temporary channel_id. However, currently we do not, so we just
                                // move forward with it.
-                               assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+                               assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
                                assert_eq!(node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
@@ -9202,7 +9202,7 @@ fn test_duplicate_chan_id() {
        // First try to open a second channel with a temporary channel id equal to the txid-based one.
        // Technically this is allowed by the spec, but we don't support it and there's little reason
        // to. Still, it shouldn't cause any other issues.
-       open_chan_msg.temporary_channel_id = channel_id;
+       open_chan_msg.common_fields.temporary_channel_id = channel_id;
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_chan_msg);
        {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -9211,7 +9211,7 @@ fn test_duplicate_chan_id() {
                        MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
                                // Technically, at this point, nodes[1] would be justified in thinking both
                                // channels are closed, but currently we do not, so we just move forward with it.
-                               assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+                               assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
                                assert_eq!(node_id, nodes[0].node.get_our_node_id());
                        },
                        _ => panic!("Unexpected event"),
@@ -9232,7 +9232,7 @@ fn test_duplicate_chan_id() {
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+               match a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap() {
                        ChannelPhase::UnfundedOutboundV1(mut chan) => {
                                let logger = test_utils::TestLogger::new();
                                chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap()
@@ -9899,10 +9899,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       open_channel.max_htlc_value_in_flight_msat = 50_000_000;
-       open_channel.max_accepted_htlcs = 60;
+       open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
+       open_channel.common_fields.max_accepted_htlcs = 60;
        if on_holder_tx {
-               open_channel.dust_limit_satoshis = 546;
+               open_channel.common_fields.dust_limit_satoshis = 546;
        }
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
        let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
@@ -9947,10 +9947,13 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                (chan.context().get_dust_buffer_feerate(None) as u64,
                chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
        };
-       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
 
-       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
+       // Subtract 3 sats for the multiplier limit and 2 sats for the fixed limit to make sure we are 50% below the dust limit.
+       // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1
+       // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`.
+       let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000;
        let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
 
        let dust_htlc_on_counterparty_tx: u64 = 4;
@@ -10085,7 +10088,7 @@ fn test_non_final_funding_tx() {
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
 
-       let best_height = nodes[0].node.best_block.read().unwrap().height();
+       let best_height = nodes[0].node.best_block.read().unwrap().height;
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -10130,7 +10133,7 @@ fn test_non_final_funding_tx_within_headroom() {
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
 
-       let best_height = nodes[0].node.best_block.read().unwrap().height();
+       let best_height = nodes[0].node.best_block.read().unwrap().height;
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -10532,7 +10535,7 @@ fn test_channel_close_when_not_timely_accepted() {
        // but the nodes disconnect before node 1 could send accept channel
        let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
+       assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -10575,7 +10578,7 @@ fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
        // but the nodes disconnect before node 1 could send accept channel
        let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
        let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-       assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id);
+       assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
 
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -10983,3 +10986,36 @@ fn test_funding_and_commitment_tx_confirm_same_block() {
        do_test_funding_and_commitment_tx_confirm_same_block(false);
        do_test_funding_and_commitment_tx_confirm_same_block(true);
 }
+
+#[test]
+fn test_accept_inbound_channel_errors_queued() {
+       // For manually accepted inbound channels, tests that a close error is correctly handled
+       // and the channel fails for the initiator.
+       let mut config0 = test_default_channel_config();
+       let mut config1 = config0.clone();
+       config1.channel_handshake_limits.their_to_self_delay = 1000;
+       config1.manually_accept_inbound_channels = true;
+       config0.channel_handshake_config.our_to_self_delay = 2000;
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+       let events = nodes[1].node.get_and_clear_pending_events();
+       match events[0] {
+               Event::OpenChannelRequest { temporary_channel_id, .. } => {
+                       match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
+                               Err(APIError::ChannelUnavailable { err: _ }) => (),
+                               _ => panic!(),
+                       }
+               }
+               _ => panic!("Unexpected event"),
+       }
+       assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+               open_channel_msg.common_fields.temporary_channel_id);
+}
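
To trace the dust-exposure arithmetic in `do_test_max_dust_htlc_exposure` above, here is a self-contained sketch of the per-HTLC thresholds the test derives. The weights are the standard BOLT #3 second-stage transaction weights for non-anchor channels; everything else (names, the `main` driver, the example feerate) is illustrative rather than crate API:

    /// BOLT #3 second-stage claim-tx weights, non-anchor channels (illustrative).
    const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
    const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;

    /// An HTLC is dust on the holder's commitment when its value, less the
    /// second-stage claim fee at the buffered feerate, falls below the dust
    /// limit; these mirror the thresholds the test builds above.
    fn outbound_dust_threshold_msat(dust_buffer_feerate: u64, dust_limit_sats: u64) -> u64 {
        (dust_buffer_feerate * HTLC_TIMEOUT_TX_WEIGHT / 1000 + dust_limit_sats) * 1000
    }

    fn inbound_dust_threshold_msat(dust_buffer_feerate: u64, dust_limit_sats: u64) -> u64 {
        (dust_buffer_feerate * HTLC_SUCCESS_TX_WEIGHT / 1000 + dust_limit_sats) * 1000
    }

    fn main() {
        // E.g. a 253 sat/KW channel feerate buffered to 2783 sat/KW by the
        // +2530 branch sketched under the commit title, with a 546 sat dust limit.
        let (feerate, dust_limit) = (2783u64, 546u64);
        assert_eq!(outbound_dust_threshold_msat(feerate, dust_limit), 2_391_000);
        assert_eq!(inbound_dust_threshold_msat(feerate, dust_limit), 2_502_000);
    }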