Merge pull request #1503 from valentinewallace/2022-05-onion-msgs
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 5a88d1b9efcd9792cf2c73db93d7b5fec8abb5ca..be517ea4a7698de94cd9fca02f7b57157a17e99f 100644
@@ -13,6 +13,7 @@
 
 use chain;
 use chain::{Confirm, Listen, Watch};
+use chain::chaininterface::LowerBoundedFeeEstimator;
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
@@ -27,20 +28,22 @@ use routing::gossip::NetworkGraph;
 use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use util::errors::APIError;
 use util::ser::{Writeable, ReadableArgs};
 use util::config::UserConfig;
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::blockdata::block::{Block, BlockHeader};
-use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
+use bitcoin::{Transaction, TxIn, TxOut, Witness};
+use bitcoin::OutPoint as BitcoinOutPoint;
 
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::{PublicKey,SecretKey};
@@ -51,6 +54,7 @@ use io;
 use prelude::*;
 use alloc::collections::BTreeSet;
 use core::default::Default;
+use core::iter::repeat;
 use sync::{Arc, Mutex};
 
 use ln::functional_test_utils::*;
@@ -61,7 +65,7 @@ fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
        use ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
        let mut cfg = UserConfig::default();
-       cfg.peer_channel_config_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
+       cfg.channel_handshake_limits.max_funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1;
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg)]);
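For reviewers following the config rename: the old `own_channel_config` / `peer_channel_config_limits` / `channel_options` fields became `channel_handshake_config` / `channel_handshake_limits` / `channel_config`. A minimal sketch using only fields that appear in this diff:

```rust
use util::config::UserConfig;

fn example_config() -> UserConfig {
	let mut cfg = UserConfig::default();
	// Limits we enforce on the counterparty's handshake parameters:
	cfg.channel_handshake_limits.max_funding_satoshis = 100_000;
	// Our own handshake-time parameters:
	cfg.channel_handshake_config.our_to_self_delay = 6 * 24; // ~one day of blocks
	// Runtime per-channel options, e.g. the forwarding fee:
	cfg.channel_config.forwarding_fee_base_msat = 1_000;
	cfg
}
```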
@@ -1056,26 +1060,6 @@ fn fake_network_test() {
        fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
        claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
 
-       // Add a duplicate new channel from 2 to 4
-       let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
-
-       // Send some payments across both channels
-       let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-       let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-       let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-
-
-       route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
-       let events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 0);
-       nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
-
-       //TODO: Test that routes work again here as we've been notified that the channel is full
-
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
-
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
        check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
@@ -1089,9 +1073,6 @@ fn fake_network_test() {
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
-       close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
 }
 
 #[test]
@@ -1147,7 +1128,7 @@ fn holding_cell_htlc_counting() {
        // We have to forward pending HTLCs twice - once tries to forward the payment forward (and
        // fails), the second will process the resulting failure and fail the HTLC backward.
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
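The new `expect_pending_htlcs_forwardable_and_htlc_handling_failed!` macro asserts on the `Event::HTLCHandlingFailed` events this PR series introduces. A minimal sketch of consuming the event directly (variant and field names as imported from `util::events` above):

```rust
use util::events::{Event, HTLCDestination};

fn describe_htlc_failure(event: &Event) {
	if let Event::HTLCHandlingFailed { failed_next_destination, .. } = event {
		match failed_next_destination {
			HTLCDestination::NextHopChannel { node_id, channel_id } =>
				println!("failed forwarding to {:?} over channel {:?}", node_id, channel_id),
			HTLCDestination::FailedPayment { payment_hash } =>
				println!("failed HTLC for inbound payment {:?}", payment_hash),
			_ => println!("failed for another destination type"),
		}
	}
}
```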
@@ -1790,7 +1771,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        // When this test was written, the default base fee floated based on the HTLC count.
        // It is now fixed, so we simply set the fee to the expected value here.
        let mut config = test_default_channel_config();
-       config.channel_options.forwarding_fee_base_msat = 239;
+       config.channel_config.forwarding_fee_base_msat = 239;
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001, InitFeatures::known(), InitFeatures::known());
@@ -1821,9 +1802,12 @@ fn test_channel_reserve_holding_cell_htlcs() {
 
        // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
        {
-               let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_0);
+               let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+                       .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0);
+               let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0, TEST_FINAL_CLTV);
                route.paths[0].last_mut().unwrap().fee_msat += 1;
                assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+
                unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
                        assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
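Aside on the builder call above (semantics are my reading of the router, not stated in the diff): `with_max_channel_saturation_power_of_half(p)` caps each channel's contribution to `capacity / 2^p`, so `0` effectively disables the saturation cap and lets the test push a single channel right up to the in-flight limit:

```rust
let payment_params = PaymentParameters::from_node_id(recipient_pubkey) // hypothetical recipient
	.with_features(InvoiceFeatures::known())
	.with_max_channel_saturation_power_of_half(0); // cap = capacity / 2^0 = full capacity
```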
@@ -1842,7 +1826,12 @@ fn test_channel_reserve_holding_cell_htlcs() {
                if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
                        break;
                }
-               send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+
+               let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+                       .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0);
+               let route = get_route!(nodes[0], payment_params, recv_value_0, TEST_FINAL_CLTV).unwrap();
+               let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
+               claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
                let (stat01_, stat11_, stat12_, stat22_) = (
                        get_channel_value_stat!(nodes[0], chan_1.2),
@@ -2201,7 +2190,7 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        {
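For reviewers tracking the rename: `force_close_channel` was split into two explicit entry points, both exercised in this diff. In the harness style of this file:

```rust
// Broadcast our latest commitment transaction and close:
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
// Alternatively, when our state may be stale (see do_test_data_loss_protect below),
// close without broadcasting anything:
// nodes[0].node.force_close_without_broadcasting_txn(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
```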
@@ -2222,7 +2211,7 @@ fn channel_monitor_network_test() {
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
@@ -2262,7 +2251,7 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[2], 1);
        check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
@@ -2362,13 +2351,13 @@ fn channel_monitor_network_test() {
 fn test_justice_tx() {
        // Test justice txn built on revoked HTLC-Success tx, against both sides
        let mut alice_config = UserConfig::default();
-       alice_config.channel_options.announced_channel = true;
-       alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
-       alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
+       alice_config.channel_handshake_config.announced_channel = true;
+       alice_config.channel_handshake_limits.force_announced_channel_preference = false;
+       alice_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 5;
        let mut bob_config = UserConfig::default();
-       bob_config.channel_options.announced_channel = true;
-       bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
-       bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
+       bob_config.channel_handshake_config.announced_channel = true;
+       bob_config.channel_handshake_limits.force_announced_channel_preference = false;
+       bob_config.channel_handshake_config.our_to_self_delay = 6 * 24 * 3;
        let user_cfgs = [Some(alice_config), Some(bob_config)];
        let mut chanmon_cfgs = create_chanmon_cfgs(2);
        chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
@@ -2515,10 +2504,10 @@ fn claim_htlc_outputs_shared_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance the network to generate htlc in the two directions
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
-       let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+       let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+       let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2541,9 +2530,9 @@ fn claim_htlc_outputs_shared_tx() {
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[1], payment_hash_2, true);
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
 
                assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
@@ -2560,7 +2549,13 @@ fn claim_htlc_outputs_shared_tx() {
 
                // Next nodes[1] broadcasts its current local tx state:
                assert_eq!(node_txn[1].input.len(), 1);
-               assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); //Spending funding tx unique txouput, tx broadcasted by ChannelManager
+               check_spends!(node_txn[1], chan_1.3);
+
+               // Finally, mine the penalty transaction and check that we get an HTLC failure after
+               // ANTI_REORG_DELAY confirmations.
+               mine_transaction(&nodes[1], &node_txn[0]);
+               connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+               expect_payment_failed!(nodes[1], payment_hash_2, true);
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
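The recurring `split_off(0)` change in this diff is a drain: it returns everything accumulated so far and leaves the shared `Vec` behind the broadcaster's `Mutex` empty, so later assertions in the same test only see transactions broadcast after this point:

```rust
// Take ownership of all transactions broadcast so far and clear the buffer:
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 2);
// A later lock().unwrap() on txn_broadcasted now starts from a clean slate.
```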
@@ -2579,11 +2574,11 @@ fn claim_htlc_outputs_single_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance the network to generate htlc in the two directions
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
        // time as two different claim transactions, as we're going to time out the HTLCs given a high current block height
-       let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+       let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+       let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2599,15 +2594,15 @@ fn claim_htlc_outputs_single_tx() {
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                let mut events = nodes[0].node.get_and_clear_pending_events();
                expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
-               match events[1] {
+               match events.last().unwrap() {
                        Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                        _ => panic!("Unexpected event"),
                }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[1], payment_hash_2, true);
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert!(node_txn.len() == 9 || node_txn.len() == 10);
 
                // Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
@@ -2635,6 +2630,14 @@ fn claim_htlc_outputs_single_tx() {
                assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
                assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
                assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
+
+               // Finally, mine the penalty transactions and check that we get an HTLC failure after
+               // ANTI_REORG_DELAY confirmations.
+               mine_transaction(&nodes[1], &node_txn[2]);
+               mine_transaction(&nodes[1], &node_txn[3]);
+               mine_transaction(&nodes[1], &node_txn[4]);
+               connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+               expect_payment_failed!(nodes[1], payment_hash_2, true);
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
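The new tails of both claim_htlc_outputs tests encode the same rule: an on-chain HTLC resolution only produces a payment-failure event once the claiming transaction is `ANTI_REORG_DELAY` blocks deep, so a shallow reorg cannot surface and then retract a failure. The pattern (with `penalty_tx` standing in for the mined transaction):

```rust
mine_transaction(&nodes[1], &penalty_tx);         // confirmation depth 1
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);  // depth == ANTI_REORG_DELAY
expect_payment_failed!(nodes[1], payment_hash_2, true); // only now does the event fire
```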
@@ -2903,7 +2906,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
        check_added_monitors!(nodes[2], 1);
 
        let events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -2975,7 +2978,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
                }
        }
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -3043,7 +3046,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -3106,7 +3109,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
 
        nodes[2].node.fail_htlc_backwards(&first_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3119,7 +3122,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        // Drop the last RAA from 3 -> 2
 
        nodes[2].node.fail_htlc_backwards(&second_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3136,7 +3139,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        check_added_monitors!(nodes[2], 1);
 
        nodes[2].node.fail_htlc_backwards(&third_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -3168,11 +3171,15 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // commitment transaction for nodes[0] until process_pending_htlc_forwards().
                check_added_monitors!(nodes[1], 1);
                let events = nodes[1].node.get_and_clear_pending_events();
-               assert_eq!(events.len(), 1);
+               assert_eq!(events.len(), 2);
                match events[0] {
                        Event::PendingHTLCsForwardable { .. } => { },
                        _ => panic!("Unexpected event"),
                };
+               match events[1] {
+                       Event::HTLCHandlingFailed { .. } => { },
+                       _ => panic!("Unexpected event"),
+               }
                // Deliberately don't process the pending fail-back so they all fail back at once after
                // block connection just like the !deliver_bs_raa case
        }
@@ -3186,7 +3193,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
 
        let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
+       assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() });
        match events[0] {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
                _ => panic!("Unexepected event"),
@@ -3387,13 +3394,13 @@ fn test_htlc_ignore_latest_remote_commitment() {
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 3);
        assert_eq!(node_txn[0], node_txn[1]);
 
@@ -3450,7 +3457,7 @@ fn test_force_close_fail_back() {
        // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 
-       nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
@@ -3473,7 +3480,7 @@ fn test_force_close_fail_back() {
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
                get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
-                       .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &node_cfgs[2].logger);
+                       .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
        }
        mine_transaction(&nodes[2], &tx);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
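On the `LowerBoundedFeeEstimator` wrapper threaded through `provide_payment_preimage` and `Channel::new_*` below: it clamps any `FeeEstimator` output to the minimum relay floor. A sketch, assuming the crate-internal `bounded_sat_per_1000_weight` accessor and the 253 sat/kW floor:

```rust
use chain::chaininterface::{ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};

struct FlatEstimator(u32);
impl FeeEstimator for FlatEstimator {
	fn get_est_sat_per_1000_weight(&self, _target: ConfirmationTarget) -> u32 { self.0 }
}

fn floor_example() {
	// An absurdly low underlying estimate is clamped up to the floor, so fee
	// consumers never see a sub-relay feerate:
	let clamped = LowerBoundedFeeEstimator::new(&FlatEstimator(0));
	assert!(clamped.bounded_sat_per_1000_weight(ConfirmationTarget::Normal) >= 253);
}
```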
@@ -4260,7 +4267,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                connect_block(&nodes[1], &block);
        }
 
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
 
        check_added_monitors!(nodes[1], 1);
        let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -4324,7 +4331,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
        connect_blocks(&nodes[1], 1);
 
        if forwarded_htlc {
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
                check_added_monitors!(nodes[1], 1);
                let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(fail_commit.len(), 1);
@@ -4777,7 +4784,7 @@ fn test_claim_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
-       nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
@@ -4806,12 +4813,12 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
-       nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
 
-       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
        assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
@@ -5017,7 +5024,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 
-       let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(revoked_htlc_txn.len(), 2);
        check_spends!(revoked_htlc_txn[0], chan_1.3);
        assert_eq!(revoked_htlc_txn[1].input.len(), 1);
@@ -5275,7 +5282,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        // When this test was written, the default base fee floated based on the HTLC count.
        // It is now fixed, so we simply set the fee to the expected value here.
        let mut config = test_default_channel_config();
-       config.channel_options.forwarding_fee_base_msat = 196;
+       config.channel_config.forwarding_fee_base_msat = 196;
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
                &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
@@ -5372,7 +5379,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        mine_transaction(&nodes[1], &htlc_timeout_tx);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
        assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -5487,23 +5494,23 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // When this test was written, the default base fee floated based on the HTLC count.
        // It is now fixed, so we simply set the fee to the expected value here.
        let mut config = test_default_channel_config();
-       config.channel_options.forwarding_fee_base_msat = 196;
+       config.channel_config.forwarding_fee_base_msat = 196;
        let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs,
                &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
 
-       create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
-       let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
+       let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+       let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+       let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+       let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
+       let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance and check output sanity...
        send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
        send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
-       let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+       let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
        // 0th HTLC:
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
@@ -5538,8 +5545,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // Double-check that six of the new HTLC were added
        // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
        // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
-       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
 
        // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
        // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
@@ -5548,7 +5555,14 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[4].node.fail_htlc_backwards(&payment_hash_5);
        nodes[4].node.fail_htlc_backwards(&payment_hash_6);
        check_added_monitors!(nodes[4], 0);
-       expect_pending_htlcs_forwardable!(nodes[4]);
+
+       let failed_destinations = vec![
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
        check_added_monitors!(nodes[4], 1);
 
        let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
@@ -5562,7 +5576,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[5].node.fail_htlc_backwards(&payment_hash_2);
        nodes[5].node.fail_htlc_backwards(&payment_hash_4);
        check_added_monitors!(nodes[5], 0);
-       expect_pending_htlcs_forwardable!(nodes[5]);
+
+       let failed_destinations_2 = vec![
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
+               HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
        check_added_monitors!(nodes[5], 1);
 
        let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
@@ -5570,9 +5589,18 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
        commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
 
-       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
-
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
+
+       // After the four and two removes above from nodes[4] and nodes[5] respectively, nodes[3] should receive six HTLCHandlingFailed events
+       let failed_destinations_3 = vec![
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+               HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
        check_added_monitors!(nodes[3], 1);
        let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
@@ -5598,7 +5626,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        //
        // Alternatively, we may broadcast the previous commitment transaction, which should only
        // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
-       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
+       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
 
        if announce_latest {
                mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
@@ -5607,11 +5635,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        }
        let events = nodes[2].node.get_and_clear_pending_events();
        let close_event = if deliver_last_raa {
-               assert_eq!(events.len(), 2);
-               events[1].clone()
+               assert_eq!(events.len(), 2 + 6);
+               events.last().clone().unwrap()
        } else {
                assert_eq!(events.len(), 1);
-               events[0].clone()
+               events.last().clone().unwrap()
        };
        match close_event {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
@@ -5622,8 +5650,17 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        check_closed_broadcast!(nodes[2], true);
        if deliver_last_raa {
                expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+
+               let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+               expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
        } else {
-               expect_pending_htlcs_forwardable!(nodes[2]);
+               let expected_destinations: Vec<HTLCDestination> = if announce_latest {
+                       repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+               } else {
+                       repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+               };
+
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
        }
        check_added_monitors!(nodes[2], 3);
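The new `core::iter::repeat` import exists for exactly this idiom: building a `Vec` of n identical expected destinations instead of writing each one out (cf. `failed_destinations_3` above):

```rust
use core::iter::repeat;

let expected: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel {
	node_id: Some(nodes[3].node.get_our_node_id()),
	channel_id: chan_2_3.2,
}).take(3).collect();
```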
 
@@ -5987,7 +6024,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        let htlc_value = if use_dust { 50000 } else { 3000000 };
        let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
        nodes[1].node.fail_htlc_backwards(&our_payment_hash);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        check_added_monitors!(nodes[1], 1);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -6369,7 +6406,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
        // When this test was written, the default base fee floated based on the HTLC count.
        // It is now fixed, so we simply set the fee to the expected value here.
        let mut config = test_default_channel_config();
-       config.channel_options.forwarding_fee_base_msat = 196;
+       config.channel_config.forwarding_fee_base_msat = 196;
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
@@ -6447,7 +6484,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() {
 
        // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
        let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(process_htlc_forwards_event.len(), 1);
+       assert_eq!(process_htlc_forwards_event.len(), 2);
        match &process_htlc_forwards_event[0] {
                &Event::PendingHTLCsForwardable { .. } => {},
                _ => panic!("Unexpected event"),
@@ -7072,7 +7109,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+       let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
 
@@ -7120,7 +7157,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
 
        check_added_monitors!(nodes[1], 0);
        commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_4.len(), 1);
 
@@ -7164,7 +7201,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        nodes[1].node.fail_htlc_backwards(&payment_hash_2);
        check_added_monitors!(nodes[1], 0);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
        check_added_monitors!(nodes[1], 1);
 
        let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -7282,37 +7319,25 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
-               timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
+               timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
+                       .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
+               check_spends!(timeout_tx[0], bs_commitment_tx[0]);
+               // For both revoked and non-revoked commitment transactions, after ANTI_REORG_DELAY
+               // the dust HTLC should have been failed.
+               expect_payment_failed!(nodes[0], dust_hash, true);
+
                if !revoked {
-                       expect_payment_failed!(nodes[0], dust_hash, true);
                        assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-                       // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
-                       mine_transaction(&nodes[0], &timeout_tx[0]);
-                       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-                       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-                       expect_payment_failed!(nodes[0], non_dust_hash, true);
                } else {
-                       // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
-                       // commitment tx
-                       let events = nodes[0].node.get_and_clear_pending_events();
-                       assert_eq!(events.len(), 2);
-                       let first;
-                       match events[0] {
-                               Event::PaymentPathFailed { payment_hash, .. } => {
-                                       if payment_hash == dust_hash { first = true; }
-                                       else { first = false; }
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
-                       match events[1] {
-                               Event::PaymentPathFailed { payment_hash, .. } => {
-                                       if first { assert_eq!(payment_hash, non_dust_hash); }
-                                       else { assert_eq!(payment_hash, dust_hash); }
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
+                       assert_eq!(timeout_tx[0].lock_time, 0);
                }
+               // We fail non-dust HTLC 2 by broadcasting the local timeout/revocation-claim tx
+               mine_transaction(&nodes[0], &timeout_tx[0]);
+               assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+               connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+               expect_payment_failed!(nodes[0], non_dust_hash, true);
        }
 }
 
@@ -7328,9 +7353,9 @@ fn test_user_configurable_csv_delay() {
        // We test our channel constructors yield errors when we pass them absurd csv delay
 
        let mut low_our_to_self_config = UserConfig::default();
-       low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
+       low_our_to_self_config.channel_handshake_config.our_to_self_delay = 6;
        let mut high_their_to_self_config = UserConfig::default();
-       high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
+       high_their_to_self_config.channel_handshake_limits.their_to_self_delay = 100;
        let user_cfgs = [Some(high_their_to_self_config.clone()), None];
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
@@ -7338,7 +7363,7 @@ fn test_user_configurable_csv_delay() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
-       if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+       if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0,
                &low_our_to_self_config, 0, 42)
        {
@@ -7352,7 +7377,7 @@ fn test_user_configurable_csv_delay() {
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -7384,7 +7409,7 @@ fn test_user_configurable_csv_delay() {
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -7395,14 +7420,11 @@ fn test_user_configurable_csv_delay() {
        } else { assert!(false); }
 }
 
-#[test]
-fn test_data_loss_protect() {
-       // We want to be sure that :
-       // * we don't broadcast our Local Commitment Tx in case of fallen behind
-       //   (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
-       // * we close channel in case of detecting other being fallen behind
-       // * we are able to claim our own outputs thanks to to_remote being static
-       // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
+fn do_test_data_loss_protect(reconnect_panicing: bool) {
+       // When we get a data_loss_protect proving we're behind, we immediately panic as the
+       // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
+       // panic message informs the user they should force-close without broadcasting, which is tested
+       // if `reconnect_panicing` is not set.
        let persister;
        let logger;
        let fee_estimator;
@@ -7460,53 +7482,53 @@ fn test_data_loss_protect() {
 
        check_added_monitors!(nodes[0], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       if reconnect_panicing {
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
-       let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+               let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-       // Check we don't broadcast any transactions following learning of per_commitment_point from B
-       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-       check_added_monitors!(nodes[0], 1);
+               // Check that B detects that A has fallen behind: B should send a warning
+               // message rather than force-closing, giving A the possibility to recover
+               // from the warning.
+               nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+               let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+               assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+
+               {
+                       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+                       // Node B should not have broadcast a transaction to force-close the channel!
+                       assert!(node_txn.is_empty());
+               }
+
+               let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+               // Check A panics upon seeing proof it has fallen behind.
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
+               return; // By this point we should have panic'ed!
+       }
 
+       nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
        {
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 0);
        }
 
-       let mut reestablish_1 = Vec::with_capacity(1);
        for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-               if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
-                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
-                       reestablish_1.push(msg.clone());
-               } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+               if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
                } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
                        match action {
                                &ErrorAction::SendErrorMessage { ref msg } => {
-                                       assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
+                                       assert_eq!(msg.data, "Channel force-closed");
                                },
                                _ => panic!("Unexpected event!"),
                        }
                } else {
-                       panic!("Unexpected event")
+                       panic!("Unexpected event {:?}", msg)
                }
        }
 
-       // Check we close channel detecting A is fallen-behind
-       // Check that we sent the warning message when we detected that A has fallen behind,
-       // and give the possibility for A to recover from the warning.
-       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-       let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
-       assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
-
-       // Check A is able to claim to_remote output
-       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-       // The node B should not broadcast the transaction to force close the channel!
-       assert!(node_txn.is_empty());
-       // B should now detect that there is something wrong and should force close the channel.
-       let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
-
        // After the warning message sent by B, we should not be able to
        // use the channel, nor reconnect to it successfully.
        assert!(nodes[0].node.list_usable_channels().is_empty());
@@ -7537,6 +7559,17 @@ fn test_data_loss_protect() {
        check_closed_broadcast!(nodes[1], false);
 }
 
+#[test]
+#[should_panic]
+fn test_data_loss_protect_showing_stale_state_panics() {
+       do_test_data_loss_protect(true);
+}
+
+#[test]
+fn test_force_close_without_broadcast() {
+       do_test_data_loss_protect(false);
+}
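+// A minimal sketch of how the shared helper is plausibly parameterized (its real body
+// sits earlier in this file; the parameter name is assumed):
+//
+// fn do_test_data_loss_protect(reconnect_panicing: bool) {
+//     // ... desync node A by restarting it from a stale serialized ChannelManager ...
+//     if reconnect_panicing {
+//             // Hand A the counterparty's channel_reestablish proving A is stale: A must
+//             // panic rather than risk broadcasting a revoked commitment transaction.
+//     } else {
+//             // A force-closes without broadcasting and sends a "Channel force-closed" error.
+//     }
+// }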
+
 #[test]
 fn test_check_htlc_underpaying() {
        // Send payment through A -> B but A is maliciously
@@ -7569,7 +7602,7 @@ fn test_check_htlc_underpaying() {
        // Note that we first have to wait a random delay before processing the receipt of the HTLC,
        // and then wait a second random delay before failing the HTLC back:
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
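+       // The new macro plausibly extends expect_pending_htlcs_forwardable! by also draining
+       // the HTLCHandlingFailed events and matching each against an expected HTLCDestination,
+       // roughly (illustrative only, not the real expansion):
+       //
+       // let events = nodes[1].node.get_and_clear_pending_events();
+       // assert_eq!(events.len(), 1 + expected_destinations.len());
+       // for (event, expected) in events.iter().skip(1).zip(expected_destinations.iter()) {
+       //      if let Event::HTLCHandlingFailed { failed_next_destination, .. } = event {
+       //              assert_eq!(failed_next_destination, expected);
+       //      } else { panic!("Unexpected event"); }
+       // }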
 
        // Node 3 is expecting payment of 100_000 but received 10_000,
        // it should fail htlc like we didn't know the preimage.
@@ -7826,7 +7859,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
        connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
-       let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+       let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(revoked_htlc_txn.len(), 3);
        check_spends!(revoked_htlc_txn[1], chan.3);
 
@@ -7847,7 +7880,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
        let events = nodes[0].node.get_and_clear_pending_events();
        expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
-       match events[1] {
+       match events.last().unwrap() {
                Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
                _ => panic!("Unexpected event"),
        }
@@ -8087,22 +8120,26 @@ fn test_counterparty_raa_skip_no_crash() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
-       let mut guard = nodes[0].node.channel_state.lock().unwrap();
-       let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
+       let per_commitment_secret;
+       let next_per_commitment_point;
+       {
+               let mut guard = nodes[0].node.channel_state.lock().unwrap();
+               let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
 
-       const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+               const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
-       // Make signer believe we got a counterparty signature, so that it allows the revocation
-       keys.get_enforcement_state().last_holder_commitment -= 1;
-       let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+               // Make signer believe we got a counterparty signature, so that it allows the revocation
+               keys.get_enforcement_state().last_holder_commitment -= 1;
+               per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
 
-       // Must revoke without gaps
-       keys.get_enforcement_state().last_holder_commitment -= 1;
-       keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+               // Must revoke without gaps
+               keys.get_enforcement_state().last_holder_commitment -= 1;
+               keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
 
-       keys.get_enforcement_state().last_holder_commitment -= 1;
-       let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
-               &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+               keys.get_enforcement_state().last_holder_commitment -= 1;
+               next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+                       &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+       }
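+       // Scoping note: the braces above drop the channel_state MutexGuard before the
+       // RevokeAndACK below is handled; only plain values (the secret and the point)
+       // escape the scope. The same pattern in miniature (illustrative):
+       //
+       // let secret;
+       // {
+       //      let mut guard = state.lock().unwrap();
+       //      secret = guard.release_next_secret();
+       // } // guard (and its borrow) dropped here, before further message handling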
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
@@ -8123,19 +8160,19 @@ fn test_bump_txn_sanitize_tracking_maps() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
        // Lock HTLC in both directions
-       let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
-       route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+       let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+       let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
 
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
        // Revoke local commitment tx
-       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+       claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
 
        // Broadcast set of revoked txn on A
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
-       expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
 
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -8245,7 +8282,7 @@ fn test_override_channel_config() {
 
        // Node0 initiates a channel to node1 using the override config.
        let mut override_config = UserConfig::default();
-       override_config.own_channel_config.our_to_self_delay = 200;
+       override_config.channel_handshake_config.our_to_self_delay = 200;
 
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, Some(override_config)).unwrap();
 
@@ -8258,7 +8295,7 @@ fn test_override_channel_config() {
 #[test]
 fn test_override_0msat_htlc_minimum() {
        let mut zero_config = UserConfig::default();
-       zero_config.own_channel_config.our_htlc_minimum_msat = 0;
+       zero_config.channel_handshake_config.our_htlc_minimum_msat = 0;
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
@@ -8282,17 +8319,17 @@ fn test_channel_update_has_correct_htlc_maximum_msat() {
        // 2. MUST be set to less than or equal to the `max_htlc_value_in_flight_msat` received from the peer.
 
        let mut config_30_percent = UserConfig::default();
-       config_30_percent.channel_options.announced_channel = true;
-       config_30_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
+       config_30_percent.channel_handshake_config.announced_channel = true;
+       config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30;
        let mut config_50_percent = UserConfig::default();
-       config_50_percent.channel_options.announced_channel = true;
-       config_50_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
+       config_50_percent.channel_handshake_config.announced_channel = true;
+       config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
        let mut config_95_percent = UserConfig::default();
-       config_95_percent.channel_options.announced_channel = true;
-       config_95_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
+       config_95_percent.channel_handshake_config.announced_channel = true;
+       config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95;
        let mut config_100_percent = UserConfig::default();
-       config_100_percent.channel_options.announced_channel = true;
-       config_100_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+       config_100_percent.channel_handshake_config.announced_channel = true;
+       config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
 
        let chanmon_cfgs = create_chanmon_cfgs(4);
        let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
@@ -8310,19 +8347,19 @@ fn test_channel_update_has_correct_htlc_maximum_msat() {
 
        // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
        // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
-       assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
+       assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
        // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
        // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
-       assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
+       assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
 
        // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
        // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
        // `channel_value`.
-       assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+       assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
        // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
        // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
        // `channel_value`.
-       assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+       assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
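+
+       // htlc_maximum_msat is now a required u64 on ChannelUpdate rather than an
+       // OptionalField, so the assertions above compare values directly. E.g.
+       // (illustrative arithmetic): a 1_000_000 sat channel capped at 90% yields
+       // htlc_maximum_msat == 1_000_000 * 1_000 * 90 / 100 == 900_000_000.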
 }
 
 #[test]
@@ -8361,7 +8398,7 @@ fn test_manually_accept_inbound_channel_request() {
                _ => panic!("Unexpected event"),
        }
 
-       nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
 
        let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_msg_ev.len(), 1);
@@ -8396,7 +8433,7 @@ fn test_manually_reject_inbound_channel_request() {
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
                }
                _ => panic!("Unexpected event"),
        }
@@ -8444,12 +8481,12 @@ fn test_reject_funding_before_inbound_channel_accepted() {
        // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
        // `handle_accept_channel`, which is required in order for `create_funding_transaction` to
        // succeed when `nodes[0]` is passed to it.
-       {
+       let accept_chan_msg = {
                let mut lock;
                let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id);
-               let accept_chan_msg = channel.get_accept_channel_message();
-               nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
-       }
+               channel.get_accept_channel_message()
+       };
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
 
        let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
 
@@ -8688,7 +8725,7 @@ fn test_bad_secret_hash() {
        // All the below cases should end up being handled exactly identically, so we macro the
        // resulting events.
        macro_rules! handle_unknown_invalid_payment_data {
-               () => {
+               ($payment_hash: expr) => {
                        check_added_monitors!(nodes[0], 1);
                        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                        let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -8698,7 +8735,7 @@ fn test_bad_secret_hash() {
                        // We have to forward pending HTLCs once to process the receipt of the HTLC and then
                        // again to process the pending backwards-failure of the HTLC
                        expect_pending_htlcs_forwardable!(nodes[1]);
-                       expect_pending_htlcs_forwardable!(nodes[1]);
+                       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
                        check_added_monitors!(nodes[1], 1);
 
                        // We should fail the payment back
@@ -8719,17 +8756,17 @@ fn test_bad_secret_hash() {
 
        // Send a payment with the right payment hash but the wrong payment secret
        nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(our_payment_hash);
        expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash, but the right payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(random_payment_hash);
        expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
 
        // Send a payment with a random payment hash and random payment secret
        nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
-       handle_unknown_invalid_payment_data!();
+       handle_unknown_invalid_payment_data!(random_payment_hash);
        expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
 }
 
@@ -9049,7 +9086,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                force_closing_node = 1;
                counterparty_node = 0;
        }
-       nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
        check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
@@ -9412,6 +9449,10 @@ fn test_invalid_funding_tx() {
        // funding transactions from their counterparties, leading to a multi-implementation critical
        // security vulnerability (though we always sanitized properly, we've previously had
        // un-released crashes in the sanitization process).
+       //
+       // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
+       // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
+       // gave up on it. We test this here by generating such a transaction.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -9422,9 +9463,19 @@ fn test_invalid_funding_tx() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
 
        let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+
+       // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
+       // 136 bytes long, matching our "accepted HTLC preimage spend" detection. That detection
+       // previously caused a panic, as we'd try to extract a 32-byte preimage from a witness element
+       // without first checking its length.
+       let wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
+       assert!(chan_utils::HTLCType::scriptlen_to_htlctype(wit_program.len()).unwrap() ==
+               chan_utils::HTLCType::AcceptedHTLC);
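+       // Only the length matters here: deliberately_bogus_accepted_htlc_witness_program()
+       // presumably returns 136 arbitrary script bytes. A plausible stand-in (illustrative,
+       // not necessarily the real construction):
+       //
+       // let wit_program: Vec<u8> = repeat(opcodes::all::OP_NOP.into_u8()).take(136).collect();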
+
+       let wit_program_script: Script = wit_program.clone().into();
        for output in tx.output.iter_mut() {
                // Make the confirmed funding transaction have a bogus script_pubkey
-               output.script_pubkey = bitcoin::Script::new();
+               output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash());
        }
 
        nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
@@ -9454,6 +9505,28 @@ fn test_invalid_funding_tx() {
                } else { panic!(); }
        } else { panic!(); }
        assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
+       // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
+       // as it's not 32 bytes long.
+       let mut spend_tx = Transaction {
+               version: 2i32, lock_time: 0,
+               input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
+                       previous_output: BitcoinOutPoint {
+                               txid: tx.txid(),
+                               vout: idx as u32,
+                       },
+                       script_sig: Script::new(),
+                       sequence: 0xfffffffd, // below 0xfffffffe, signaling replaceability per BIP 125
+                       witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
+               }).collect(),
+               output: vec![TxOut {
+                       value: 1000,
+                       script_pubkey: Script::new(),
+               }]
+       };
+       check_spends!(spend_tx, tx);
+       mine_transaction(&nodes[1], &spend_tx);
 }
 
 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
@@ -9485,7 +9558,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-       nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
@@ -9519,7 +9592,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
                // additional block built on top of the current chain.
                nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
                        &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
                check_added_monitors!(nodes[1], 1);
 
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -9702,7 +9775,11 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
                // Now we go fail back the first HTLC from the user end.
                nodes[1].node.fail_htlc_backwards(&our_payment_hash);
 
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               let expected_destinations = vec![
+                       HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+                       HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+               ];
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
                nodes[1].node.process_pending_htlc_forwards();
 
                check_added_monitors!(nodes[1], 1);
@@ -9719,7 +9796,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
                if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
        } else {
                // Let the second HTLC fail and claim the first
-               expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                nodes[1].node.process_pending_htlc_forwards();
 
                check_added_monitors!(nodes[1], 1);
@@ -9727,7 +9804,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
                nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
                commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 
-               expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+               expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
 
                claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
        }
@@ -9761,7 +9838,7 @@ fn test_inconsistent_mpp_params() {
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
-       create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
 
        let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
                .with_features(InvoiceFeatures::known());
@@ -9816,7 +9893,7 @@ fn test_inconsistent_mpp_params() {
        }
        expect_pending_htlcs_forwardable_ignore!(nodes[3]);
        nodes[3].node.process_pending_htlc_forwards();
-       expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
        nodes[3].node.process_pending_htlc_forwards();
 
        check_added_monitors!(nodes[3], 1);
@@ -9825,14 +9902,14 @@ fn test_inconsistent_mpp_params() {
        nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
 
-       expect_pending_htlcs_forwardable!(nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
        check_added_monitors!(nodes[2], 1);
 
        let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
 
-       expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+       expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
 
        nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
        check_added_monitors!(nodes[0], 1);
@@ -9862,7 +9939,7 @@ fn test_keysend_payments_to_public_node() {
        };
        let scorer = test_utils::TestScorer::with_penalty(0);
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = find_route(&payer_pubkey, &route_params, &network_graph.read_only(), None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+       let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
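+       // find_route now takes the NetworkGraph directly rather than a ReadOnlyNetworkGraph,
+       // presumably taking the read lock internally. Assumed shape of the updated signature
+       // (illustrative):
+       //
+       // pub fn find_route<L: Deref, S: Score>(
+       //      our_node_pubkey: &PublicKey, route_params: &RouteParameters,
+       //      network_graph: &NetworkGraph, first_hops: Option<&[&ChannelDetails]>,
+       //      logger: L, scorer: &S, random_seed_bytes: &[u8; 32],
+       // ) -> Result<Route, LightningError>;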
 
        let test_preimage = PaymentPreimage([42; 32]);
        let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
@@ -9898,8 +9975,8 @@ fn test_keysend_payments_to_private_node() {
        let scorer = test_utils::TestScorer::with_penalty(0);
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
        let route = find_route(
-               &payer_pubkey, &route_params, &network_graph.read_only(),
-               Some(&first_hops.iter().collect::<Vec<_>>()), nodes[0].logger, &scorer, &random_seed_bytes
+               &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+               nodes[0].logger, &scorer, &random_seed_bytes
        ).unwrap();
 
        let test_preimage = PaymentPreimage([42; 32]);
@@ -9945,7 +10022,11 @@ fn test_double_partial_claim() {
        connect_blocks(&nodes[3], TEST_FINAL_CLTV);
        connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
 
-       expect_pending_htlcs_forwardable!(nodes[3]);
+       let failed_destinations = vec![
+               HTLCDestination::FailedPayment { payment_hash },
+               HTLCDestination::FailedPayment { payment_hash },
+       ];
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
 
        pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
 
@@ -10182,7 +10263,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let mut config = test_default_channel_config();
-       config.channel_options.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
+       config.channel_config.max_dust_htlc_exposure_msat = 5_000_000; // default setting value
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
@@ -10225,13 +10306,13 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                chan.get_dust_buffer_feerate(None) as u64
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_outbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
+       let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
 
        let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
-       let dust_inbound_htlc_on_holder_tx: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
+       let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat;
 
        let dust_htlc_on_counterparty_tx: u64 = 25;
-       let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_options.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
+       let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx;
 
        if on_holder_tx {
                if dust_outbound_balance {
@@ -10275,9 +10356,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                if on_holder_tx {
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1);
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat;
-                       unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat)));
+                       unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat)));
                } else {
-                       unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat)));
+                       unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat)));
                }
        } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
                let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat });
@@ -10292,10 +10373,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                        // Outbound dust balance: 6399 sats
                        let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
                        let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_options.max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1);
                } else {
                        // Outbound dust balance: 5200 sats
-                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_options.max_dust_htlc_exposure_msat), 1);
+                       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat), 1);
                }
        } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound {
                let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000);
@@ -10329,3 +10410,45 @@ fn test_max_dust_htlc_exposure() {
        do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
        do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
 }
+
+#[test]
+fn test_non_final_funding_tx() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message);
+
+       let best_height = nodes[0].node.best_block.read().unwrap().height();
+
+       let chan_id = *nodes[0].network_chan_count.borrow();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: 0x1, witness: Witness::from_vec(vec!(vec!(1))) };
+       assert_eq!(events.len(), 1);
+       let mut tx = match events[0] {
+               Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
+                       // Timelock the transaction _beyond_ the best client height + 2.
+                       Transaction { version: chan_id as i32, lock_time: best_height + 3, input: vec![input], output: vec![TxOut {
+                               value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+                       }]}
+               },
+               _ => panic!("Unexpected event"),
+       };
+       // The transaction should be rejected, as it's evaluated as non-final for propagation.
+       match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
+               Err(APIError::APIMisuseError { err }) => {
+                       assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
+               },
+               _ => panic!()
+       }
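+       // The rejection above implies a height-based locktime check of roughly this shape
+       // (illustrative; the real check lives in funding_transaction_generated):
+       //
+       // if tx.lock_time < 500_000_000 && tx.lock_time > best_height + 2 {
+       //      return Err(APIError::APIMisuseError {
+       //              err: "Funding transaction absolute timelock is non-final".to_owned() });
+       // }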
+
+       // However, the transaction should be accepted if it's within the +2 headroom of the best block.
+       tx.lock_time -= 1;
+       assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
+       get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+}