Add `send_probe` and introduce probing cookies
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 76487ddcfd16967369bfde624ac1df4a4b77bb83..9ecbee739f1874eaa0b8cbfca4d19ff7658b5e90 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -37,10 +37,12 @@ use util::config::UserConfig;
 
 use bitcoin::hash_types::BlockHash;
 use bitcoin::blockdata::block::{Block, BlockHeader};
-use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::script::{Builder, Script};
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
+use bitcoin::{Transaction, TxIn, TxOut, Witness};
+use bitcoin::OutPoint as BitcoinOutPoint;
 
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::{PublicKey,SecretKey};
@@ -2201,7 +2203,7 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
        {
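
Throughout this file, `force_close_channel` is renamed to `force_close_broadcasting_latest_txn`,
making explicit that it publishes the holder's latest commitment transaction on-chain. A minimal
sketch of the renamed call (assuming a `ChannelManager` handle `node` with `channel_id` and
`counterparty_id` in scope):

    // Force-close, broadcasting our latest local commitment transaction:
    node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_id).unwrap();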
@@ -2222,7 +2224,7 @@ fn channel_monitor_network_test() {
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
@@ -2262,7 +2264,7 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[2], 1);
        check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
@@ -2515,10 +2517,10 @@ fn claim_htlc_outputs_shared_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance the network to generate htlc in the two directions
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered and received HTLC outputs on top of the commitment tx
-       let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+       let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+       let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2541,9 +2543,9 @@ fn claim_htlc_outputs_shared_tx() {
                check_added_monitors!(nodes[1], 1);
                check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[1], payment_hash_2, true);
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
 
                assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
@@ -2560,7 +2562,13 @@ fn claim_htlc_outputs_shared_tx() {
 
                // Next nodes[1] broadcasts its current local tx state:
                assert_eq!(node_txn[1].input.len(), 1);
-               assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); //Spending funding tx unique txouput, tx broadcasted by ChannelManager
+               check_spends!(node_txn[1], chan_1.3);
+
+               // Finally, mine the penalty transaction and check that we get an HTLC failure after
+               // ANTI_REORG_DELAY confirmations.
+               mine_transaction(&nodes[1], &node_txn[0]);
+               connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+               expect_payment_failed!(nodes[1], payment_hash_2, true);
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
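
The behavioral change exercised above: an HTLC output on a revoked counterparty commitment is no
longer failed back when that commitment confirms, but only once our penalty claim itself confirms
and reaches `ANTI_REORG_DELAY`. The pattern, compressed (`penalty_tx` standing in for
`node_txn[0]` above):

    mine_transaction(&nodes[1], &penalty_tx);                // confirm our penalty claim
    connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);         // wait out the reorg-safety window
    expect_payment_failed!(nodes[1], payment_hash_2, true);  // only now is the HTLC failed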
@@ -2579,11 +2587,11 @@ fn claim_htlc_outputs_single_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        // Rebalance the network to generate htlc in the two directions
-       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
        // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered and received HTLC outputs on top of the commitment tx, but this
        // time as two different claim transactions, as we're going to time out the HTLC given a high current height
-       let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+       let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+       let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
        // Get the will-be-revoked local txn from node[0]
        let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2605,9 +2613,9 @@ fn claim_htlc_outputs_single_tx() {
                }
 
                connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-               expect_payment_failed!(nodes[1], payment_hash_2, true);
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                assert!(node_txn.len() == 9 || node_txn.len() == 10);
 
                // Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
@@ -2635,6 +2643,14 @@ fn claim_htlc_outputs_single_tx() {
                assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
                assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
                assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
+
+               // Finally, mine the penalty transactions and check that we get an HTLC failure after
+               // ANTI_REORG_DELAY confirmations.
+               mine_transaction(&nodes[1], &node_txn[2]);
+               mine_transaction(&nodes[1], &node_txn[3]);
+               mine_transaction(&nodes[1], &node_txn[4]);
+               connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+               expect_payment_failed!(nodes[1], payment_hash_2, true);
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
@@ -3387,7 +3403,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
@@ -3450,7 +3466,7 @@ fn test_force_close_fail_back() {
        // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 
-       nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
@@ -4777,7 +4793,7 @@ fn test_claim_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
-       nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
@@ -4806,7 +4822,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
-       nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -7282,37 +7298,25 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
                check_added_monitors!(nodes[0], 1);
                check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+
                connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
-               timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
+               timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
+                       .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
+               check_spends!(timeout_tx[0], bs_commitment_tx[0]);
+               // For both revoked and non-revoked commitment transactions, after ANTI_REORG_DELAY the
+               // dust HTLC should have been failed.
+               expect_payment_failed!(nodes[0], dust_hash, true);
+
                if !revoked {
-                       expect_payment_failed!(nodes[0], dust_hash, true);
                        assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-                       // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
-                       mine_transaction(&nodes[0], &timeout_tx[0]);
-                       assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-                       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-                       expect_payment_failed!(nodes[0], non_dust_hash, true);
                } else {
-                       // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
-                       // commitment tx
-                       let events = nodes[0].node.get_and_clear_pending_events();
-                       assert_eq!(events.len(), 2);
-                       let first;
-                       match events[0] {
-                               Event::PaymentPathFailed { payment_hash, .. } => {
-                                       if payment_hash == dust_hash { first = true; }
-                                       else { first = false; }
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
-                       match events[1] {
-                               Event::PaymentPathFailed { payment_hash, .. } => {
-                                       if first { assert_eq!(payment_hash, non_dust_hash); }
-                                       else { assert_eq!(payment_hash, dust_hash); }
-                               },
-                               _ => panic!("Unexpected event"),
-                       }
+                       assert_eq!(timeout_tx[0].lock_time, 0);
                }
+               // We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
+               mine_transaction(&nodes[0], &timeout_tx[0]);
+               assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+               connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+               expect_payment_failed!(nodes[0], non_dust_hash, true);
        }
 }
 
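
This rewrite unifies the revoked and non-revoked cases: in both, the dust HTLC fails once the
counterparty commitment has matured for `ANTI_REORG_DELAY` blocks, while the non-dust HTLC fails
only once our own claim (HTLC-timeout or revocation claim) confirms and matures. The shared tail
of the test, compressed:

    expect_payment_failed!(nodes[0], dust_hash, true);       // dust: fails with the commitment itself
    mine_transaction(&nodes[0], &timeout_tx[0]);             // our timeout/revocation claim confirms
    connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
    expect_payment_failed!(nodes[0], non_dust_hash, true);   // non-dust: fails with our claim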
@@ -7395,14 +7399,11 @@ fn test_user_configurable_csv_delay() {
        } else { assert!(false); }
 }
 
-#[test]
-fn test_data_loss_protect() {
-       // We want to be sure that :
-       // * we don't broadcast our Local Commitment Tx in case of fallen behind
-       //   (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
-       // * we close channel in case of detecting other being fallen behind
-       // * we are able to claim our own outputs thanks to to_remote being static
-       // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
+fn do_test_data_loss_protect(reconnect_panicing: bool) {
+       // When we get a data_loss_protect proving we're behind, we immediately panic as the
+       // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
+       // panic message informs the user they should force-close without broadcasting, which is tested
+       // if `reconnect_panicing` is not set.
        let persister;
        let logger;
        let fee_estimator;
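
The recovery path the panic message points users toward is the new
`force_close_without_broadcasting_txn` API, exercised below when `reconnect_panicing` is not set.
A hedged sketch of the user-side flow after restoring from a stale backup (`node`, `channel_id`
and `counterparty_id` are placeholders):

    // Our local state is stale (possibly revoked), so broadcasting our commitment could
    // cost us funds. Close without broadcasting and let the counterparty go on-chain;
    // our balance comes back via the static to_remote output.
    node.force_close_without_broadcasting_txn(&channel_id, &counterparty_id).unwrap();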
@@ -7460,53 +7461,53 @@ fn test_data_loss_protect() {
 
        check_added_monitors!(nodes[0], 1);
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+       if reconnect_panicing {
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
 
-       let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+               let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
-       // Check we don't broadcast any transactions following learning of per_commitment_point from B
-       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-       check_added_monitors!(nodes[0], 1);
+               // Check that B detects that A has fallen behind when it sees the stale
+               // reestablish: B should send a warning message rather than force-closing,
+               // giving A the possibility to recover from the warning.
+               nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+               let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+               assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
+
+               {
+                       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+                       // The node B should not broadcast the transaction to force close the channel!
+                       assert!(node_txn.is_empty());
+               }
+
+               let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+               // Check A panics upon seeing proof it has fallen behind.
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
+               return; // By this point we should have panic'ed!
+       }
 
+       nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       check_added_monitors!(nodes[0], 1);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
        {
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 0);
        }
 
-       let mut reestablish_1 = Vec::with_capacity(1);
        for msg in nodes[0].node.get_and_clear_pending_msg_events() {
-               if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
-                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
-                       reestablish_1.push(msg.clone());
-               } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+               if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
                } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
                        match action {
                                &ErrorAction::SendErrorMessage { ref msg } => {
-                                       assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
+                                       assert_eq!(msg.data, "Channel force-closed");
                                },
                                _ => panic!("Unexpected event!"),
                        }
                } else {
-                       panic!("Unexpected event")
+                       panic!("Unexpected event {:?}", msg)
                }
        }
 
-       // Check we close channel detecting A is fallen-behind
-       // Check that we sent the warning message when we detected that A has fallen behind,
-       // and give the possibility for A to recover from the warning.
-       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-       let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
-       assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
-
-       // Check A is able to claim to_remote output
-       let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-       // The node B should not broadcast the transaction to force close the channel!
-       assert!(node_txn.is_empty());
-       // B should now detect that there is something wrong and should force close the channel.
-       let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
-       check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
-
        // after the warning message sent by B, we should not be able to
        // use the channel, or successfully reconnect to the channel.
        assert!(nodes[0].node.list_usable_channels().is_empty());
@@ -7537,6 +7538,17 @@ fn test_data_loss_protect() {
        check_closed_broadcast!(nodes[1], false);
 }
 
+#[test]
+#[should_panic]
+fn test_data_loss_protect_showing_stale_state_panics() {
+       do_test_data_loss_protect(true);
+}
+
+#[test]
+fn test_force_close_without_broadcast() {
+       do_test_data_loss_protect(false);
+}
+
 #[test]
 fn test_check_htlc_underpaying() {
        // Send payment through A -> B but A is maliciously
@@ -8361,7 +8373,7 @@ fn test_manually_accept_inbound_channel_request() {
                _ => panic!("Unexpected event"),
        }
 
-       nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
 
        let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_msg_ev.len(), 1);
@@ -8396,7 +8408,7 @@ fn test_manually_reject_inbound_channel_request() {
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
                }
                _ => panic!("Unexpected event"),
        }
@@ -9049,7 +9061,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                force_closing_node = 1;
                counterparty_node = 0;
        }
-       nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
        check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
@@ -9412,6 +9424,10 @@ fn test_invalid_funding_tx() {
        // funding transactions from their counterparties, leading to a multi-implementation critical
        // security vulnerability (though we always sanitized properly, we've previously had
        // un-released crashes in the sanitization process).
+       //
+       // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
+       // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
+       // gave up on it. We test this here by generating such a transaction.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -9422,9 +9438,19 @@ fn test_invalid_funding_tx() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
 
        let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+
+       // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
+       // 136 bytes long. Its length matches our "accepted HTLC preimage spend" witness matching, which
+       // previously caused a panic as we'd try to extract a 32-byte preimage from a witness element
+       // without checking its length.
+       let mut wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
+       assert!(chan_utils::HTLCType::scriptlen_to_htlctype(wit_program.len()).unwrap() ==
+               chan_utils::HTLCType::AcceptedHTLC);
+
+       let wit_program_script: Script = wit_program.clone().into();
        for output in tx.output.iter_mut() {
                // Make the confirmed funding transaction have a bogus script_pubkey
-               output.script_pubkey = bitcoin::Script::new();
+               output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash());
        }
 
        nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
@@ -9454,6 +9480,28 @@ fn test_invalid_funding_tx() {
                } else { panic!(); }
        } else { panic!(); }
        assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
+       // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
+       // as it's not 32 bytes long.
+       let mut spend_tx = Transaction {
+               version: 2i32, lock_time: 0,
+               input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
+                       previous_output: BitcoinOutPoint {
+                               txid: tx.txid(),
+                               vout: idx as u32,
+                       },
+                       script_sig: Script::new(),
+                       sequence: 0xfffffffd,
+                       witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
+               }).collect(),
+               output: vec![TxOut {
+                       value: 1000,
+                       script_pubkey: Script::new(),
+               }]
+       };
+       check_spends!(spend_tx, tx);
+       mine_transaction(&nodes[1], &spend_tx);
 }
 
 fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
@@ -9485,7 +9533,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 
-       nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        check_added_monitors!(nodes[1], 1);
@@ -9862,7 +9910,7 @@ fn test_keysend_payments_to_public_node() {
        };
        let scorer = test_utils::TestScorer::with_penalty(0);
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-       let route = find_route(&payer_pubkey, &route_params, &network_graph.read_only(), None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+       let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
 
        let test_preimage = PaymentPreimage([42; 32]);
        let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap();
@@ -9898,8 +9946,8 @@ fn test_keysend_payments_to_private_node() {
        let scorer = test_utils::TestScorer::with_penalty(0);
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
        let route = find_route(
-               &payer_pubkey, &route_params, &network_graph.read_only(),
-               Some(&first_hops.iter().collect::<Vec<_>>()), nodes[0].logger, &scorer, &random_seed_bytes
+               &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+               nodes[0].logger, &scorer, &random_seed_bytes
        ).unwrap();
 
        let test_preimage = PaymentPreimage([42; 32]);
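
Both keysend tests also pick up a `find_route` signature change: the router now takes the
`NetworkGraph` itself rather than a `read_only()` view of it. The new call shape, as used above
(all names from the surrounding test):

    let route = find_route(
        &payer_pubkey, &route_params, &network_graph, // previously &network_graph.read_only()
        Some(&first_hops.iter().collect::<Vec<_>>()),
        nodes[0].logger, &scorer, &random_seed_bytes,
    ).unwrap();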
@@ -10329,3 +10377,45 @@ fn test_max_dust_htlc_exposure() {
        do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false);
        do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true);
 }
+
+#[test]
+fn test_non_final_funding_tx() {
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
+       let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel_message);
+       let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel_message);
+
+       let best_height = nodes[0].node.best_block.read().unwrap().height();
+
+       let chan_id = *nodes[0].network_chan_count.borrow();
+       let events = nodes[0].node.get_and_clear_pending_events();
+       let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::Script::new(), sequence: 0x1, witness: Witness::from_vec(vec!(vec!(1))) };
+       assert_eq!(events.len(), 1);
+       let mut tx = match events[0] {
+               Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => {
+                       // Timelock the transaction _beyond_ the best client height + 2.
+                       Transaction { version: chan_id as i32, lock_time: best_height + 3, input: vec![input], output: vec![TxOut {
+                               value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+                       }]}
+               },
+               _ => panic!("Unexpected event"),
+       };
+       // Transaction should fail as it's evaluated as non-final for propagation.
+       match nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()) {
+               Err(APIError::APIMisuseError { err }) => {
+                       assert_eq!(format!("Funding transaction absolute timelock is non-final"), err);
+               },
+               _ => panic!()
+       }
+
+       // However, the transaction should be accepted if it's within a +2 block headroom of the best block.
+       tx.lock_time -= 1;
+       assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
+       get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+}