Revert "Merge pull request #819 from TheBlueMatt/2021-03-810-rebased"
[rust-lightning] / lightning / src / ln / functional_tests.rs
index bfab34511feaeec84d44b81a5b896f76794078aa..4604d741ceff10c0ea456d358d21b811ebb8e6a3 100644 (file)
@@ -51,10 +51,10 @@ use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::Mutex;
 use std::sync::atomic::Ordering;
-use std::mem;
 
 use ln::functional_test_utils::*;
 use ln::chan_utils::CommitmentTransaction;
+use ln::msgs::OptionalField::Present;
 
 #[test]
 fn test_insane_channel_opens() {
@@ -832,9 +832,9 @@ fn pre_funding_lock_shutdown_test() {
 
        nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
-       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
 
        let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
@@ -862,9 +862,9 @@ fn updates_shutdown_wait() {
 
        nodes[0].node.close_channel(&chan_1.2).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
-       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -949,13 +949,13 @@ fn htlc_fail_async_shutdown() {
 
        nodes[1].node.close_channel(&chan_1.2).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
-       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
        check_added_monitors!(nodes[1], 1);
-       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
        commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
 
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1017,10 +1017,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        nodes[1].node.close_channel(&chan_1.2).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        if recv_count > 0 {
-               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
                let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
                if recv_count > 1 {
-                       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+                       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
                }
        }
 
@@ -1039,14 +1039,14 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish);
        let node_0_2nd_shutdown = if recv_count > 0 {
                let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown);
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
                node_0_2nd_shutdown
        } else {
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown);
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
                get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
        };
-       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown);
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown);
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1106,10 +1106,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
                assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
 
-               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown);
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown);
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown);
                let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
                assert!(node_0_closing_signed == node_0_2nd_closing_signed);
 
@@ -1615,19 +1615,19 @@ fn test_fee_spike_violation_fails_htlc() {
        let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point) = {
                let chan_lock = nodes[0].node.channel_state.lock().unwrap();
                let local_chan = chan_lock.by_id.get(&chan.2).unwrap();
-               let chan_keys = local_chan.get_keys();
-               let pubkeys = chan_keys.pubkeys();
+               let chan_signer = local_chan.get_signer();
+               let pubkeys = chan_signer.pubkeys();
                (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
-                chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
-                chan_keys.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx))
+                chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
+                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx))
        };
        let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point) = {
                let chan_lock = nodes[1].node.channel_state.lock().unwrap();
                let remote_chan = chan_lock.by_id.get(&chan.2).unwrap();
-               let chan_keys = remote_chan.get_keys();
-               let pubkeys = chan_keys.pubkeys();
+               let chan_signer = remote_chan.get_signer();
+               let pubkeys = chan_signer.pubkeys();
                (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
-                chan_keys.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx))
+                chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx))
        };
 
        // Assemble the set of keys we can use for signatures for our commitment_signed message.
@@ -1651,7 +1651,7 @@ fn test_fee_spike_violation_fails_htlc() {
        let res = {
                let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
                let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
-               let local_chan_keys = local_chan.get_keys();
+               let local_chan_signer = local_chan.get_signer();
                let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
                        commitment_number,
                        95000,
@@ -1661,7 +1661,7 @@ fn test_fee_spike_violation_fails_htlc() {
                        &mut vec![(accepted_htlc_info, ())],
                        &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
                );
-               local_chan_keys.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap()
+               local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap()
        };
 
        let commit_signed_msg = msgs::CommitmentSigned {
@@ -1968,7 +1968,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
 
        // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
        {
-               let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+               let (mut route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0);
+               route.paths[0].last_mut().unwrap().fee_msat += 1;
                assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
                unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
                        assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
@@ -3516,8 +3517,8 @@ fn test_force_close_fail_back() {
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
-               let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap();
-               monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
+               let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.read().unwrap();
+               monitors.get(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
                        .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger);
        }
        connect_block(&nodes[2], &block, 1);
@@ -3531,37 +3532,6 @@ fn test_force_close_fail_back() {
        check_spends!(node_txn[0], tx);
 }
 
-#[test]
-fn test_unconf_chan() {
-       // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-
-       let channel_state = nodes[0].node.channel_state.lock().unwrap();
-       assert_eq!(channel_state.by_id.len(), 1);
-       assert_eq!(channel_state.short_to_id.len(), 1);
-       mem::drop(channel_state);
-
-       let mut headers = Vec::new();
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       headers.push(header.clone());
-       for _i in 2..100 {
-               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               headers.push(header.clone());
-       }
-       while !headers.is_empty() {
-               nodes[0].node.block_disconnected(&headers.pop().unwrap());
-       }
-       check_closed_broadcast!(nodes[0], false);
-       check_added_monitors!(nodes[0], 1);
-       let channel_state = nodes[0].node.channel_state.lock().unwrap();
-       assert_eq!(channel_state.by_id.len(), 0);
-       assert_eq!(channel_state.short_to_id.len(), 0);
-}
-
 #[test]
 fn test_simple_peer_disconnect() {
        // Test that we can reconnect when there are no lost messages
@@ -4236,8 +4206,8 @@ fn test_invalid_channel_announcement() {
 
        nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
 
-       let as_bitcoin_key = as_chan.get_keys().inner.holder_channel_pubkeys.funding_pubkey;
-       let bs_bitcoin_key = bs_chan.get_keys().inner.holder_channel_pubkeys.funding_pubkey;
+       let as_bitcoin_key = as_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
+       let bs_bitcoin_key = bs_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
 
        let as_network_key = nodes[0].node.get_our_node_id();
        let bs_network_key = nodes[1].node.get_our_node_id();
@@ -4264,8 +4234,8 @@ fn test_invalid_channel_announcement() {
        macro_rules! sign_msg {
                ($unsigned_msg: expr) => {
                        let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
-                       let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_keys().inner.funding_key);
-                       let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_keys().inner.funding_key);
+                       let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_signer().inner.funding_key);
+                       let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_signer().inner.funding_key);
                        let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
                        let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret());
                        chan_announcement = msgs::ChannelAnnouncement {
@@ -4313,7 +4283,7 @@ fn test_no_txn_manager_serialize_deserialize() {
 
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
@@ -4422,7 +4392,7 @@ fn test_manager_serialize_deserialize_events() {
        // Start the de/seriailization process mid-channel creation to check that the channel manager will hold onto events that are serialized
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
 
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
        logger = test_utils::TestLogger::new();
@@ -4514,7 +4484,7 @@ fn test_simple_manager_serialize_deserialize() {
 
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
@@ -4571,7 +4541,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
 
        let mut node_0_stale_monitors_serialized = Vec::new();
-       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
+       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
                let mut writer = test_utils::TestVecWriter(Vec::new());
                monitor.1.write(&mut writer).unwrap();
                node_0_stale_monitors_serialized.push(writer.0);
@@ -4590,7 +4560,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
        // nodes[3])
        let mut node_0_monitors_serialized = Vec::new();
-       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
+       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
                let mut writer = test_utils::TestVecWriter(Vec::new());
                monitor.1.write(&mut writer).unwrap();
                node_0_monitors_serialized.push(writer.0);
@@ -6383,7 +6353,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0, InitFeatures::known(), InitFeatures::known());
+       let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
        let logger = test_utils::TestLogger::new();
 
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -6455,9 +6425,13 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
        send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight, max_in_flight);
 
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
-       let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-       let logger = test_utils::TestLogger::new();
-       let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], max_in_flight+1, TEST_FINAL_CLTV, &logger).unwrap();
+       // Manually create a route over our max in flight (which our router normally automatically
+       // limits us to).
+       let route = Route { paths: vec![vec![RouteHop {
+          pubkey: nodes[1].node.get_our_node_id(), node_features: NodeFeatures::known(), channel_features: ChannelFeatures::known(),
+          short_channel_id: nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(),
+          fee_msat: max_in_flight + 1, cltv_expiry_delta: TEST_FINAL_CLTV
+       }]] };
        unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
                assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
 
@@ -7195,7 +7169,7 @@ fn test_upfront_shutdown_script() {
        let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
        // Test that we enforce the upfront shutdown scriptpubkey: if a different one is provided at closing, we disconnect the peer
-       nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+       nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
     assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
        check_added_monitors!(nodes[2], 1);
 
@@ -7204,7 +7178,7 @@ fn test_upfront_shutdown_script() {
        nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
        // We test that if the peer committed upfront to a script and it doesn't change at closing, we sign
-       nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+       nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -7218,7 +7192,7 @@ fn test_upfront_shutdown_script() {
        nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
        let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
-       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
+       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -7232,7 +7206,7 @@ fn test_upfront_shutdown_script() {
        nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
        let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
-       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -7246,7 +7220,7 @@ fn test_upfront_shutdown_script() {
        nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
        let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
-       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 2);
        match events[0] {
@@ -7259,6 +7233,136 @@ fn test_upfront_shutdown_script() {
        }
 }
 
+#[test]
+fn test_upfront_shutdown_script_unsupport_segwit() {
+       // We test that the channel is closed early
+       // if a segwit program is passed as the upfront shutdown script,
+       // but the peer does not support opt_shutdown_anysegwit.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+
+       let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(16)
+               .push_slice(&[0, 0])
+               .into_script());
+
+       let features = InitFeatures::known().clear_shutdown_anysegwit();
+       nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), features, &open_channel);
+
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+                       assert_eq!(node_id, nodes[0].node.get_our_node_id());
+                       assert!(regex::Regex::new(r"Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: (\([A-Fa-f0-9]+\))").unwrap().is_match(&*msg.data));
+               },
+               _ => panic!("Unexpected event"),
+       }
+}
+
+#[test]
+fn test_shutdown_script_any_segwit_allowed() {
+       let mut config = UserConfig::default();
+       config.channel_options.announced_channel = true;
+       config.peer_channel_config_limits.force_announced_channel_preference = false;
+       config.channel_options.commit_upfront_shutdown_pubkey = false;
+       let user_cfgs = [None, Some(config), None];
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // We test that if the remote peer accepts opt_shutdown_anysegwit, a witness program can be used on shutdown
+       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+       let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+       node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
+               .push_slice(&[0, 0])
+               .into_script();
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+               _ => panic!("Unexpected event"),
+       }
+       match events[1] {
+               MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+               _ => panic!("Unexpected event"),
+       }
+}
+
+#[test]
+fn test_shutdown_script_any_segwit_not_allowed() {
+       let mut config = UserConfig::default();
+       config.channel_options.announced_channel = true;
+       config.peer_channel_config_limits.force_announced_channel_preference = false;
+       config.channel_options.commit_upfront_shutdown_pubkey = false;
+       let user_cfgs = [None, Some(config), None];
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // We test that if the remote peer does not accept opt_shutdown_anysegwit, the witness program cannot be used on shutdown
+       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+       let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+       // Make a witness program with a non-zero segwit version (anysegwit)
+       node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
+               .push_slice(&[0, 0])
+               .into_script();
+       let flags_no = InitFeatures::known().clear_shutdown_anysegwit();
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &flags_no, &node_0_shutdown);
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+       match events[1] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+                       assert_eq!(node_id, nodes[1].node.get_our_node_id());
+                       assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020000) from remote peer".to_owned())
+               },
+               _ => panic!("Unexpected event"),
+       }
+       check_added_monitors!(nodes[0], 1);
+}
+
+#[test]
+fn test_shutdown_script_segwit_but_not_anysegwit() {
+       let mut config = UserConfig::default();
+       config.channel_options.announced_channel = true;
+       config.peer_channel_config_limits.force_announced_channel_preference = false;
+       config.channel_options.commit_upfront_shutdown_pubkey = false;
+       let user_cfgs = [None, Some(config), None];
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       // We test that even when opt_shutdown_anysegwit is supported, a version-0 witness program with an invalid length is not accepted
+       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+       nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+       let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+       // Make a version-0 witness program with an invalid program length
+       node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
+               .push_slice(&[0, 0])
+               .into_script();
+       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+       match events[1] {
+               MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+                       assert_eq!(node_id, nodes[1].node.get_our_node_id());
+                       assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned())
+               },
+               _ => panic!("Unexpected event"),
+       }
+       check_added_monitors!(nodes[0], 1);
+}
+
 #[test]
 fn test_user_configurable_csv_delay() {
        // We test our channel constructors yield errors when we pass them absurd csv delay
@@ -7348,7 +7452,7 @@ fn test_data_loss_protect() {
        // Cache node A state before any channel update
        let previous_node_state = nodes[0].node.encode();
        let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap();
 
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
@@ -7923,103 +8027,6 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        nodes[1].node.get_and_clear_pending_msg_events();
 }
 
-#[test]
-fn test_set_outpoints_partial_claiming() {
-       // - remote party claim tx, new bump tx
-       // - disconnect remote claiming tx, new bump
-       // - disconnect tx, see no tx anymore
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
-       let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
-       let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
-
-       // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
-       let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
-       assert_eq!(remote_txn.len(), 3);
-       assert_eq!(remote_txn[0].output.len(), 4);
-       assert_eq!(remote_txn[0].input.len(), 1);
-       assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
-       check_spends!(remote_txn[1], remote_txn[0]);
-       check_spends!(remote_txn[2], remote_txn[0]);
-
-       // Connect blocks on node A to advance height towards TEST_FINAL_CLTV
-       let prev_header_100 = connect_blocks(&nodes[1], 100, 0, false, Default::default());
-       // Provide node A with both preimage
-       nodes[0].node.claim_funds(payment_preimage_1, &None, 3_000_000);
-       nodes[0].node.claim_funds(payment_preimage_2, &None, 3_000_000);
-       check_added_monitors!(nodes[0], 2);
-       nodes[0].node.get_and_clear_pending_events();
-       nodes[0].node.get_and_clear_pending_msg_events();
-
-       // Connect blocks on node A commitment transaction
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
-       check_closed_broadcast!(nodes[0], false);
-       check_added_monitors!(nodes[0], 1);
-       // Verify node A broadcast tx claiming both HTLCs
-       {
-               let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               // ChannelMonitor: claim tx, ChannelManager: local commitment tx + HTLC-Success*2
-               assert_eq!(node_txn.len(), 4);
-               check_spends!(node_txn[0], remote_txn[0]);
-               check_spends!(node_txn[1], chan.3);
-               check_spends!(node_txn[2], node_txn[1]);
-               check_spends!(node_txn[3], node_txn[1]);
-               assert_eq!(node_txn[0].input.len(), 2);
-               node_txn.clear();
-       }
-
-       // Connect blocks on node B
-       connect_blocks(&nodes[1], 135, 0, false, Default::default());
-       check_closed_broadcast!(nodes[1], false);
-       check_added_monitors!(nodes[1], 1);
-       // Verify node B broadcast 2 HTLC-timeout txn
-       let partial_claim_tx = {
-               let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 3);
-               check_spends!(node_txn[1], node_txn[0]);
-               check_spends!(node_txn[2], node_txn[0]);
-               assert_eq!(node_txn[1].input.len(), 1);
-               assert_eq!(node_txn[2].input.len(), 1);
-               node_txn[1].clone()
-       };
-
-       // Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       connect_block(&nodes[0], &Block { header, txdata: vec![partial_claim_tx.clone()] }, 102);
-       {
-               let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 1);
-               check_spends!(node_txn[0], remote_txn[0]);
-               assert_eq!(node_txn[0].input.len(), 1); //dropped HTLC
-               node_txn.clear();
-       }
-       nodes[0].node.get_and_clear_pending_msg_events();
-
-       // Disconnect last block on node A, should regenerate a claiming tx with HTLC dropped
-       disconnect_block(&nodes[0], &header, 102);
-       {
-               let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 1);
-               check_spends!(node_txn[0], remote_txn[0]);
-               assert_eq!(node_txn[0].input.len(), 2); //resurrected HTLC
-               node_txn.clear();
-       }
-
-       //// Disconnect one more block and then reconnect multiple no transaction should be generated
-       disconnect_block(&nodes[0], &header, 101);
-       connect_blocks(&nodes[1], 15, 101, false, prev_header_100);
-       {
-               let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 0);
-               node_txn.clear();
-       }
-}
-
 #[test]
 fn test_counterparty_raa_skip_no_crash() {
        // Previously, if our counterparty sent two RAAs in a row without us having provided a
@@ -8037,7 +8044,7 @@ fn test_counterparty_raa_skip_no_crash() {
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 
        let mut guard = nodes[0].node.channel_state.lock().unwrap();
-       let keys = &guard.by_id.get_mut(&channel_id).unwrap().holder_keys;
+       let keys = &guard.by_id.get_mut(&channel_id).unwrap().get_signer();
        const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
        let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
        // Must revoke without gaps
@@ -8095,10 +8102,10 @@ fn test_bump_txn_sanitize_tracking_maps() {
        connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
        connect_blocks(&nodes[0], 5, 130,  false, header_130.block_hash());
        {
-               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
                if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
-                       assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
-                       assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
+                       assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
+                       assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
                }
        }
 }
@@ -8229,7 +8236,7 @@ fn test_update_err_monitor_lockdown() {
        let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
        let persister = test_utils::TestPersister::new();
        let watchtower = {
-               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
@@ -8288,7 +8295,7 @@ fn test_concurrent_monitor_claim() {
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
        let persister = test_utils::TestPersister::new();
        let watchtower_alice = {
-               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
@@ -8314,7 +8321,7 @@ fn test_concurrent_monitor_claim() {
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
        let persister = test_utils::TestPersister::new();
        let watchtower_bob = {
-               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
@@ -8415,48 +8422,48 @@ fn test_pre_lockin_no_chan_closed_update() {
 #[test]
 fn test_htlc_no_detection() {
        // This test is a mutation to underscore the detection logic bug we had
-        // before #653. HTLC value routed is above the remaining balance, thus
-        // inverting HTLC and `to_remote` output. HTLC will come second and
-        // it wouldn't be seen by pre-#653 detection as we were enumerate()'ing
-        // on a watched outputs vector (Vec<TxOut>) thus implicitly relying on
-        // outputs order detection for correct spending children filtring.
-
-        let chanmon_cfgs = create_chanmon_cfgs(2);
-        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-        // Create some initial channels
-        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
-
-        send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000, 1_000_000);
-        let (_, our_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
-        let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
-        assert_eq!(local_txn[0].input.len(), 1);
-        assert_eq!(local_txn[0].output.len(), 3);
-        check_spends!(local_txn[0], chan_1.3);
-
-        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
-        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-        connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
+       // before #653. HTLC value routed is above the remaining balance, thus
+       // inverting HTLC and `to_remote` output. HTLC will come second and
+       // it wouldn't be seen by pre-#653 detection as we were enumerate()'ing
+       // on a watched outputs vector (Vec<TxOut>) thus implicitly relying on
+       // outputs order detection for correct spending children filtering.
+
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Create some initial channels
+       let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000, 1_000_000);
+       let (_, our_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
+       let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
+       assert_eq!(local_txn[0].input.len(), 1);
+       assert_eq!(local_txn[0].output.len(), 3);
+       check_spends!(local_txn[0], chan_1.3);
+
+       // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
        // We deliberately connect the local tx twice, as this should provoke a failure when running
        // this test against the code prior to the #653 fix.
-        connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
-        check_closed_broadcast!(nodes[0], false);
-        check_added_monitors!(nodes[0], 1);
-
-        let htlc_timeout = {
-                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-                assert_eq!(node_txn[0].input.len(), 1);
-                assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
-                check_spends!(node_txn[0], local_txn[0]);
-                node_txn[0].clone()
-        };
-
-        let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-        connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
-        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
-        expect_payment_failed!(nodes[0], our_payment_hash, true);
+       connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
+       check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
+
+       let htlc_timeout = {
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+               assert_eq!(node_txn[0].input.len(), 1);
+               assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+               check_spends!(node_txn[0], local_txn[0]);
+               node_txn[0].clone()
+       };
+
+       let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
+       expect_payment_failed!(nodes[0], our_payment_hash, true);
 }
 
 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {