Avoid writing `ChannelManager` when hitting lnd bug 6039
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 2fbdd3c92a0173b55e73817a6e2c6e2e0a300ed7..c5ea582b27f81a95abd50bc760e26b8f30e363e5 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -190,7 +190,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                                chan_context.holder_selected_channel_reserve_satoshis = 0;
                                chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
                        },
-                       ChannelPhase::Funded(_) => assert!(false),
+                       _ => assert!(false),
                }
        }
 
@@ -871,8 +871,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
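
This rename recurs throughout the file: the old catch-all `ClosureReason::CooperativeClosure` is split into `LocallyInitiatedCooperativeClosure` and `CounterpartyInitiatedCooperativeClosure`, recording which side sent the first `shutdown` message. A minimal sketch (not part of this patch; the event shape is assumed from the standard `Event::ChannelClosed` API) of how a consumer might adapt:

use lightning::events::{ClosureReason, Event};

fn on_channel_closed(event: &Event) {
	if let Event::ChannelClosed { reason, .. } = event {
		match reason {
			// We sent the initial `shutdown` message.
			ClosureReason::LocallyInitiatedCooperativeClosure =>
				println!("coop close (we initiated)"),
			// The counterparty sent the initial `shutdown` message.
			ClosureReason::CounterpartyInitiatedCooperativeClosure =>
				println!("coop close (peer initiated)"),
			other => println!("other close: {}", other),
		}
	}
}
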
@@ -985,8 +985,8 @@ fn test_update_fee() {
        assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30);
        assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30);
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1104,17 +1104,17 @@ fn fake_network_test() {
 
        // Close down the channels...
        close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
-       check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000);
        close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -1401,7 +1401,7 @@ fn test_fee_spike_violation_fails_htlc() {
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!");
 
-       let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
 
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
@@ -1599,7 +1599,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[1].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0],
                700_000, RecipientOnionFields::secret_only(payment_secret), cur_height, &None).unwrap();
@@ -1778,7 +1778,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
        // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
        let secp_ctx = Secp256k1::new();
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv).unwrap();
        let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route_2.paths[0], recv_value_2, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
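
The `height()` → `height` change is another recurring migration: `BestBlock` now exposes its height as a plain public field rather than through an accessor method. A self-contained sketch of the call-site change, with the struct shape assumed for illustration:

// Stand-in for `lightning::chain::BestBlock`; only the field these tests
// read is modeled here.
struct BestBlock {
	height: u32,
}

// HTLCs built for the next block use the current height plus one, matching
// the `cur_height` computations in the tests above.
fn next_htlc_height(best_block: &BestBlock) -> u32 {
	best_block.height + 1 // previously: best_block.height() + 1
}

fn main() {
	assert_eq!(next_htlc_height(&BestBlock { height: 100 }), 101);
}
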
@@ -3323,7 +3323,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // block connection just like the !deliver_bs_raa case
        }
 
-       let mut failed_htlcs = HashSet::new();
+       let mut failed_htlcs = new_hash_set();
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
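
Below, `HashSet::new()` and `HashMap::new()` are replaced by crate-local `new_hash_set()`/`new_hash_map()` constructors. A hedged stand-in showing the idea; the real definitions live in the crate's hash-table shims and may pin a different hasher (e.g. hashbrown for no-std builds):

use std::collections::{HashMap, HashSet};

// Funneling construction through one module lets the whole codebase swap
// hash-table implementations or hashers in a single place rather than at
// every call site.
pub fn new_hash_set<K>() -> HashSet<K> {
	HashSet::new()
}

pub fn new_hash_map<K, V>() -> HashMap<K, V> {
	HashMap::new()
}
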
@@ -3503,7 +3503,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
 
                let secp_ctx = Secp256k1::new();
                let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-               let current_height = nodes[1].node.best_block.read().unwrap().height() + 1;
+               let current_height = nodes[1].node.best_block.read().unwrap().height + 1;
                let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads(
                        &route.paths[0], 50_000, RecipientOnionFields::secret_only(payment_secret), current_height, &None).unwrap();
                let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv).unwrap();
@@ -3699,7 +3699,7 @@ fn test_dup_events_on_peer_disconnect() {
 #[test]
 fn test_peer_disconnected_before_funding_broadcasted() {
        // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
-       // before the funding transaction has been broadcasted.
+       // before the funding transaction has been broadcasted and the peers do not reconnect in time.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -3728,12 +3728,19 @@ fn test_peer_disconnected_before_funding_broadcasted() {
                assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
        }
 
-       // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
-       // disconnected before the funding transaction was broadcasted.
+       // The peers disconnect before the funding is broadcasted.
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
+       // The time for peers to reconnect expires.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
+       // when the peers disconnect and do not reconnect before the funding
+       // transaction is broadcasted.
+       check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
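
With this change, an unfunded channel is no longer torn down at disconnect time; it is aged out by the timer instead. A rough sketch of the pruning the test now exercises, assuming the constant's value and assuming `timer_tick_occurred` is driven roughly once per minute (per its documentation) by a background processor:

// Assumed value; the real constant lives in channelmanager.rs.
const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

struct UnfundedChannel {
	ticks_since_open: usize,
}

// One timer tick: age each unfunded channel, then force-close (here, drop)
// any that have outlived the limit.
fn prune_unfunded(channels: &mut Vec<UnfundedChannel>) {
	for chan in channels.iter_mut() {
		chan.ticks_since_open += 1;
	}
	channels.retain(|chan| chan.ticks_since_open < UNFUNDED_CHANNEL_AGE_LIMIT_TICKS);
}
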
@@ -5402,11 +5409,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        let as_events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(as_events.len(), if announce_latest { 10 } else { 6 });
-       let mut as_failds = HashSet::new();
+       let mut as_faileds = new_hash_set();
        let mut as_updates = 0;
        for event in as_events.iter() {
                if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
-                       assert!(as_failds.insert(*payment_hash));
+                       assert!(as_faileds.insert(*payment_hash));
                        if *payment_hash != payment_hash_2 {
                                assert_eq!(*payment_failed_permanently, deliver_last_raa);
                        } else {
@@ -5418,21 +5425,21 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                } else if let &Event::PaymentFailed { .. } = event {
                } else { panic!("Unexpected event"); }
        }
-       assert!(as_failds.contains(&payment_hash_1));
-       assert!(as_failds.contains(&payment_hash_2));
+       assert!(as_faileds.contains(&payment_hash_1));
+       assert!(as_faileds.contains(&payment_hash_2));
        if announce_latest {
-               assert!(as_failds.contains(&payment_hash_3));
-               assert!(as_failds.contains(&payment_hash_5));
+               assert!(as_faileds.contains(&payment_hash_3));
+               assert!(as_faileds.contains(&payment_hash_5));
        }
-       assert!(as_failds.contains(&payment_hash_6));
+       assert!(as_faileds.contains(&payment_hash_6));
 
        let bs_events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 });
-       let mut bs_failds = HashSet::new();
+       let mut bs_faileds = new_hash_set();
        let mut bs_updates = 0;
        for event in bs_events.iter() {
                if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event {
-                       assert!(bs_failds.insert(*payment_hash));
+                       assert!(bs_faileds.insert(*payment_hash));
                        if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
                                assert_eq!(*payment_failed_permanently, deliver_last_raa);
                        } else {
@@ -5444,12 +5451,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
                } else if let &Event::PaymentFailed { .. } = event {
                } else { panic!("Unexpected event"); }
        }
-       assert!(bs_failds.contains(&payment_hash_1));
-       assert!(bs_failds.contains(&payment_hash_2));
+       assert!(bs_faileds.contains(&payment_hash_1));
+       assert!(bs_faileds.contains(&payment_hash_2));
        if announce_latest {
-               assert!(bs_failds.contains(&payment_hash_4));
+               assert!(bs_faileds.contains(&payment_hash_4));
        }
-       assert!(bs_failds.contains(&payment_hash_5));
+       assert!(bs_faileds.contains(&payment_hash_5));
 
        // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
        // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to
@@ -5540,7 +5547,7 @@ fn test_key_derivation_params() {
        let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger));
        let scorer = RwLock::new(test_utils::TestScorer::new());
        let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer);
-       let message_router = test_utils::TestMessageRouter::new(network_graph.clone());
+       let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager);
        let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        node_cfgs.remove(0);
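
`TestMessageRouter::new` gains an entropy-source argument here because message routers build blinded paths, which require fresh randomness. A hedged sketch of the constructor's shape with placeholder types; the real definition is in `test_utils`:

use std::sync::Arc;

struct NetworkGraph; // placeholder for the crate's network graph type

// Illustrative only: the router carries an entropy source alongside the
// graph so it can build blinded reply paths.
struct TestMessageRouter<'a, E> {
	network_graph: Arc<NetworkGraph>,
	entropy_source: &'a E,
}

impl<'a, E> TestMessageRouter<'a, E> {
	fn new(network_graph: Arc<NetworkGraph>, entropy_source: &'a E) -> Self {
		Self { network_graph, entropy_source }
	}
}
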
@@ -5625,7 +5632,7 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        mine_transaction(&nodes[0], &closing_tx);
-       check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
@@ -5633,7 +5640,7 @@ fn test_static_output_closing_tx() {
        check_spends!(spend_txn[0], closing_tx);
 
        mine_transaction(&nodes[1], &closing_tx);
-       check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
        let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
@@ -6481,7 +6488,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                get_route_and_payment_hash!(nodes[0], nodes[1], 1000);
        route.paths[0].hops[0].fee_msat = send_amt;
        let session_priv = SecretKey::from_slice(&[42; 32]).unwrap();
-       let cur_height = nodes[0].node.best_block.read().unwrap().height() + 1;
+       let cur_height = nodes[0].node.best_block.read().unwrap().height + 1;
        let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv).unwrap();
        let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(
                &route.paths[0], send_amt, RecipientOnionFields::secret_only(our_payment_secret), cur_height, &None).unwrap();
@@ -7340,7 +7347,7 @@ fn test_announce_disable_channels() {
        }
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
-       let mut chans_disabled = HashMap::new();
+       let mut chans_disabled = new_hash_map();
        for e in msg_events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
@@ -10078,7 +10085,7 @@ fn test_non_final_funding_tx() {
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
 
-       let best_height = nodes[0].node.best_block.read().unwrap().height();
+       let best_height = nodes[0].node.best_block.read().unwrap().height;
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -10123,7 +10130,7 @@ fn test_non_final_funding_tx_within_headroom() {
        let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel_message);
 
-       let best_height = nodes[0].node.best_block.read().unwrap().height();
+       let best_height = nodes[0].node.best_block.read().unwrap().height;
 
        let chan_id = *nodes[0].network_chan_count.borrow();
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -10512,6 +10519,90 @@ fn test_remove_expired_inbound_unfunded_channels() {
        check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
 }
 
+#[test]
+fn test_channel_close_when_not_timely_accepted() {
+       // Create network of two nodes
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Simulate a peer disconnect mid-handshake: the channel is initiated
+       // from the node 0 side, but the nodes disconnect before node 1 can
+       // send `accept_channel`.
+       let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+       // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+       // Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+       assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // In the meantime, some time passes.
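+       // (`timer_tick_occurred` is normally driven by the background processor,
+       // roughly once per minute, so these ticks model real wall-clock aging.)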
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Since we disconnected from the peer and did not reconnect in time,
+       // we should have force-closed the channel by now.
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       assert_eq!(nodes[0].node.list_channels().len(), 0);
+
+       {
+               // Since the `accept_channel` message was never received, the
+               // channel should have been force-closed by now from node 0's side,
+               // and the peer removed from `per_peer_state`.
+               let node_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+               assert_eq!(node_0_per_peer_state.len(), 0);
+       }
+}
+
+#[test]
+fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() {
+       // Create network of two nodes
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Simulate a peer disconnect mid-handshake: the channel is initiated
+       // from the node 0 side, but the nodes disconnect before node 1 can
+       // send `accept_channel`.
+       let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
+       let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+       assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id);
+
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+       // Make sure that we have not removed the OutboundV1Channel from node[0] immediately.
+       assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+       // Since the channel was inbound from node[1]'s perspective, it should have been dropped immediately.
+       assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+       // The peers now reconnect
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
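+       // (The trailing bool passed to `peer_connected` is the `inbound` flag.)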
+
+       // Make sure the `SendOpenChannel` message is added to node 0's pending message events.
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 1);
+       match &msg_events[0] {
+               MessageSendEvent::SendOpenChannel { msg, .. } => assert_eq!(msg, &open_channel_msg),
+               _ => panic!("Unexpected message."),
+       }
+}
+
 fn do_test_multi_post_event_actions(do_reload: bool) {
        // Tests handling multiple post-Event actions at once.
        // There is specific code in ChannelManager to handle channels where multiple post-Event
@@ -10668,7 +10759,9 @@ fn test_batch_channel_open() {
 }
 
 #[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+       // This test ensures that if one of the channels in a batch closes, the
+       // entire batch closes as well.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
@@ -10692,14 +10785,39 @@ fn test_disconnect_in_funding_batch() {
        // The transaction should not have been broadcast before all channels are ready.
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
-       // The remaining peer in the batch disconnects.
-       nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
-
-       // The channels in the batch will close immediately.
+       // Force-close the channel for which we've completed the initial monitor.
        let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
        let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
        let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
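+       // (v1 channel ids are the funding txid with the funding output index
+       // XORed into its low two bytes, so these two ids differ only at the end.)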
+
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+
+       // The monitor should become closed.
+       check_added_monitors(&nodes[0], 1);
+       {
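+               // The terminal update written on close carries the sentinel
+               // `CLOSED_CHANNEL_UPDATE_ID`, which is what we assert on below.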
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { .. } => (),
+               _ => panic!("Unexpected message."),
+       }
+
+       // We broadcast the commitment transaction as part of the force-close.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert!(broadcasted_txs[0].txid() != tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
+
+       // All channels in the batch should close immediately.
        check_closed_events(&nodes[0], &[
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
@@ -10717,19 +10835,6 @@ fn test_disconnect_in_funding_batch() {
                },
        ]);
 
-       // The monitor should become closed.
-       check_added_monitors(&nodes[0], 1);
-       {
-               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
-               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
-               assert_eq!(monitor_updates_1.len(), 1);
-               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // The funding transaction should not have been broadcast, and therefore, we don't need
-       // to broadcast a force-close transaction for the closed monitor.
-       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
        // Ensure the channels don't exist anymore.
        assert!(nodes[0].node.list_channels().is_empty());
 }