Do not remove Outbound Channel immediately when peer disconnects
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index be9bfb81f25e77cf01944d228bb8d5ab8888a3dd..8d1912f3f9c704413ec332d56890197aac69c0b7 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -3695,7 +3695,7 @@ fn test_dup_events_on_peer_disconnect() {
 #[test]
 fn test_peer_disconnected_before_funding_broadcasted() {
        // Test that channels are closed with `ClosureReason::DisconnectedPeer` if the peer disconnects
-       // before the funding transaction has been broadcasted.
+       // before the funding transaction has been broadcasted and does not reconnect in time.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -3724,12 +3724,19 @@ fn test_peer_disconnected_before_funding_broadcasted() {
                assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
        }
 
-       // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` when the peers are
-       // disconnected before the funding transaction was broadcasted.
+       // The peers disconnect before the funding is broadcasted.
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true
+       // The time for peers to reconnect expires.
+       for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
+               nodes[0].node.timer_tick_occurred();
+       }
+
+       // Ensure that the channel is closed with `ClosureReason::HolderForceClosed`
+       // when the peers remain disconnected past the reconnection window and the
+       // funding transaction was never broadcasted.
+       check_closed_event!(&nodes[0], 2, ClosureReason::HolderForceClosed, true
                , [nodes[1].node.get_our_node_id()], 1000000);
        check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false
                , [nodes[0].node.get_our_node_id()], 1000000);
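
For context, the new expiry path above is driven entirely by `ChannelManager::timer_tick_occurred`, which the test invokes in a loop. In a running node these ticks normally come from `lightning-background-processor`; the following is only a minimal hand-rolled sketch, assuming a tokio runtime and a cloned `channel_manager` handle in scope (both assumptions, not part of this patch):

    use std::time::Duration;

    // Sketch only: drive the timer roughly once per minute so that
    // UNFUNDED_CHANNEL_AGE_LIMIT_TICKS worth of ticks maps to the intended
    // wall-clock grace period for the disconnected peer to come back.
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(60));
        loop {
            interval.tick().await;
            // Each tick ages unfunded channels; once the limit is crossed
            // without a reconnection, the outbound channel is force-closed,
            // matching the `HolderForceClosed` event asserted above.
            channel_manager.timer_tick_occurred();
        }
    });
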
@@ -8684,7 +8691,7 @@ fn test_pre_lockin_no_chan_closed_update() {
        check_added_monitors!(nodes[0], 0);
 
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-       let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
        assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
        check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
@@ -9028,7 +9035,7 @@ fn test_peer_funding_sidechannel() {
        check_added_monitors!(nodes[1], 1);
        expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
        let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), };
-       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]);
+       check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]);
 
        let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
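
For reference, the `ChannelId::v1_from_funding_outpoint` calls in the last two hunks compute the BOLT 2 v1 channel ID: the funding txid with the funding output index XORed into its last two bytes (big-endian). The standalone helper below only illustrates that rule on raw bytes; it is a sketch, not LDK's actual implementation, and byte order follows the txid's serialized form:

    // Illustrative sketch of the BOLT 2 v1 channel-id rule.
    fn v1_channel_id(funding_txid: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        // XOR the big-endian output index into the final two bytes.
        id[30] ^= (funding_output_index >> 8) as u8;
        id[31] ^= (funding_output_index & 0xff) as u8;
        id
    }

This is also why the batch tests further down can derive both `channel_id_1` and `channel_id_2` from the same `tx.txid()` with indexes 0 and 1: the resulting IDs differ only in their final byte.
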
@@ -9089,7 +9096,7 @@ fn test_duplicate_funding_err_in_funding() {
 
        let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
        let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
-       assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
+       assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
 
        nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
        let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -9181,7 +9188,7 @@ fn test_duplicate_chan_id() {
        let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
 
        let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
-       let channel_id = funding_outpoint.to_channel_id();
+       let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 
        // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
        // temporary one).
@@ -10635,7 +10642,7 @@ fn test_batch_channel_open() {
 
        // Complete the persistence of the monitor.
        nodes[0].chain_monitor.complete_sole_pending_chan_update(
-               &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+               &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
        );
        let events = nodes[0].node.get_and_clear_pending_events();
 
@@ -10662,7 +10669,9 @@ fn test_batch_channel_open() {
 }
 
 #[test]
-fn test_disconnect_in_funding_batch() {
+fn test_close_in_funding_batch() {
+       // This test ensures that if one of the channels in a batch closes,
+       // all of the channels in that batch are closed.
        let chanmon_cfgs = create_chanmon_cfgs(3);
        let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
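
As background for the renamed test: a batch open hands a single funding transaction, with one output per channel, to `ChannelManager::batch_funding_transaction_generated`. The sketch below only shows the shape of that call; the bindings (`channel_manager`, `temp_id_1`, `counterparty_1`, `batch_funding_tx`, ...) are placeholders, and exact parameter types may differ between LDK releases:

    // Sketch only: one transaction funds every channel in the batch, so LDK
    // will not broadcast it until all of them finish signing, and closing any
    // one of them before broadcast forces the whole batch closed.
    channel_manager.batch_funding_transaction_generated(
        &[
            (&temp_id_1, &counterparty_1), // temporary channel id + peer pubkey
            (&temp_id_2, &counterparty_2),
        ],
        batch_funding_tx, // outputs 0 and 1 become the two funding outpoints below
    ).unwrap();
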
@@ -10686,14 +10695,39 @@ fn test_disconnect_in_funding_batch() {
        // The transaction should not have been broadcast before all channels are ready.
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
 
-       // The remaining peer in the batch disconnects.
-       nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
-
-       // The channels in the batch will close immediately.
+       // Force-close the channel for which we've completed the initial monitor.
        let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
-       let channel_id_1 = funding_txo_1.to_channel_id();
-       let channel_id_2 = funding_txo_2.to_channel_id();
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+       let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
+
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+
+       // The monitor should become closed.
+       check_added_monitors(&nodes[0], 1);
+       {
+               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
+               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
+               assert_eq!(monitor_updates_1.len(), 1);
+               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
+       }
+
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       match msg_events[0] {
+               MessageSendEvent::HandleError { .. } => (),
+               _ => panic!("Unexpected message."),
+       }
+
+       // We broadcast the commitment transaction as part of the force-close.
+       {
+               let broadcasted_txs = nodes[0].tx_broadcaster.txn_broadcast();
+               assert_eq!(broadcasted_txs.len(), 1);
+               assert_ne!(broadcasted_txs[0].txid(), tx.txid());
+               assert_eq!(broadcasted_txs[0].input.len(), 1);
+               assert_eq!(broadcasted_txs[0].input[0].previous_output.txid, tx.txid());
+       }
+
+       // All channels in the batch should close immediately.
        check_closed_events(&nodes[0], &[
                ExpectedCloseEvent {
                        channel_id: Some(channel_id_1),
@@ -10711,19 +10745,6 @@ fn test_disconnect_in_funding_batch() {
                },
        ]);
 
-       // The monitor should become closed.
-       check_added_monitors(&nodes[0], 1);
-       {
-               let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
-               let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap();
-               assert_eq!(monitor_updates_1.len(), 1);
-               assert_eq!(monitor_updates_1[0].update_id, CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // The funding transaction should not have been broadcast, and therefore, we don't need
-       // to broadcast a force-close transaction for the closed monitor.
-       assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);
-
        // Ensure the channels don't exist anymore.
        assert!(nodes[0].node.list_channels().is_empty());
 }
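
The closures asserted above surface to an application as events. A sketch of handling them follows; the variant and field names reflect the LDK release this patch targets and are best treated as assumptions, since they have shifted between versions:

    use lightning::events::Event;

    // Sketch only: `event` is assumed to come from the node's event queue.
    match event {
        Event::ChannelClosed { channel_id, reason, .. } => {
            // Both batch channels show up here: the force-closed one with
            // `HolderForceClosed`, the other with the reason used for batch closures.
            println!("channel {:?} closed: {:?}", channel_id, reason);
        },
        Event::DiscardFunding { channel_id, .. } => {
            // The shared batch funding transaction must never be broadcast
            // once any channel in the batch has been closed.
            println!("discard funding for {:?}", channel_id);
        },
        _ => {},
    }
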
@@ -10766,8 +10787,8 @@ fn test_batch_funding_close_after_funding_signed() {
        // Force-close the channel for which we've completed the initial monitor.
        let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 };
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
-       let channel_id_1 = funding_txo_1.to_channel_id();
-       let channel_id_2 = funding_txo_2.to_channel_id();
+       let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
+       let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
        nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors(&nodes[0], 2);
        {
@@ -10827,7 +10848,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
-       let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+       let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });
 
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        assert_eq!(nodes[1].node.list_channels().len(), 1);