Require an error message when force-closing and record broadcast status in ClosureReason::HolderForceClosed
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index df9ce5b3985b06d81c657fb7f7c52d707282f85f..38be3d8b17b02db6e80b60b88ed5e060ce189b00 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2303,10 +2303,11 @@ fn channel_monitor_network_test() {
        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
 
        // Simple case with no pending HTLCs:
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], true);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
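The hunk above captures both halves of the change: force_close_broadcasting_latest_txn now takes the reason string that will be delivered to the counterparty, and HolderForceClosed records whether the latest commitment transaction was actually broadcast. A minimal sketch of the shapes these tests now exercise, inferred from the hunks rather than quoted from the LDK API:

// Sketch only: the real ClosureReason lives in lightning::events and has
// more variants; the Option semantics below are an assumption based on how
// the tests pass Some(true)/Some(false).
pub enum ClosureReason {
    HolderForceClosed {
        // Some(true): our latest commitment tx was broadcast (the funded
        // channels above); Some(false): nothing was broadcast (channels
        // closed before funding, see later hunks); None: assumed to cover
        // events deserialized from versions that predate the field.
        broadcasted_latest_txn: Option<bool>,
    },
    // ...other variants elided
}

// Assumed updated signature (ChannelId/PublicKey/APIError as in LDK):
// pub fn force_close_broadcasting_latest_txn(
//     &self,
//     channel_id: &ChannelId,
//     counterparty_node_id: &PublicKey,
//     error_message: String,
// ) -> Result<(), APIError>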
@@ -2329,7 +2330,8 @@ fn channel_monitor_network_test() {
 
        // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
        // broadcasted until we reach the timelock time).
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        {
@@ -2343,7 +2345,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
        check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 
        macro_rules! claim_funds {
@@ -2369,7 +2371,8 @@ fn channel_monitor_network_test() {
 
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
-       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors!(nodes[2], 1);
        check_closed_broadcast!(nodes[2], true);
        let node2_commitment_txid;
@@ -2388,7 +2391,7 @@ fn channel_monitor_network_test() {
        check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[3].node.get_our_node_id()], 100000);
        check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 
        // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and
@@ -3596,13 +3599,13 @@ fn test_htlc_ignore_latest_remote_commitment() {
                return;
        }
        let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3;
-
+       let error_message = "Channel force-closed";
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
-       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
        assert_eq!(node_txn.len(), 2);
@@ -3661,11 +3664,11 @@ fn test_force_close_fail_back() {
        // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
        // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
-
-       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
-       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
        let commitment_tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -4543,10 +4546,11 @@ fn test_claim_sizeable_push_msat() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4570,12 +4574,13 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let error_message = "Channel force-closed";
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000);
-       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+       nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -7258,7 +7263,10 @@ fn test_user_configurable_csv_delay() {
                &low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        {
                match error {
-                       ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
+                       ChannelError::Close((err, _)) => {
+                               let regex = regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap();
+                               assert!(regex.is_match(err.as_str()));
+                       },
                        _ => panic!("Unexpected event"),
                }
        } else { assert!(false); }
@@ -7290,7 +7298,10 @@ fn test_user_configurable_csv_delay() {
                &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
        {
                match error {
-                       ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
+                       ChannelError::Close((err, _)) => {
+                               let regex = regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap();
+                               assert!(regex.is_match(err.as_str()));
+                       },
                        _ => panic!("Unexpected event"),
                }
        } else { assert!(false); }
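These two hunks adjust to ChannelError::Close carrying a tuple instead of a bare String. A minimal sketch of the destructuring the tests now do; the second tuple element, matched with `_`, is assumed to be a structured close reason:

// Stand-in for the structured reason the tests ignore with `_`.
pub struct CloseReasonPlaceholder;

pub enum ChannelError {
    // Previously Close(String); now pairs the human-readable message with
    // a machine-readable reason (assumed shape).
    Close((String, CloseReasonPlaceholder)),
}

fn close_message(err: &ChannelError) -> &str {
    match err {
        // Same pattern as the updated tests: bind the message, drop the rest.
        ChannelError::Close((msg, _)) => msg.as_str(),
    }
}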
@@ -8081,8 +8092,8 @@ fn test_manually_accept_inbound_channel_request() {
                }
                _ => panic!("Unexpected event"),
        }
-
-       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
 
        let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(close_msg_ev.len(), 1);
@@ -8113,11 +8124,11 @@ fn test_manually_reject_inbound_channel_request() {
        // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
        // rejecting the inbound channel request.
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
+       let error_message = "Channel force-closed";
        let events = nodes[1].node.get_and_clear_pending_events();
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                       nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                }
                _ => panic!("Unexpected event"),
        }
@@ -8847,10 +8858,11 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
                force_closing_node = 1;
                counterparty_node = 0;
        }
-       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
-       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed, [nodes[counterparty_node].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[counterparty_node].node.get_our_node_id()], 100000);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                        true => alice_txn.clone(),
@@ -9580,10 +9592,10 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
        let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
        nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-
-       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[2].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        assert_eq!(node_txn.len(), 1);
@@ -10396,9 +10408,9 @@ fn accept_busted_but_better_fee() {
        match events[0] {
                MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
                        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
-                       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
-                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000".to_owned() },
-                               [nodes[0].node.get_our_node_id()], 100000);
+                       check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow {
+                               peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000,
+                       }, [nodes[0].node.get_our_node_id()], 100000);
                        check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
                },
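Here a string-compared ProcessingError becomes the structured PeerFeerateTooLow, so the test asserts on the two feerates instead of a formatted message. A sketch of the variant as used above, with a hypothetical renderer (not LDK API) showing how the old string can be reproduced from the fields:

pub enum ClosureReason {
    // Field names taken from the hunk above.
    PeerFeerateTooLow { peer_feerate_sat_per_kw: u32, required_feerate_sat_per_kw: u32 },
}

// Hypothetical helper: rebuilds the previous human-readable message.
fn render(reason: &ClosureReason) -> String {
    match reason {
        ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw, required_feerate_sat_per_kw } => format!(
            "Peer's feerate much too low. Actual: {}. Our expected lower limit: {}",
            peer_feerate_sat_per_kw, required_feerate_sat_per_kw,
        ),
    }
}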
@@ -10641,7 +10653,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
                },
                _ => panic!("Unexpected event"),
        }
-       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -10692,7 +10704,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
                },
                _ => panic!("Unexpected event"),
        }
-       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -10726,7 +10738,7 @@ fn test_channel_close_when_not_timely_accepted() {
 
        // Since we disconnected from peer and did not connect back within time,
        // we should have forced-closed the channel by now.
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 100000);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
 
        {
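Note the three hunks above use Some(false): these channels are abandoned before funding completes (or before a timely accept), so no commitment transaction is ever broadcast. A hypothetical consumer-side branch on the flag, purely to illustrate the distinction:

// Hypothetical helper, not LDK API.
fn describe_force_close(broadcasted_latest_txn: Option<bool>) -> &'static str {
    match broadcasted_latest_txn {
        Some(true) => "latest commitment tx broadcast; expect on-chain resolution",
        Some(false) => "closed without broadcasting (e.g. unfunded channel); no on-chain footprint",
        None => "unknown; assumed for events serialized before the field existed",
    }
}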
@@ -10966,8 +10978,8 @@ fn test_close_in_funding_batch() {
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
        let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
        let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
-
-       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 
        // The monitor should become closed.
        check_added_monitors(&nodes[0], 1);
@@ -11055,7 +11067,8 @@ fn test_batch_funding_close_after_funding_signed() {
        let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 };
        let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1);
        let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2);
-       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_added_monitors(&nodes[0], 2);
        {
                let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap();
@@ -11124,16 +11137,16 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
        } else {
                (&nodes[0], &nodes[1])
        };
-
-       closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
+       let error_message = "Channel force-closed";
+       closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap();
        let mut msg_events = closing_node.node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 1);
        match msg_events.pop().unwrap() {
-               MessageSendEvent::HandleError { action: msgs::ErrorAction::DisconnectPeer { .. }, .. } => {},
+               MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {},
                _ => panic!("Unexpected event"),
        }
        check_added_monitors(closing_node, 1);
-       check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed, false, &[other_node.node.get_our_node_id()], 1_000_000);
+       check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[other_node.node.get_our_node_id()], 1_000_000);
 
        let commitment_tx = {
                let mut txn = closing_node.tx_broadcaster.txn_broadcast();
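Finally, the expected message event in the last hunk flips from DisconnectPeer to SendErrorMessage: with a caller-supplied reason in hand, the force-close now sends an explicit error to the peer rather than silently disconnecting. A sketch of matching it, with simplified stand-ins for the lightning::ln::msgs types:

// Simplified stand-ins for msgs::ErrorMessage and msgs::ErrorAction.
pub struct ErrorMessage { pub data: String }
pub enum ErrorAction {
    DisconnectPeer { msg: Option<ErrorMessage> },
    SendErrorMessage { msg: ErrorMessage },
}

fn assert_sends_error(action: ErrorAction, expected: &str) {
    match action {
        // Updated expectation: the peer receives the error_message string
        // passed to force_close_broadcasting_latest_txn.
        ErrorAction::SendErrorMessage { msg } => assert_eq!(msg.data, expected),
        _ => panic!("expected SendErrorMessage"),
    }
}

For the calls in this patch that would be, e.g., assert_sends_error(action, "Channel force-closed").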