Merge pull request #2136 from marctyndel/2023-03-paymentforwarded-expose-amount-forwarded
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index c4cd0fc1b09d1ca691654c7fbc32e256bb39ed41..1ce0cc0345869dd9a392ebf78944c124d471aefc 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -15,6 +15,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::keysinterface::EntropySource;
 use crate::chain::transaction::OutPoint;
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
 use crate::ln::features::InvoiceFeatures;
@@ -24,10 +25,10 @@ use crate::ln::outbound_payment::Retry;
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
 use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
 use crate::routing::scoring::ChannelUsage;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
+use crate::util::string::UntrustedString;
 
 use bitcoin::{Block, BlockHeader, TxMerkleNode};
 use bitcoin::hashes::Hash;
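The first two hunks track the relocation of the events module from `crate::util::events` to `crate::events` (plus the new `UntrustedString` import used further down). External consumers of the crate make the same move; a minimal sketch, assuming the crate is consumed under the name `lightning`:

    // Old path, removed by this change:
    // use lightning::util::events::{ClosureReason, Event, ...};
    // New path, mirroring `crate::events` in this diff:
    use lightning::events::Event;

    // Matching on events works exactly as before; only the import path moved.
    fn is_probe_success(ev: &Event) -> bool {
        matches!(ev, Event::ProbeSuccessful { .. })
    }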
@@ -334,9 +335,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
        check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[0].node.has_pending_payments());
-       let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-       assert_eq!(as_broadcasted_txn.len(), 1);
-       assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+       nodes[0].node.timer_tick_occurred();
+       if !confirm_before_reload {
+               let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+               assert_eq!(as_broadcasted_txn.len(), 1);
+               assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+       } else {
+               assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+       }
+       check_added_monitors!(nodes[0], 1);
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
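Note the behavioral shift the new assertions capture: after an `OutdatedChannelManager` force-close, the latest local commitment transaction is no longer broadcast synchronously during reload. It goes out on the next `timer_tick_occurred`, and is skipped entirely when `confirm_before_reload` already confirmed it on-chain. Outside of tests, something has to drive those ticks; a hedged sketch of such a driver (the helper and the cadence are illustrative, not part of this change):

    use std::time::Duration;

    // Illustrative driver: a long-lived task periodically ticks the manager so
    // deferred actions, such as the broadcast asserted above, actually run.
    // `tick` stands in for `ChannelManager::timer_tick_occurred`.
    fn drive_ticks(tick: impl Fn(), interval: Duration, rounds: usize) {
        for _ in 0..rounds {
            std::thread::sleep(interval);
            tick(); // e.g. channel_manager.timer_tick_occurred()
        }
    }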
@@ -353,7 +360,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
                        check_added_monitors!(nodes[1], 1);
                        assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
                },
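`peer_msg` in `ClosureReason::CounterpartyForceClosed` is now carried as an `UntrustedString` rather than a bare `String`. As the construction above shows, it is a tuple struct wrapping `String`, so call sites wrap and unwrap it explicitly; a small sketch under that assumption:

    use lightning::events::ClosureReason;
    use lightning::util::string::UntrustedString;

    // Extract the peer-supplied close message, if any. The `UntrustedString`
    // wrapper is a type-level reminder that the contents came over the wire
    // from the counterparty and should be sanitized before display.
    fn peer_close_message(reason: &ClosureReason) -> Option<&UntrustedString> {
        if let ClosureReason::CounterpartyForceClosed { peer_msg } = reason {
            Some(peer_msg)
        } else {
            None
        }
    }

The inner `String` is the public tuple field, reachable as `peer_msg.0`, matching the `UntrustedString(format!(...))` construction in this diff.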
@@ -499,9 +506,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
        // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
        // force-close the channel.
        check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+       nodes[0].node.timer_tick_occurred();
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[0].node.has_pending_payments());
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+       check_added_monitors!(nodes[0], 1);
 
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -518,7 +527,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
                MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
                        assert_eq!(node_id, nodes[1].node.get_our_node_id());
                        nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+                       check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
                        check_added_monitors!(nodes[1], 1);
                        bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
                },
@@ -933,7 +942,7 @@ fn successful_probe_yields_event() {
        let mut events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events.drain(..).next().unwrap() {
-               crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+               crate::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
                        assert_eq!(payment_id, ev_pid);
                        assert_eq!(payment_hash, ev_ph);
                },
@@ -979,7 +988,7 @@ fn failed_probe_yields_event() {
        let mut events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events.drain(..).next().unwrap() {
-               crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+               crate::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
                        assert_eq!(payment_id, ev_pid);
                        assert_eq!(payment_hash, ev_ph);
                },
@@ -1413,7 +1422,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        let (intercept_id, expected_outbound_amount_msat) = match events[0] {
-               crate::util::events::Event::HTLCIntercepted {
+               crate::events::Event::HTLCIntercepted {
                        intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
                } => {
                        assert_eq!(pmt_hash, payment_hash);
@@ -2748,3 +2757,84 @@ fn test_threaded_payment_retries() {
                }
        }
 }
+
+fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
+       // Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
+       // receive the PaymentSent event even if the ChannelManager had no idea about the payment when
+       // it was last persisted.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let (persister_a, persister_b, persister_c);
+       let (chain_monitor_a, chain_monitor_b, chain_monitor_c);
+       let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c);
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+       let mut nodes_0_serialized = Vec::new();
+       if !persist_manager_with_payment {
+               nodes_0_serialized = nodes[0].node.encode();
+       }
+
+       let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+       if persist_manager_with_payment {
+               nodes_0_serialized = nodes[0].node.encode();
+       }
+
+       nodes[1].node.claim_funds(our_payment_preimage);
+       check_added_monitors!(nodes[1], 1);
+       expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+       let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+       check_added_monitors!(nodes[0], 1);
+
+       // The ChannelMonitor should always be the latest version, as we're required to persist it
+       // during the commitment signed handling.
+       let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+       reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized);
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); }
+       if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
+       // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
+       // the double-claim that would otherwise appear at the end of this test.
+       nodes[0].node.timer_tick_occurred();
+       let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+       assert_eq!(as_broadcasted_txn.len(), 1);
+
+       // Ensure that, even after some time has passed, we still include *something* in the current
+       // `ChannelManager` for this payment, preventing a spurious `PaymentFailed` on restart even
+       // though resolved pending payments are timed out after `IDEMPOTENCY_TIMEOUT_TICKS`.
+       // A naive implementation of the fix here would wipe the pending payments set, causing a
+       // failure event when we restart.
+       for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+       let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+       reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert!(events.is_empty());
+
+       // Ensure that we don't generate any further events even after the channel-closing commitment
+       // transaction is confirmed on-chain.
+       confirm_transaction(&nodes[0], &as_broadcasted_txn[0]);
+       for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert!(events.is_empty());
+
+       let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+       reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
+       let events = nodes[0].node.get_and_clear_pending_events();
+       assert!(events.is_empty());
+}
+
+#[test]
+fn no_missing_sent_on_midpoint_reload() {
+       do_no_missing_sent_on_midpoint_reload(false);
+       do_no_missing_sent_on_midpoint_reload(true);
+}
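One practical consequence for event consumers: as the new test demonstrates, a `PaymentSent` may be replayed after a restart whenever the persisted `ChannelManager` predates the claim, so handlers should be idempotent. A hedged sketch (the in-memory dedup set is hypothetical; a real application would persist it alongside its own payment records):

    use std::collections::HashSet;
    use lightning::events::Event;

    // Hypothetical dedup keyed by the 32-byte payment hash.
    fn handle_event(seen: &mut HashSet<[u8; 32]>, ev: &Event) {
        if let Event::PaymentSent { payment_hash, .. } = ev {
            if !seen.insert(payment_hash.0) {
                // Replayed after a reload like the one exercised above; ignore.
                return;
            }
            // ...mark the payment as settled in application state...
        }
    }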