//! Functional tests which test for correct behavior across node restarts.
use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::keysinterface::KeysInterface;
use crate::chain::transaction::OutPoint;
use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::config::UserConfig;
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
+ expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000);
check_added_monitors!(nodes[1], 1);
let mut events = nodes[1].node.get_and_clear_pending_msg_events();
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[2]);
- expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
+ expect_payment_claimable!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
// Test what happens if a node receives an MPP payment, claims it, but crashes before
// persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
// updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
- // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the
+ // have the PaymentClaimable event, (b) have one (or two) channel(s) that go on chain with the
// HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
// not have the preimage tied to the still-pending HTLC.
//
nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 2);
- // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+ // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(send_events.len(), 2);
do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
let original_manager = nodes[3].node.encode();
- expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+ expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
nodes[3].node.claim_funds(payment_preimage);
check_added_monitors!(nodes[3], 2);
nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
// During deserialization, we should have closed one channel and broadcast its latest
- // commitment transaction. We should also still have the original PaymentReceived event we
+ // commitment transaction. We should also still have the original PaymentClaimable event we
// never finished processing.
let events = nodes[3].node.get_and_clear_pending_events();
assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
- if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
+ if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
if persist_both_monitors {
if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
check_added_monitors!(nodes[3], 1);
assert_eq!(ds_msgs.len(), 2);
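+ // After reload, nodes[3]'s pending messages are its channel_update followed by the HTLC fulfill.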
- if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+ if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
- let cs_updates = match ds_msgs[0] {
+ let cs_updates = match ds_msgs[1] {
MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
check_added_monitors!(nodes[2], 1);
do_test_partial_claim_before_restart(false);
do_test_partial_claim_before_restart(true);
}
+
+fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool) {
+ if !use_cs_commitment { assert!(!claim_htlc); }
+ // If we go to forward a payment, and the ChannelMonitor persistence completes, but the
+ // ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
+ // it back until the ChannelMonitor decides the fate of the HTLC.
+ // This was never an issue, but it may be easy to regress here going forward.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+ let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+ let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
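+ // Note the height at which the HTLC towards nodes[2] expires so we can mine to its timeout
+ // in the non-claim case below.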
+ let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let payment_event = SendEvent::from_node(&nodes[0]);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
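+ // Snapshot nodes[1]'s ChannelManager before it forwards the HTLC onward. Reloading from this
+ // snapshot below simulates the ChannelMonitor being persisted while the ChannelManager is not.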
+ let node_encoded = nodes[1].node.encode();
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let payment_event = SendEvent::from_node(&nodes[1]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+ check_added_monitors!(nodes[2], 1);
+
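+ // In the claim_htlc case, hand the preimage directly to nodes[2]'s ChannelMonitor so that the
+ // force-close below also broadcasts an HTLC-success transaction, without generating any
+ // off-chain messages.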
+ if claim_htlc {
+ get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+ &nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+ }
+ assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+ let _ = nodes[2].node.get_and_clear_pending_msg_events();
+
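+ // Force-close from nodes[2] so its commitment transaction (and the HTLC-success spend, if we
+ // claimed) is broadcast; nodes[1] will later see these confirm on chain.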
+ nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+ let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+
+ check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+ check_closed_broadcast!(nodes[2], true);
+
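+ // Reload nodes[1] from the stale ChannelManager snapshot together with its current
+ // ChannelMonitors. The monitor for chan_id_2 is ahead of the manager, so that channel is
+ // force-closed on deserialization.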
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+ reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+ check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+
+ let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_commitment_tx.len(), 1);
+
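+ // Reconnect nodes[0] and nodes[1]; the upstream channel resumes while the forwarded HTLC
+ // remains pending until its fate is decided on chain.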
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ if use_cs_commitment {
+ // If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+ // for an HTLC-spending transaction before it does anything with the HTLC upstream.
+ confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ if claim_htlc {
+ confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+ } else {
+ connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+ let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_htlc_timeout_tx.len(), 1);
+ confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+ }
+ } else {
+ confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+ }
+
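+ // Only once the HTLC's fate has been decided on chain does nodes[1] act on the upstream
+ // HTLC, either failing it back after the timeout or claiming it with the preimage learned
+ // from the HTLC-success transaction.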
+ if claim_htlc {
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+ } else {
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+ }
+ check_added_monitors!(nodes[1], 1);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+ if claim_htlc {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+ } else {
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+ }
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if claim_htlc {
+ expect_payment_sent!(nodes[0], payment_preimage);
+ } else {
+ expect_payment_failed!(nodes[0], payment_hash, false);
+ }
+}
+
+#[test]
+fn forwarded_payment_no_manager_persistence() {
+ do_forwarded_payment_no_manager_persistence(true, true);
+ do_forwarded_payment_no_manager_persistence(true, false);
+ do_forwarded_payment_no_manager_persistence(false, false);
+}
+
+#[test]
+fn removed_payment_no_manager_persistence() {
+ // If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but
+ // the corresponding ChannelManager persistence does not, we need to ensure that the HTLC is
+ // still failed back to the previous hop even though the ChannelMonitor is no longer aware
+ // of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
+ // were left dangling when a channel was force-closed due to a stale ChannelManager.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+ let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
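+ // Snapshot nodes[1]'s ChannelManager while the forwarded HTLC is still pending; the reload
+ // below restores this state even though the ChannelMonitor will have removed the HTLC by then.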
+ let node_encoded = nodes[1].node.encode();
+
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
+ check_added_monitors!(nodes[2], 1);
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
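+ // Reload nodes[1] from the stale snapshot plus its up-to-date ChannelMonitors. The monitor
+ // for chan_id_2 is ahead of the stale ChannelManager, so that channel is force-closed as
+ // outdated on deserialization.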
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+ reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+ match nodes[1].node.pop_pending_event().unwrap() {
+ Event::ChannelClosed { ref reason, .. } => {
+ assert_eq!(*reason, ClosureReason::OutdatedChannelManager);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ // Now that the ChannelManager has force-closed the channel which had the HTLC removed, the
+ // HTLC is forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
+ // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ expect_payment_failed!(nodes[0], payment_hash, false);
+}