diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 1056ceeb..cac0c8e9 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -10,14 +10,17 @@
//! Functional tests which test for correct behavior across node restarts.

use crate::chain::{ChannelMonitorUpdateStatus, Watch};
+use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor::ChannelMonitor;
+use crate::chain::keysinterface::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::test_utils;
-use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use crate::util::errors::APIError;
+use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::ser::{Writeable, ReadableArgs};
use crate::util::config::UserConfig;
@@ -37,7 +40,7 @@ fn test_funding_peer_disconnect() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
@@ -182,7 +185,7 @@ fn test_no_txn_manager_serialize_deserialize() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
@@ -222,7 +225,7 @@ fn test_manager_serialize_deserialize_events() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, 
&test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>; + let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Start creating a channel, but stop right before broadcasting the funding transaction @@ -305,7 +308,7 @@ fn test_simple_manager_serialize_deserialize() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>; + let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; @@ -333,7 +336,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let fee_estimator: test_utils::TestFeeEstimator; let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>; + let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; let chan_id_2 = create_announced_chan_between_nodes(&nodes, 2, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; @@ -389,10 +392,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut nodes_0_read = &nodes_0_serialized[..]; if let Err(msgs::DecodeError::InvalidValue) = - <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: UserConfig::default(), keys_manager, fee_estimator: &fee_estimator, + router: &nodes[0].router, chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, @@ -403,10 +407,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut nodes_0_read = &nodes_0_serialized[..]; let (_, nodes_0_deserialized_tmp) = - <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, 
&test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: UserConfig::default(), keys_manager, fee_estimator: &fee_estimator, + router: nodes[0].router, chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, @@ -546,7 +551,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { if let MessageSendEvent::HandleError { ref action, .. } = msg { match action { &ErrorAction::SendErrorMessage { ref msg } => { - assert_eq!(msg.data, "Failed to find corresponding channel"); + assert_eq!(msg.data, format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())); err_msgs_0.push(msg.clone()); }, _ => panic!("Unexpected event!"), @@ -559,7 +564,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool) { nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]); assert!(nodes[1].node.list_usable_channels().is_empty()); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() }); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) }); check_closed_broadcast!(nodes[1], false); } @@ -586,7 +591,7 @@ fn test_forwardable_regen() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>; + let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; @@ -635,7 +640,7 @@ fn test_forwardable_regen() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000); + expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000); check_added_monitors!(nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -644,7 +649,7 @@ fn test_forwardable_regen() { nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); - expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000); + expect_payment_claimable!(nodes[2], 
payment_hash_2, payment_secret_2, 200_000); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); @@ -654,7 +659,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // Test what happens if a node receives an MPP payment, claims it, but crashes before // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still - // have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the + // have the PaymentClaimable event, (b) have one (or two) channel(s) that goes on chain with the // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does // not have the preimage tied to the still-pending HTLC. // @@ -670,7 +675,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_3_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>; + let nodes_3_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); @@ -691,11 +696,13 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 2); - // Send the payment through to nodes[3] *without* clearing the PaymentReceived event + // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_events.len(), 2); - do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None); - do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None); + let (node_1_msgs, mut send_events) = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &send_events); + let (node_2_msgs, _send_events) = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &send_events); + do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None); + do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None); // Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s // monitors and ChannelManager, for use later, if we don't want to persist both monitors. 
@@ -711,7 +718,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { let original_manager = nodes[3].node.encode(); - expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000); + expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000); nodes[3].node.claim_funds(payment_preimage); check_added_monitors!(nodes[3], 2); @@ -748,11 +755,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false); // During deserialization, we should have closed one channel and broadcast its latest - // commitment transaction. We should also still have the original PaymentReceived event we + // commitment transaction. We should also still have the original PaymentClaimable event we // never finished processing. let events = nodes[3].node.get_and_clear_pending_events(); assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 }); - if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); } + if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); } if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); } if persist_both_monitors { if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); } @@ -811,3 +818,238 @@ fn test_partial_claim_before_restart() { do_test_partial_claim_before_restart(false); do_test_partial_claim_before_restart(true); } + +fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool, use_intercept: bool) { + if !use_cs_commitment { assert!(!claim_htlc); } + // If we go to forward a payment, and the ChannelMonitor persistence completes, but the + // ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail + // it back until the ChannelMonitor decides the fate of the HTLC. + // This was never an issue, but it may be easy to regress here going forward. 
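+ // Concretely: nodes[1] reloads below from a ChannelManager serialized before it processed the
+ // forward, so on restart the HTLC is known only to the (newer) ChannelMonitors, and its fate
+ // must come from what confirms on chain rather than from the manager's forwarding queue.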
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let mut intercept_forwards_config = test_default_channel_config();
+ intercept_forwards_config.accept_intercept_htlcs = true;
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+ let intercept_scid = nodes[1].node.get_intercept_scid();
+
+ let (mut route, payment_hash, payment_preimage, payment_secret) =
+ get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
+ if use_intercept {
+ route.paths[0][1].short_channel_id = intercept_scid;
+ }
+ let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+ let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let payment_event = SendEvent::from_node(&nodes[0]);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ // Store the `ChannelManager` before handling the `PendingHTLCsForwardable`/`HTLCIntercepted`
+ // events, expecting either event (and the HTLC itself) to be missing on reload even though it's
+ // present when we serialized.
+ let node_encoded = nodes[1].node.encode();
+
+ let mut intercept_id = None;
+ let mut expected_outbound_amount_msat = None;
+ if use_intercept {
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::HTLCIntercepted { intercept_id: ev_id, expected_outbound_amount_msat: ev_amt, .. } => {
+ intercept_id = Some(ev_id);
+ expected_outbound_amount_msat = Some(ev_amt);
+ },
+ _ => panic!()
+ }
+ nodes[1].node.forward_intercepted_htlc(intercept_id.unwrap(), &chan_id_2,
+ nodes[2].node.get_our_node_id(), expected_outbound_amount_msat.unwrap()).unwrap();
+ }
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let payment_event = SendEvent::from_node(&nodes[1]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+ check_added_monitors!(nodes[2], 1);
+
+ if claim_htlc {
+ get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+ &nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+ }
+ assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
+ let _ = nodes[2].node.get_and_clear_pending_msg_events();
+
+ nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+ let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+
+ check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+ check_closed_broadcast!(nodes[2], true);
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+ reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+ // Note that this checks that this is the only event on nodes[1], implying the
+ // `HTLCIntercepted` event has been removed in the `use_intercept` case.
+ check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+
+ if use_intercept {
+ // Attempt to forward the HTLC back out over nodes[1]'s still-open channel, ensuring we get
+ // an intercept-doesn't-exist error.
+ let forward_err = nodes[1].node.forward_intercepted_htlc(intercept_id.unwrap(), &chan_id_1,
+ nodes[0].node.get_our_node_id(), expected_outbound_amount_msat.unwrap()).unwrap_err();
+ assert_eq!(forward_err, APIError::APIMisuseError {
+ err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.unwrap().0))
+ });
+ }
+
+ let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_commitment_tx.len(), 1);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ if use_cs_commitment {
+ // If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+ // for an HTLC-spending transaction before it does anything with the HTLC upstream.
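+ // Concretely, nodes[1] needs to see either nodes[2]'s HTLC-success transaction (which
+ // reveals the preimage needed to claim upstream) or, after the HTLC expires, its own
+ // confirmed HTLC-timeout spend (which makes it safe to fail upstream) before touching
+ // the HTLC towards nodes[0].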
+ confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ if claim_htlc {
+ confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+ } else {
+ connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1);
+ let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_htlc_timeout_tx.len(), 1);
+ confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+ }
+ } else {
+ confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+ }
+
+ if !claim_htlc {
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+ } else {
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+ }
+ check_added_monitors!(nodes[1], 1);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+ if claim_htlc {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+ } else {
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+ }
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if claim_htlc {
+ expect_payment_sent!(nodes[0], payment_preimage);
+ } else {
+ expect_payment_failed!(nodes[0], payment_hash, false);
+ }
+}
+
+#[test]
+fn forwarded_payment_no_manager_persistence() {
+ do_forwarded_payment_no_manager_persistence(true, true, false);
+ do_forwarded_payment_no_manager_persistence(true, false, false);
+ do_forwarded_payment_no_manager_persistence(false, false, false);
+}
+
+#[test]
+fn intercepted_payment_no_manager_persistence() {
+ do_forwarded_payment_no_manager_persistence(true, true, true);
+ do_forwarded_payment_no_manager_persistence(true, false, true);
+ do_forwarded_payment_no_manager_persistence(false, false, true);
+}
+
+#[test]
+fn removed_payment_no_manager_persistence() {
+ // If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but
+ // the corresponding ChannelManager persistence does not, we need to ensure that the HTLC is
+ // still failed back to the previous hop even though the ChannelMonitor is no longer aware
+ // of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
+ // were left dangling when a channel was force-closed due to a stale ChannelManager.
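+ // In other words, on reload the manager must reconcile its set of in-flight HTLCs against
+ // each ChannelMonitor and fail back to nodes[0] any HTLC the monitor has already removed.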
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + + let persister; + let new_chain_monitor; + let nodes_1_deserialized; + + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2; + + let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + + let node_encoded = nodes[1].node.encode(); + + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors!(nodes[2], 1); + let events = nodes[2].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => { + nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false); + }, + _ => panic!("Unexpected event"), + } + + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + match nodes[1].node.pop_pending_event().unwrap() { + Event::ChannelClosed { ref reason, .. } => { + assert_eq!(*reason, ClosureReason::OutdatedChannelManager); + }, + _ => panic!("Unexpected event"), + } + + // Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is + // now forgotten everywhere. The ChannelManager should have, as a side-effect of reload, + // learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set. + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true); + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + check_added_monitors!(nodes[1], 1); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match &events[0] { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => { + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + }, + _ => panic!("Unexpected event"), + } + + expect_payment_failed!(nodes[0], payment_hash, false); +}
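Note on the recurring mechanical change above: `ChannelManager` gains a `Router` type parameter (fifth position, between the fee estimator and the logger), and `ChannelManagerReadArgs` gains a matching `router` field, so every deserialization call site changes shape. Pulled together from the hunks above as one sketch (the `channel_monitors` binding is assumed to be built as elsewhere in this file; it does not appear in these hunks):

	let mut nodes_0_read = &nodes_0_serialized[..];
	let (_, nodes_0_deserialized_tmp) =
		<(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster,
			&test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter,
			&test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
			default_config: UserConfig::default(),
			keys_manager,
			fee_estimator: &fee_estimator,
			// New: a Router implementation must now be supplied when reading a ChannelManager.
			router: nodes[0].router,
			chain_monitor: nodes[0].chain_monitor,
			tx_broadcaster: nodes[0].tx_broadcaster.clone(),
			logger: &logger,
			channel_monitors,
		}).unwrap();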