diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index 7e10f970..2c7e8e72 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -637,7 +637,7 @@ fn test_forwardable_regen() {
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	expect_pending_htlcs_forwardable!(nodes[1]);
-	expect_payment_received!(nodes[1], payment_hash, payment_secret, 100_000);
+	expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000);
 	check_added_monitors!(nodes[1], 1);
 
 	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -646,7 +646,7 @@ fn test_forwardable_regen() {
 	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[2]);
-	expect_payment_received!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
+	expect_payment_claimable!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
 
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
@@ -656,7 +656,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	// Test what happens if a node receives an MPP payment, claims it, but crashes before
 	// persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
 	// updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
-	// have the PaymentReceived event, (b) have one (or two) channel(s) that goes on chain with the
+	// have the PaymentClaimable event, (b) have one (or two) channel(s) that goes on chain with the
 	// HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
 	// not have the preimage tied to the still-pending HTLC.
 	//
@@ -693,7 +693,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 2);
 
-	// Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+	// Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
 	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_events.len(), 2);
 	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
@@ -713,7 +713,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 
 	let original_manager = nodes[3].node.encode();
 
-	expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+	expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
 
 	nodes[3].node.claim_funds(payment_preimage);
 	check_added_monitors!(nodes[3], 2);
@@ -750,11 +750,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
 
 	// During deserialization, we should have closed one channel and broadcast its latest
-	// commitment transaction. We should also still have the original PaymentReceived event we
+	// commitment transaction. We should also still have the original PaymentClaimable event we
 	// never finished processing.
 	let events = nodes[3].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
-	if let Event::PaymentReceived { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
+	if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
 	if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
 	if persist_both_monitors {
 		if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
@@ -788,9 +788,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
 	check_added_monitors!(nodes[3], 1);
 	assert_eq!(ds_msgs.len(), 2);
-	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
-	let cs_updates = match ds_msgs[0] {
+	let cs_updates = match ds_msgs[1] {
 		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
 			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 			check_added_monitors!(nodes[2], 1);
@@ -933,3 +933,72 @@ fn forwarded_payment_no_manager_persistence() {
 	do_forwarded_payment_no_manager_persistence(true, false);
 	do_forwarded_payment_no_manager_persistence(false, false);
 }
+
+#[test]
+fn removed_payment_no_manager_persistence() {
+	// If an HTLC is failed to us on a channel, and the ChannelMonitor persistence completes, but
+	// the corresponding ChannelManager persistence does not, we need to ensure that the HTLC is
+	// still failed back to the previous hop even though the ChannelMonitor now no longer is aware
+	// of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
+	// were left dangling when a channel was force-closed due to a stale ChannelManager.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
+
+	let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	let node_encoded = nodes[1].node.encode();
+
+	nodes[2].node.fail_htlc_backwards(&payment_hash);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
+	check_added_monitors!(nodes[2], 1);
+	let events = nodes[2].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+			nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]);
+			commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+	reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	match nodes[1].node.pop_pending_event().unwrap() {
+		Event::ChannelClosed { ref reason, .. } => {
+			assert_eq!(*reason, ClosureReason::OutdatedChannelManager);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
+	// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
+	// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+	check_added_monitors!(nodes[1], 1);
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	expect_payment_failed!(nodes[0], payment_hash, false);
+}
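
The bulk of this diff tracks the rename of Event::PaymentReceived to Event::PaymentClaimable. For downstream consumers the change is mechanical but semantically useful: the new name makes clear that the payment has not been received yet and must still be claimed via ChannelManager::claim_funds. A minimal sketch of the consumer-side update, assuming the lightning crate's event API as of this commit; the handle_event wrapper and the preimage lookup are hypothetical, not part of this diff:

	// Sketch only: `handle_event` and the preimage lookup are placeholders.
	use lightning::util::events::Event;

	fn handle_event(event: Event) {
		match event {
			// Formerly `Event::PaymentReceived { .. }`. The payment is not yet
			// ours: it must be claimed with `ChannelManager::claim_funds`,
			// passing the preimage matching `payment_hash`.
			Event::PaymentClaimable { payment_hash, amount_msat, .. } => {
				// Look up the preimage for `payment_hash` here, then claim it.
				let _ = (payment_hash, amount_msat);
			},
			_ => {},
		}
	}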