Ensure all HTLCs for a claimed payment are claimed on startup
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 48b4b07c7d76d1f73238b27eb851ac32e720adbb..e840bef4aac65e0b08edead14a8ac2a5d6447e11 100644
@@ -9843,6 +9843,186 @@ fn test_keysend_payments_to_private_node() {
        claim_payment(&nodes[0], &path, test_preimage);
 }
 
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+       // Test what happens if a node receives an MPP payment, claims it, but crashes before
+       // persisting the ChannelManager. If `persist_both_monitors` is false, the node also crashes
+       // after updating only one of the two channels' ChannelMonitors. As a result, on startup,
+       // we'll (a) still have the PaymentReceived event, (b) have one (or two) channel(s) that will
+       // go on chain with the HTLC preimage in them, and (c) optionally have one channel that is
+       // live off-chain but does not have the preimage tied to the still-pending HTLC.
+       //
+       // To get to the correct state, on startup we should propagate the preimage to the
+       // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
+       // receiving the preimage without a state update.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+
+       let persister: test_utils::TestPersister;
+       let new_chain_monitor: test_utils::TestChainMonitor;
+       let nodes_3_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+       let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
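+       // Open channels 0 <-> 1, 0 <-> 2, 1 <-> 3 and 2 <-> 3, giving nodes[0] two disjoint
+       // two-hop paths to nodes[3]. The 1 <-> 3 monitor is the one we'll always "persist"
+       // post-claim; the 2 <-> 3 monitor keeps its pre-claim state unless `persist_both_monitors`
+       // is set.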
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+       let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+       let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+
+       // Create an MPP route for 15k sats, more than the default htlc-max of 10%
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+       assert_eq!(route.paths.len(), 2);
+       route.paths.sort_by(|path_a, _| {
+               // Sort the paths so that the path through nodes[1] comes first
+               if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+                       core::cmp::Ordering::Less
+               } else {
+                       core::cmp::Ordering::Greater
+               }
+       });
+
+       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
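+       // One ChannelMonitor update (and thus one commitment update) per MPP path: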
+       check_added_monitors!(nodes[0], 2);
+
+       // Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+       let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(send_events.len(), 2);
+       do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
+       do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+
+       // Now that we have an MPP payment pending, snapshot (a) the pre-claim ChannelMonitor for
+       // the channel we won't re-persist (skipped if `persist_both_monitors` is set) and (b)
+       // nodes[3]'s ChannelManager, both for use after the simulated crash.
+       let mut original_monitor = test_utils::TestVecWriter(Vec::new());
+       if !persist_both_monitors {
+               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if outpoint.to_channel_id() == chan_id_not_persisted {
+                               assert!(original_monitor.0.is_empty());
+                               nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+                       }
+               }
+       }
+
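+       // The ChannelManager snapshot is always taken pre-claim; reloading from it models a crash
+       // before the post-claim manager could be persisted.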
+       let mut original_manager = test_utils::TestVecWriter(Vec::new());
+       nodes[3].node.write(&mut original_manager).unwrap();
+
+       expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+
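+       // Claim the payment; this queues a preimage-bearing update to each of the two monitors.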
+       nodes[3].node.claim_funds(payment_preimage);
+       check_added_monitors!(nodes[3], 2);
+
+       // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
+       // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
+       // with the old ChannelManager.
+       let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
+       for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+               if outpoint.to_channel_id() == chan_id_persisted {
+                       assert!(updated_monitor.0.is_empty());
+                       nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
+               }
+       }
+       // If `persist_both_monitors` is set, get the second monitor here as well
+       if persist_both_monitors {
+               for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+                       if outpoint.to_channel_id() == chan_id_not_persisted {
+                               assert!(original_monitor.0.is_empty());
+                               nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+                       }
+               }
+       }
+
+       // Now restart nodes[3].
+       persister = test_utils::TestPersister::new();
+       let keys_manager = &chanmon_cfgs[3].keys_manager;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager);
+       nodes[3].chain_monitor = &new_chain_monitor;
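+       // Deserialize the monitor snapshots captured above. If `persist_both_monitors` is false,
+       // `original_monitor` is the stale, pre-claim copy of the 2 <-> 3 channel's monitor.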
+       let mut monitors = Vec::new();
+       for monitor_data in [original_monitor, updated_monitor].iter() {
+               let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut &monitor_data.0[..], keys_manager).unwrap();
+               monitors.push(deserialized_monitor);
+       }
+
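+       // Reload the ChannelManager from the stale snapshot, handing it both deserialized
+       // monitors. During this read the preimage should be replayed into any monitor that is
+       // missing it.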
+       let config = UserConfig::default();
+       nodes_3_deserialized = {
+               let mut channel_monitors = HashMap::new();
+               for monitor in monitors.iter_mut() {
+                       channel_monitors.insert(monitor.get_funding_txo().0, monitor);
+               }
+               <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs {
+                       default_config: config,
+                       keys_manager,
+                       fee_estimator: node_cfgs[3].fee_estimator,
+                       chain_monitor: nodes[3].chain_monitor,
+                       tx_broadcaster: nodes[3].tx_broadcaster.clone(),
+                       logger: nodes[3].logger,
+                       channel_monitors,
+               }).unwrap().1
+       };
+       nodes[3].node = &nodes_3_deserialized;
+
+       for monitor in monitors {
+               // On startup the preimage should have been copied into the non-persisted monitor:
+               assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
+               nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+       }
+       check_added_monitors!(nodes[3], 2);
+
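+       // nodes[3]'s restart dropped its connections; let nodes[1] and nodes[2] know their peer is
+       // gone.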
+       nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+       nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+
+       // During deserialization, we should have closed one channel (or two, if
+       // `persist_both_monitors` is set) and broadcast the latest commitment transaction(s). We
+       // should also still have the original PaymentReceived event we never finished processing.
+       let events = nodes[3].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), if persist_both_monitors { 3 } else { 2 });
+       if let Event::PaymentReceived { amt: 15_000_000, .. } = events[0] { } else { panic!(); }
+       if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
+       if persist_both_monitors {
+               if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+       }
+
+       assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
+       if !persist_both_monitors {
+               // If one of the two channels is still live, reveal the payment preimage over it.
+
+               nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
+               nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+               let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
+
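+               // Process the reestablish messages. nodes[2] has nothing pending beyond a
+               // channel_update; the HTLC claim nodes[3] queued on startup should be released once
+               // the channel is back up.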
+               nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
+               get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+               assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+               nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
+
+               // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
+               // claim should fly.
+               let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+               check_added_monitors!(nodes[3], 1);
+               assert_eq!(ds_msgs.len(), 2);
+               if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+
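+               // ds_msgs[0] carries the commitment update that fulfills the still-pending HTLC: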
+               let cs_updates = match ds_msgs[0] {
+                       MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+                               nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+                               check_added_monitors!(nodes[2], 1);
+                               let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+                               expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+                               commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+                               cs_updates
+                       }
+                       _ => panic!(),
+               };
+
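+               // Finally, relay the fulfill back to nodes[0] and complete the commitment dance,
+               // proving the payment end-to-end.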
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
+               expect_payment_sent!(nodes[0], payment_preimage);
+       }
+}
+
+#[test]
+fn test_partial_claim_before_restart() {
+       do_test_partial_claim_before_restart(false);
+       do_test_partial_claim_before_restart(true);
+}
+
 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
 #[derive(Clone, Copy, PartialEq)]
 enum ExposureEvent {