+
+fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool, use_intercept: bool) {
+ if !use_cs_commitment { assert!(!claim_htlc); }
+ // If we go to forward a payment, and the ChannelMonitor persistence completes, but the
+ // ChannelManager does not, we shouldn't try to forward the payment again, nor should we fail
+ // it back until the ChannelMonitor decides the fate of the HTLC.
+ // This was never an issue, but it may be easy to regress here going forward.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let mut intercept_forwards_config = test_default_channel_config();
+ intercept_forwards_config.accept_intercept_htlcs = true;
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]);
+
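+	// Declared before `nodes` so they outlive it: `reload_node!` below stores the reloaded
+	// state into these, and the node borrows them, so they must be dropped last.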
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+ let intercept_scid = nodes[1].node.get_intercept_scid();
+
+ let (mut route, payment_hash, payment_preimage, payment_secret) =
+ get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
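+	// When testing interception, point the second hop at nodes[1]'s intercept SCID so that
+	// nodes[1] generates an `HTLCIntercepted` event rather than forwarding automatically.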
+ if use_intercept {
+ route.paths[0].hops[1].short_channel_id = intercept_scid;
+ }
+ let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
+ let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV;
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), payment_id).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let payment_event = SendEvent::from_node(&nodes[0]);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+	// Store the `ChannelManager` before handling the `PendingHTLCsForwardable`/`HTLCIntercepted`
+	// events, expecting either event (and the HTLC itself) to be missing on reload even though
+	// it was present when we serialized.
+ let node_encoded = nodes[1].node.encode();
+
+ let mut intercept_id = None;
+ let mut expected_outbound_amount_msat = None;
+ if use_intercept {
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::HTLCIntercepted { intercept_id: ev_id, expected_outbound_amount_msat: ev_amt, .. } => {
+ intercept_id = Some(ev_id);
+ expected_outbound_amount_msat = Some(ev_amt);
+ },
+ _ => panic!()
+ }
+ nodes[1].node.forward_intercepted_htlc(intercept_id.unwrap(), &chan_id_2,
+ nodes[2].node.get_our_node_id(), expected_outbound_amount_msat.unwrap()).unwrap();
+ }
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let payment_event = SendEvent::from_node(&nodes[1]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg);
+ check_added_monitors!(nodes[2], 1);
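+	// Note that we deliberately don't complete the commitment_signed dance here: nodes[2] now
+	// has the HTLC in its latest commitment transaction, which it will broadcast on force-close.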
+
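+	// Hand the preimage directly to nodes[2]'s ChannelMonitor (not its ChannelManager) so that
+	// the monitor can claim the HTLC on-chain after the force-close below.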
+ if claim_htlc {
+ get_monitor!(nodes[2], chan_id_2).provide_payment_preimage(&payment_hash, &payment_preimage,
+ &nodes[2].tx_broadcaster, &LowerBoundedFeeEstimator(nodes[2].fee_estimator), &nodes[2].logger);
+ }
+ assert!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+
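+	// Drop the RAA/commitment_signed responses nodes[2] queued above; the channel is about to
+	// be force-closed, so they will never be delivered.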
+ let _ = nodes[2].node.get_and_clear_pending_msg_events();
+
+ nodes[2].node.force_close_broadcasting_latest_txn(&chan_id_2, &nodes[1].node.get_our_node_id()).unwrap();
+ let cs_commitment_tx = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(cs_commitment_tx.len(), if claim_htlc { 2 } else { 1 });
+
+ check_added_monitors!(nodes[2], 1);
+ check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
+ check_closed_broadcast!(nodes[2], true);
+
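+	// Reload nodes[1] with its ChannelManager state from before the forward but with its current
+	// ChannelMonitors, leaving the manager out of sync with the monitor for chan_id_2.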
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+ reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
+	// Note that this checks that the close is the only event on nodes[1], implying the
+	// `HTLCIntercepted` event was dropped on reload in the `use_intercept` case.
+ check_closed_event!(nodes[1], 1, ClosureReason::OutdatedChannelManager);
+
+ if use_intercept {
+		// Attempt to forward the HTLC back out over nodes[1]'s still-open channel, ensuring we
+		// get an intercept-doesn't-exist error.
+ let forward_err = nodes[1].node.forward_intercepted_htlc(intercept_id.unwrap(), &chan_id_1,
+ nodes[0].node.get_our_node_id(), expected_outbound_amount_msat.unwrap()).unwrap_err();
+ assert_eq!(forward_err, APIError::APIMisuseError {
+ err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.unwrap().0))
+ });
+ }
+
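+	// A timer tick drives nodes[1] to complete the force-close of chan_id_2, broadcasting its
+	// own commitment transaction.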
+ nodes[1].node.timer_tick_occurred();
+ let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_commitment_tx.len(), 1);
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ if use_cs_commitment {
+ // If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
+ // for an HTLC-spending transaction before it does anything with the HTLC upstream.
+ confirm_transaction(&nodes[1], &cs_commitment_tx[0]);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ if claim_htlc {
+ confirm_transaction(&nodes[1], &cs_commitment_tx[1]);
+ } else {
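+			// Connect blocks until the HTLC expires, at which point nodes[1] broadcasts a
+			// transaction timing out the HTLC on nodes[2]'s confirmed commitment.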
+ connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1 + 1);
+ let bs_htlc_timeout_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_htlc_timeout_tx.len(), 1);
+ confirm_transaction(&nodes[1], &bs_htlc_timeout_tx[0]);
+ }
+ } else {
+ confirm_transaction(&nodes[1], &bs_commitment_tx[0]);
+ }
+
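+	// With the HTLC's fate decided on-chain, nodes[1] either claims it from nodes[0] with the
+	// preimage or fails it back.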
+ if !claim_htlc {
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+ } else {
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true);
+ }
+ check_added_monitors!(nodes[1], 1);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fulfill_htlcs, update_fail_htlcs, commitment_signed, .. }, .. } => {
+ if claim_htlc {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
+ } else {
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+ }
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if claim_htlc {
+ expect_payment_sent!(nodes[0], payment_preimage);
+ } else {
+ expect_payment_failed!(nodes[0], payment_hash, false);
+ }
+}
+
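+// The parameters are (use_cs_commitment, claim_htlc, use_intercept): we test confirming
+// nodes[2]'s commitment transaction with the HTLC claimed, confirming it with the HTLC timed
+// out, and confirming nodes[1]'s own commitment transaction instead.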
+#[test]
+fn forwarded_payment_no_manager_persistence() {
+ do_forwarded_payment_no_manager_persistence(true, true, false);
+ do_forwarded_payment_no_manager_persistence(true, false, false);
+ do_forwarded_payment_no_manager_persistence(false, false, false);
+}
+
+#[test]
+fn intercepted_payment_no_manager_persistence() {
+ do_forwarded_payment_no_manager_persistence(true, true, true);
+ do_forwarded_payment_no_manager_persistence(true, false, true);
+ do_forwarded_payment_no_manager_persistence(false, false, true);
+}
+
+#[test]
+fn removed_payment_no_manager_persistence() {
+	// If an HTLC is failed back to us on a channel, and the ChannelMonitor persistence completes,
+	// but the corresponding ChannelManager persistence does not, we need to ensure that the HTLC
+	// is still failed back to the previous hop even though the ChannelMonitor is no longer aware
+	// of the HTLC. This was previously broken as no attempt was made to figure out which HTLCs
+	// were left dangling when a channel was force-closed due to a stale ChannelManager.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+ let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
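+	// Serialize nodes[1]'s ChannelManager while it still considers the HTLC pending; the
+	// removal below will only be reflected in the ChannelMonitor.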
+ let node_encoded = nodes[1].node.encode();
+
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
+ check_added_monitors!(nodes[2], 1);
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[1], nodes[2], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
+ let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
+ reload_node!(nodes[1], node_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+
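+	// The reloaded ChannelManager is stale compared to chan_id_2's monitor, so that channel is
+	// force-closed on reload.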
+ match nodes[1].node.pop_pending_event().unwrap() {
+ Event::ChannelClosed { ref reason, .. } => {
+ assert_eq!(*reason, ClosureReason::OutdatedChannelManager);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+	// Now that the ChannelManager has force-closed the channel which had the HTLC removed, it is
+	// forgotten everywhere. The ChannelManager should have, as a side-effect of reload, learned
+	// that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, .. } => {
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ expect_payment_failed!(nodes[0], payment_hash, false);
+}